v5.4
  1/* SPDX-License-Identifier: GPL-2.0+ */
  2/*
  3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  4 * Internal non-public definitions.
  5 *
  6 * Copyright IBM Corporation, 2008
  7 *
  8 * Author: Ingo Molnar <mingo@elte.hu>
  9 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 10 */
 11
 12#include <linux/cache.h>
 13#include <linux/spinlock.h>
 14#include <linux/rtmutex.h>
 15#include <linux/threads.h>
 16#include <linux/cpumask.h>
 17#include <linux/seqlock.h>
 18#include <linux/swait.h>
 19#include <linux/stop_machine.h>
 20#include <linux/rcu_node_tree.h>
 21
 22#include "rcu_segcblist.h"
 23
 24/* Communicate arguments to a workqueue handler. */
 25struct rcu_exp_work {
 26	unsigned long rew_s;
 27	struct work_struct rew_work;
 28};
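
A minimal sketch (not kernel code) of how a structure like rcu_exp_work passes its argument to a workqueue handler: the handler recovers the enclosing structure with container_of(), and rew_s carries the expedited sequence snapshot. The example_* names are illustrative.

static void example_exp_handler(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	/* rewp->rew_s now holds the caller's sequence snapshot. */
	pr_info("expedited seq snapshot: %lu\n", rewp->rew_s);
}

static void example_queue_exp(struct rcu_exp_work *rew, unsigned long s)
{
	rew->rew_s = s;				/* Pass the argument... */
	INIT_WORK(&rew->rew_work, example_exp_handler);
	queue_work(system_wq, &rew->rew_work);	/* ...to the handler. */
}
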
 29
 30/* RCU's kthread states for tracing. */
 31#define RCU_KTHREAD_STOPPED  0
 32#define RCU_KTHREAD_RUNNING  1
 33#define RCU_KTHREAD_WAITING  2
 34#define RCU_KTHREAD_OFFCPU   3
 35#define RCU_KTHREAD_YIELDING 4
 36#define RCU_KTHREAD_MAX      4
 37
 38/*
 39 * Definition for node within the RCU grace-period-detection hierarchy.
 40 */
 41struct rcu_node {
 42	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
 43					/*  some rcu_state fields as well as */
 44					/*  following. */
 45	unsigned long gp_seq;	/* Track rsp->gp_seq. */
 46	unsigned long gp_seq_needed; /* Track furthest future GP request. */
 47	unsigned long completedqs; /* All QSes done for this node. */
 48	unsigned long qsmask;	/* CPUs or groups that need to switch in */
 49				/*  order for current grace period to proceed.*/
 50				/*  In leaf rcu_node, each bit corresponds to */
 51				/*  an rcu_data structure, otherwise, each */
 52				/*  bit corresponds to a child rcu_node */
 53				/*  structure. */
 54	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
 55	unsigned long qsmaskinit;
 56				/* Per-GP initial value for qsmask. */
 57				/*  Initialized from ->qsmaskinitnext at the */
 58				/*  beginning of each grace period. */
 59	unsigned long qsmaskinitnext;
 60				/* Online CPUs for next grace period. */
 61	unsigned long expmask;	/* CPUs or groups that need to check in */
 62				/*  to allow the current expedited GP */
 63				/*  to complete. */
 64	unsigned long expmaskinit;
 65				/* Per-GP initial values for expmask. */
 66				/*  Initialized from ->expmaskinitnext at the */
 67				/*  beginning of each expedited GP. */
 68	unsigned long expmaskinitnext;
 69				/* Online CPUs for next expedited GP. */
 70				/*  Any CPU that has ever been online will */
 71				/*  have its bit set. */
 72	unsigned long ffmask;	/* Fully functional CPUs. */
 73	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
 74				/*  Only one bit will be set in this mask. */
 75	int	grplo;		/* lowest-numbered CPU or group here. */
 76	int	grphi;		/* highest-numbered CPU or group here. */
 77	u8	grpnum;		/* CPU/group number for next level up. */
 78	u8	level;		/* root is at level 0. */
 79	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
 80				/*  exit RCU read-side critical sections */
 81				/*  before propagating offline up the */
 82				/*  rcu_node tree? */
 83	struct rcu_node *parent;
 84	struct list_head blkd_tasks;
 85				/* Tasks blocked in RCU read-side critical */
 86				/*  section.  Tasks are placed at the head */
 87				/*  of this list and age towards the tail. */
 88	struct list_head *gp_tasks;
 89				/* Pointer to the first task blocking the */
 90				/*  current grace period, or NULL if there */
 91				/*  is no such task. */
 92	struct list_head *exp_tasks;
 93				/* Pointer to the first task blocking the */
 94				/*  current expedited grace period, or NULL */
 95				/*  if there is no such task.  If there */
 96				/*  is no current expedited grace period, */
 97				/*  then there cannot be any such task. */
 98	struct list_head *boost_tasks;
 99				/* Pointer to first task that needs to be */
100				/*  priority boosted, or NULL if no priority */
101				/*  boosting is needed for this rcu_node */
102				/*  structure.  If there are no tasks */
103				/*  queued on this rcu_node structure that */
104				/*  are blocking the current grace period, */
105				/*  there can be no such task. */
106	struct rt_mutex boost_mtx;
107				/* Used only for the priority-boosting */
108				/*  side effect, not as a lock. */
109	unsigned long boost_time;
110				/* When to start boosting (jiffies). */
111	struct task_struct *boost_kthread_task;
112				/* kthread that takes care of priority */
113				/*  boosting for this rcu_node structure. */
114	unsigned int boost_kthread_status;
115				/* State of boost_kthread_task for tracing. */
116#ifdef CONFIG_RCU_NOCB_CPU
117	struct swait_queue_head nocb_gp_wq[2];
118				/* Place for rcu_nocb_kthread() to wait for GP. */
119#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
120	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
121
122	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
123	unsigned long exp_seq_rq;
124	wait_queue_head_t exp_wq[4];
125	struct rcu_exp_work rew;
126	bool exp_need_flush;	/* Need to flush workitem? */
127} ____cacheline_internodealigned_in_smp;
128
129/*
130 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
131 * are indexed relative to this interval rather than the global CPU ID space.
132 * This generates the bit for a CPU in node-local masks.
133 */
134#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
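
For example, a leaf rcu_node with ->grplo == 16 and ->grphi == 31 covers CPUs 16 through 31, so CPU 19 maps to bit 3 of that node's masks. A hedged usage sketch (the example_* helper is illustrative, not kernel code):

static bool example_cpu_blocking_gp(struct rcu_node *rnp, int cpu)
{
	/* Caller must hold rnp->lock or otherwise stabilize ->qsmask. */
	return READ_ONCE(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu);
}
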
135
136/*
137 * Union to allow "aggregate OR" operation on the need for a quiescent
138 * state by the normal and expedited grace periods.
139 */
140union rcu_noqs {
141	struct {
142		u8 norm;
143		u8 exp;
144	} b; /* Bits. */
145	u16 s; /* Set of bits, aggregate OR here. */
146};
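
Because both flags share one 16-bit word, a single load of ->cpu_no_qs.s answers "does either grace-period flavor still need a quiescent state from this CPU?" while the individual bytes remain addressable for flavor-specific updates. A sketch of the idiom (illustrative names, not kernel code):

static bool example_any_qs_pending(struct rcu_data *rdp)
{
	return rdp->cpu_no_qs.s != 0;	/* norm, exp, or both pending. */
}

static void example_report_normal_qs(struct rcu_data *rdp)
{
	rdp->cpu_no_qs.b.norm = 0;	/* Normal GP satisfied... */
	/* ...but .b.exp, and hence .s, may still be nonzero. */
}
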
147
148/* Per-CPU data for read-copy update. */
149struct rcu_data {
150	/* 1) quiescent-state and grace-period handling : */
151	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
152	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
153	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
154	bool		core_needs_qs;	/* Core waits for quiescent state. */
155	bool		beenonline;	/* CPU online at least once. */
156	bool		gpwrap;		/* Possible ->gp_seq wrap. */
157	bool		exp_deferred_qs; /* This CPU awaiting a deferred QS? */
158	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
159	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
160	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
161					/*  ticks this CPU has handled */
162					/*  during and after the last grace */
163					/* period it is aware of. */
164	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
165	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
166
167	/* 2) batch handling */
168	struct rcu_segcblist cblist;	/* Segmented callback list, with */
169					/* different callbacks waiting for */
170					/* different grace periods. */
171	long		qlen_last_fqs_check;
172					/* qlen at last check for QS forcing */
173	unsigned long	n_force_qs_snap;
174					/* did other CPU force QS recently? */
175	long		blimit;		/* Upper limit on a processed batch */
176
177	/* 3) dynticks interface. */
178	int dynticks_snap;		/* Per-GP tracking for dynticks. */
179	long dynticks_nesting;		/* Track process nesting level. */
180	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
181	atomic_t dynticks;		/* Even value for idle, else odd. */
182	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
183	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
184#ifdef CONFIG_RCU_FAST_NO_HZ
185	bool all_lazy;			/* All CPU's CBs lazy at idle start? */
186	unsigned long last_accelerate;	/* Last jiffy CBs were accelerated. */
187	unsigned long last_advance_all;	/* Last jiffy CBs were all advanced. */
188	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
189#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
190
191	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
192	struct rcu_head barrier_head;
193	int exp_dynticks_snap;		/* Double-check need for IPI. */
194
195	/* 5) Callback offloading. */
196#ifdef CONFIG_RCU_NOCB_CPU
197	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
198	struct task_struct *nocb_gp_kthread;
199	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
200	atomic_t nocb_lock_contended;	/* Contention experienced. */
201	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
202	struct timer_list nocb_timer;	/* Enforce finite deferral. */
203	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
204
205	/* The following fields are used by call_rcu, hence own cacheline. */
206	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
207	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
208	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
209	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
210	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */
211
212	/* The following fields are used by GP kthread, hence own cacheline. */
213	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
214	struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */
215	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
216	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
217	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
218	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
219	unsigned long nocb_gp_loops;	/* # passes through wait code. */
220	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
221	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
222	struct task_struct *nocb_cb_kthread;
223	struct rcu_data *nocb_next_cb_rdp;
224					/* Next rcu_data in wakeup chain. */
225
226	/* The following fields are used by CB kthread, hence new cacheline. */
227	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
228					/* GP rdp takes GP-end wakeups. */
229#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
230
231	/* 6) RCU priority boosting. */
232	struct task_struct *rcu_cpu_kthread_task;
233					/* rcuc per-CPU kthread or NULL. */
234	unsigned int rcu_cpu_kthread_status;
235	char rcu_cpu_has_work;
236
237	/* 7) Diagnostic data, including RCU CPU stall warnings. */
238	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
239	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
240	struct irq_work rcu_iw;		/* Check for non-irq activity. */
241	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
242	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
243	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
244	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
245	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
246	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
247	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
248
249	int cpu;
250};
251
252/* Values for nocb_defer_wakeup field in struct rcu_data. */
253#define RCU_NOCB_WAKE_NOT	0
254#define RCU_NOCB_WAKE		1
255#define RCU_NOCB_WAKE_FORCE	2
256
257#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
258					/* For jiffies_till_first_fqs and */
259					/*  jiffies_till_next_fqs. */
260
261#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
262					/*  delay between bouts of */
263					/*  quiescent-state forcing. */
264
265#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
266					/*  at least one scheduling clock */
267					/*  irq before ratting on them. */
268
269#define rcu_wait(cond)							\
270do {									\
271	for (;;) {							\
272		set_current_state(TASK_INTERRUPTIBLE);			\
273		if (cond)						\
274			break;						\
275		schedule();						\
276	}								\
277	__set_current_state(TASK_RUNNING);				\
278} while (0)
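
rcu_wait() loops in TASK_INTERRUPTIBLE and rechecks the condition after every wakeup, so spurious wakeups are harmless. A usage sketch for a kthread; the flag and function names are hypothetical, not kernel code:

static bool example_work_ready;		/* Hypothetical wakeup condition. */

static int example_kthread_fn(void *unused)
{
	for (;;) {
		rcu_wait(READ_ONCE(example_work_ready) ||
			 kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* ...process one batch of work, then wait again... */
	}
	return 0;
}
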
279
280/*
281 * RCU global state, including node hierarchy.  This hierarchy is
282 * represented in "heap" form in a dense array.  The root (first level)
283 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
284 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
285 * and the third level in ->node[m+1] and following (->node[m+1] referenced
286 * by ->level[2]).  The number of levels is determined by the number of
287 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
288 * consisting of a single rcu_node.
289 */
290struct rcu_state {
291	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
292	struct rcu_node *level[RCU_NUM_LVLS + 1];
293						/* Hierarchy levels (+1 to */
294						/*  shut bogus gcc warning) */
295	int ncpus;				/* # CPUs seen so far. */
296
297	/* The following fields are guarded by the root rcu_node's lock. */
298
299	u8	boost ____cacheline_internodealigned_in_smp;
300						/* Subject to priority boost. */
301	unsigned long gp_seq;			/* Grace-period sequence #. */
302	struct task_struct *gp_kthread;		/* Task for grace periods. */
303	struct swait_queue_head gp_wq;		/* Where GP task waits. */
304	short gp_flags;				/* Commands for GP task. */
305	short gp_state;				/* GP kthread sleep state. */
306	unsigned long gp_wake_time;		/* Last GP kthread wake. */
307	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
308
309	/* End of fields guarded by root rcu_node's lock. */
310
311	struct mutex barrier_mutex;		/* Guards barrier fields. */
312	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
313	struct completion barrier_completion;	/* Wake at barrier end. */
314	unsigned long barrier_sequence;		/* ++ at start and end of */
315						/*  rcu_barrier(). */
316	/* End of fields guarded by barrier_mutex. */
317
318	struct mutex exp_mutex;			/* Serialize expedited GP. */
319	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
320	unsigned long expedited_sequence;	/* Take a ticket. */
321	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
322	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
323	int ncpus_snap;				/* # CPUs seen last time. */
324
325	unsigned long jiffies_force_qs;		/* Time at which to invoke */
326						/*  force_quiescent_state(). */
327	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
328						/*  kthreads, if configured. */
329	unsigned long n_force_qs;		/* Number of calls to */
330						/*  force_quiescent_state(). */
331	unsigned long gp_start;			/* Time at which GP started, */
332						/*  in jiffies. */
333	unsigned long gp_end;			/* Time last GP ended, again */
334						/*  in jiffies. */
335	unsigned long gp_activity;		/* Time of last GP kthread */
336						/*  activity in jiffies. */
337	unsigned long gp_req_activity;		/* Time of last GP request */
338						/*  in jiffies. */
339	unsigned long jiffies_stall;		/* Time at which to check */
340						/*  for CPU stalls. */
341	unsigned long jiffies_resched;		/* Time at which to resched */
342						/*  a reluctant CPU. */
343	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
344						/*  GP start. */
345	unsigned long gp_max;			/* Maximum GP duration in */
346						/*  jiffies. */
347	const char *name;			/* Name of structure. */
348	char abbr;				/* Abbreviated name. */
349
350	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
351						/* Synchronize offline with */
352						/*  GP pre-initialization. */
353};
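
As a worked example of the "heap" layout described above the structure, assume 64 CPUs and a leaf fanout of 16 (illustrative numbers): ->node[] then holds one root followed by four leaves, with ->level[0] == &->node[0] and ->level[1] == &->node[1]. A sketch of the resulting index arithmetic; the real kernel instead caches each CPU's leaf in rcu_data's ->mynode:

static struct rcu_node *example_cpu_to_leaf(struct rcu_state *rsp, int cpu)
{
	/* Leaves occupy the last level of the dense ->node[] array. */
	return rsp->level[RCU_NUM_LVLS - 1] + cpu / 16; /* 16 == leaf fanout. */
}
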
354
355/* Values for rcu_state structure's gp_flags field. */
356#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
357#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
358
359/* Values for rcu_state structure's gp_state field. */
360#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
361#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
362#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
363#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
364#define RCU_GP_INIT      4	/* Grace-period initialization. */
365#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
366#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
367#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
368#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */
369
370static const char * const gp_state_names[] = {
371	"RCU_GP_IDLE",
372	"RCU_GP_WAIT_GPS",
373	"RCU_GP_DONE_GPS",
374	"RCU_GP_ONOFF",
375	"RCU_GP_INIT",
376	"RCU_GP_WAIT_FQS",
377	"RCU_GP_DOING_FQS",
378	"RCU_GP_CLEANUP",
379	"RCU_GP_CLEANED",
380};
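
Stall-warning code turns a gp_state value into one of these strings with a bounds-checked lookup of roughly this shape (a sketch along the lines of tree_stall.h's gp_state_getname()):

static const char *example_gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}
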
381
382/*
383 * In order to export the rcu_state name to the tracing tools, it
384 * needs to be added in the __tracepoint_string section.
385 * This requires defining a separate variable tp_<sname>_varname
386 * that points to the string being used, which allows the
387 * tracing userspace tools to map the string address back to
388 * the matching string.
389 */
390#ifdef CONFIG_PREEMPT_RCU
391#define RCU_ABBR 'p'
392#define RCU_NAME_RAW "rcu_preempt"
393#else /* #ifdef CONFIG_PREEMPT_RCU */
394#define RCU_ABBR 's'
395#define RCU_NAME_RAW "rcu_sched"
396#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
397#ifndef CONFIG_TRACING
398#define RCU_NAME RCU_NAME_RAW
399#else /* #ifdef CONFIG_TRACING */
400static char rcu_name[] = RCU_NAME_RAW;
401static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
402#define RCU_NAME rcu_name
403#endif /* #else #ifdef CONFIG_TRACING */
404
405int rcu_dynticks_snap(struct rcu_data *rdp);
406
407/* Forward declarations for tree_plugin.h */
408static void rcu_bootup_announce(void);
409static void rcu_qs(void);
410static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
411#ifdef CONFIG_HOTPLUG_CPU
412static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
413#endif /* #ifdef CONFIG_HOTPLUG_CPU */
414static int rcu_print_task_exp_stall(struct rcu_node *rnp);
415static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
416static void rcu_flavor_sched_clock_irq(int user);
417void call_rcu(struct rcu_head *head, rcu_callback_t func);
418static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
419static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
420static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
421static bool rcu_is_callbacks_kthread(void);
422static void rcu_cpu_kthread_setup(unsigned int cpu);
423static void __init rcu_spawn_boost_kthreads(void);
424static void rcu_prepare_kthreads(int cpu);
425static void rcu_cleanup_after_idle(void);
426static void rcu_prepare_for_idle(void);
427static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
428static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
429static void rcu_preempt_deferred_qs(struct task_struct *t);
430static void zero_cpu_stall_ticks(struct rcu_data *rdp);
431static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
432static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
433static void rcu_init_one_nocb(struct rcu_node *rnp);
434static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
435				  unsigned long j);
436static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
437				bool *was_alldone, unsigned long flags);
438static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
439				 unsigned long flags);
440static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
441static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
442static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
443static void rcu_spawn_cpu_nocb_kthread(int cpu);
444static void __init rcu_spawn_nocb_kthreads(void);
445static void show_rcu_nocb_state(struct rcu_data *rdp);
446static void rcu_nocb_lock(struct rcu_data *rdp);
447static void rcu_nocb_unlock(struct rcu_data *rdp);
448static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
449				       unsigned long flags);
450static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
451#ifdef CONFIG_RCU_NOCB_CPU
452static void __init rcu_organize_nocb_kthreads(void);
453#define rcu_nocb_lock_irqsave(rdp, flags)				\
454do {									\
455	if (!rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
456		local_irq_save(flags);					\
457	else								\
458		raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags));	\
459} while (0)
460#else /* #ifdef CONFIG_RCU_NOCB_CPU */
461#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
462#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
463
464static void rcu_bind_gp_kthread(void);
465static bool rcu_nohz_full_cpu(void);
466static void rcu_dynticks_task_enter(void);
467static void rcu_dynticks_task_exit(void);
468
469/* Forward declarations for tree_stall.h */
470static void record_gp_stall_check_time(void);
471static void rcu_iw_handler(struct irq_work *iwp);
472static void check_cpu_stall(struct rcu_data *rdp);
473static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
474				     const unsigned long gpssdelay);
v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0+ */
  2/*
  3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  4 * Internal non-public definitions.
  5 *
  6 * Copyright IBM Corporation, 2008
  7 *
  8 * Author: Ingo Molnar <mingo@elte.hu>
  9 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 10 */
 11
 12#include <linux/cache.h>
 13#include <linux/kthread.h>
 14#include <linux/spinlock.h>
 15#include <linux/rtmutex.h>
 16#include <linux/threads.h>
 17#include <linux/cpumask.h>
 18#include <linux/seqlock.h>
 19#include <linux/swait.h>
 20#include <linux/rcu_node_tree.h>
 21
 22#include "rcu_segcblist.h"
 23
 24/* Communicate arguments to a kthread worker handler. */
 25struct rcu_exp_work {
 26	unsigned long rew_s;
 27	struct kthread_work rew_work;
 28};
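
In this version rew_work is a kthread_work, so expedited grace-period initialization is queued on a dedicated kthread_worker (the per-node ->exp_kworker below) rather than on a system workqueue. A hedged sketch of the queueing pattern; the example_* names are illustrative, not the kernel's exact code:

static void example_exp_handler(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	/* ...initialize this node's expedited GP using rewp->rew_s... */
}

static void example_queue_exp(struct rcu_node *rnp, unsigned long s)
{
	rnp->rew.rew_s = s;
	kthread_init_work(&rnp->rew.rew_work, example_exp_handler);
	kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
}
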
 29
 30/* RCU's kthread states for tracing. */
 31#define RCU_KTHREAD_STOPPED  0
 32#define RCU_KTHREAD_RUNNING  1
 33#define RCU_KTHREAD_WAITING  2
 34#define RCU_KTHREAD_OFFCPU   3
 35#define RCU_KTHREAD_YIELDING 4
 36#define RCU_KTHREAD_MAX      4
 37
 38/*
 39 * Definition for node within the RCU grace-period-detection hierarchy.
 40 */
 41struct rcu_node {
 42	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
 43					/*  some rcu_state fields as well as */
 44					/*  following. */
 45	unsigned long gp_seq;	/* Track rsp->gp_seq. */
 46	unsigned long gp_seq_needed; /* Track furthest future GP request. */
 47	unsigned long completedqs; /* All QSes done for this node. */
 48	unsigned long qsmask;	/* CPUs or groups that need to switch in */
 49				/*  order for current grace period to proceed.*/
 50				/*  In leaf rcu_node, each bit corresponds to */
 51				/*  an rcu_data structure, otherwise, each */
 52				/*  bit corresponds to a child rcu_node */
 53				/*  structure. */
 54	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
 55	unsigned long qsmaskinit;
 56				/* Per-GP initial value for qsmask. */
 57				/*  Initialized from ->qsmaskinitnext at the */
 58				/*  beginning of each grace period. */
 59	unsigned long qsmaskinitnext;
 60	unsigned long expmask;	/* CPUs or groups that need to check in */
 61				/*  to allow the current expedited GP */
 62				/*  to complete. */
 63	unsigned long expmaskinit;
 64				/* Per-GP initial values for expmask. */
 65				/*  Initialized from ->expmaskinitnext at the */
 66				/*  beginning of each expedited GP. */
 67	unsigned long expmaskinitnext;
 68				/* Online CPUs for next expedited GP. */
 69				/*  Any CPU that has ever been online will */
 70				/*  have its bit set. */
 71	struct kthread_worker *exp_kworker;
 72				/* Workers performing per-node expedited GP */
 73				/* initialization. */
 74	unsigned long cbovldmask;
 75				/* CPUs experiencing callback overload. */
 76	unsigned long ffmask;	/* Fully functional CPUs. */
 77	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
 78				/*  Only one bit will be set in this mask. */
 79	int	grplo;		/* lowest-numbered CPU here. */
 80	int	grphi;		/* highest-numbered CPU here. */
 81	u8	grpnum;		/* group number for next level up. */
 82	u8	level;		/* root is at level 0. */
 83	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
 84				/*  exit RCU read-side critical sections */
 85				/*  before propagating offline up the */
 86				/*  rcu_node tree? */
 87	struct rcu_node *parent;
 88	struct list_head blkd_tasks;
 89				/* Tasks blocked in RCU read-side critical */
 90				/*  section.  Tasks are placed at the head */
 91				/*  of this list and age towards the tail. */
 92	struct list_head *gp_tasks;
 93				/* Pointer to the first task blocking the */
 94				/*  current grace period, or NULL if there */
 95				/*  is no such task. */
 96	struct list_head *exp_tasks;
 97				/* Pointer to the first task blocking the */
 98				/*  current expedited grace period, or NULL */
 99				/*  if there is no such task.  If there */
100				/*  is no current expedited grace period, */
101				/*  then there cannot be any such task. */
102	struct list_head *boost_tasks;
103				/* Pointer to first task that needs to be */
104				/*  priority boosted, or NULL if no priority */
105				/*  boosting is needed for this rcu_node */
106				/*  structure.  If there are no tasks */
107				/*  queued on this rcu_node structure that */
108				/*  are blocking the current grace period, */
109				/*  there can be no such task. */
110	struct rt_mutex boost_mtx;
111				/* Used only for the priority-boosting */
112				/*  side effect, not as a lock. */
113	unsigned long boost_time;
114				/* When to start boosting (jiffies). */
115	struct mutex kthread_mutex;
116				/* Exclusion for thread spawning and affinity */
117				/*  manipulation. */
118	struct task_struct *boost_kthread_task;
119				/* kthread that takes care of priority */
120				/*  boosting for this rcu_node structure. */
121	unsigned int boost_kthread_status;
122				/* State of boost_kthread_task for tracing. */
123	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
124#ifdef CONFIG_RCU_NOCB_CPU
125	struct swait_queue_head nocb_gp_wq[2];
126				/* Place for rcu_nocb_kthread() to wait for GP. */
127#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
128	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
129
130	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
131	unsigned long exp_seq_rq;
132	wait_queue_head_t exp_wq[4];
133	struct rcu_exp_work rew;
134	bool exp_need_flush;	/* Need to flush workitem? */
135	raw_spinlock_t exp_poll_lock;
136				/* Lock and data for polled expedited grace periods. */
137	unsigned long exp_seq_poll_rq;
138	struct work_struct exp_poll_wq;
139} ____cacheline_internodealigned_in_smp;
140
141/*
142 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
143 * are indexed relative to this interval rather than the global CPU ID space.
144 * This generates the bit for a CPU in node-local masks.
145 */
146#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
147
148/*
149 * Union to allow "aggregate OR" operation on the need for a quiescent
150 * state by the normal and expedited grace periods.
151 */
152union rcu_noqs {
153	struct {
154		u8 norm;
155		u8 exp;
156	} b; /* Bits. */
157	u16 s; /* Set of bits, aggregate OR here. */
158};
159
160/*
161 * Record the snapshot of the core stats at half of the first RCU stall timeout.
162 * The member gp_seq is used to ensure that all members are updated only once
163 * during the sampling period. The snapshot is taken only if this gp_seq is not
164 * equal to rdp->gp_seq.
165 */
166struct rcu_snap_record {
167	unsigned long	gp_seq;		/* Track rdp->gp_seq counter */
168	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
169	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
170	u64		cputime_system; /* Accumulated cputime of kernel tasks */
171	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
172	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
173	unsigned long long nr_csw;	/* Accumulated number of task switches */
174	unsigned long   jiffies;	/* Track jiffies value */
175};
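
A sketch of the update-once discipline the comment describes (illustrative, not the exact kernel code): the snapshot is refreshed only when ->gp_seq shows that the current grace period has not yet been sampled on this CPU.

static void example_maybe_snapshot(struct rcu_data *rdp)
{
	struct rcu_snap_record *rsrp = &rdp->snap_record;

	if (rsrp->gp_seq == rdp->gp_seq)
		return;				/* Already sampled this GP. */
	/* ...record cputimes and irq/softirq/context-switch counts... */
	rsrp->jiffies = jiffies;
	rsrp->gp_seq = rdp->gp_seq;		/* Mark this GP as sampled. */
}
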
176
177/* Per-CPU data for read-copy update. */
178struct rcu_data {
179	/* 1) quiescent-state and grace-period handling : */
180	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
181	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
182	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
183	bool		core_needs_qs;	/* Core waits for quiescent state. */
184	bool		beenonline;	/* CPU online at least once. */
185	bool		gpwrap;		/* Possible ->gp_seq wrap. */
186	bool		cpu_started;	/* RCU watching this onlining CPU. */
187	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
188	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
189	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
190					/*  ticks this CPU has handled */
191					/*  during and after the last grace */
192					/* period it is aware of. */
193	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
194	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
195	struct work_struct strict_work;	/* Schedule readers for strict GPs. */
196
197	/* 2) batch handling */
198	struct rcu_segcblist cblist;	/* Segmented callback list, with */
199					/* different callbacks waiting for */
200					/* different grace periods. */
201	long		qlen_last_fqs_check;
202					/* qlen at last check for QS forcing */
203	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
204	unsigned long	n_force_qs_snap;
205					/* did other CPU force QS recently? */
206	long		blimit;		/* Upper limit on a processed batch */
207
208	/* 3) dynticks interface. */
209	int  watching_snap;		/* Per-GP tracking for dynticks. */
210	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
211	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
212	bool rcu_forced_tick;		/* Forced tick to provide QS. */
213	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */
214
215	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
216	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
217	struct rcu_head barrier_head;
218	int exp_watching_snap;		/* Double-check need for IPI. */
219
220	/* 5) Callback offloading. */
221#ifdef CONFIG_RCU_NOCB_CPU
222	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
223	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
224	struct task_struct *nocb_gp_kthread;
225	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
226	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
227	struct timer_list nocb_timer;	/* Enforce finite deferral. */
228	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
229	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
230					    /* spawning */
231
232	/* The following fields are used by call_rcu, hence own cacheline. */
233	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
234	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
235	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
236	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
237	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */
238
239	/* The following fields are used by GP kthread, hence own cacheline. */
240	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
241	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
242	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
243	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
244	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
245	unsigned long nocb_gp_loops;	/* # passes through wait code. */
246	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
247	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
248	struct task_struct *nocb_cb_kthread;
249	struct list_head nocb_head_rdp; /*
250					 * Head of rcu_data list in wakeup chain,
251					 * if rdp_gp.
252					 */
253	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
254	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */
255
256	/* The following fields are used by CB kthread, hence new cacheline. */
257	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
258					/* GP rdp takes GP-end wakeups. */
259#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
260
261	/* 6) RCU priority boosting. */
262	struct task_struct *rcu_cpu_kthread_task;
263					/* rcuc per-CPU kthread or NULL. */
264	unsigned int rcu_cpu_kthread_status;
265	char rcu_cpu_has_work;
266	unsigned long rcuc_activity;
267
268	/* 7) Diagnostic data, including RCU CPU stall warnings. */
269	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
270	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
271	struct irq_work rcu_iw;		/* Check for non-irq activity. */
272	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
273	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
274	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
275	short rcu_ofl_gp_state;		/* ->gp_state at last offline. */
276	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
277	short rcu_onl_gp_state;		/* ->gp_state at last online. */
278	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
279	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */
280	struct rcu_snap_record snap_record; /* Snapshot of core stats at half of */
281					    /* the first RCU stall timeout */
282
283	long lazy_len;			/* Length of buffered lazy callbacks. */
284	int cpu;
285};
286
287/* Values for nocb_defer_wakeup field in struct rcu_data. */
288#define RCU_NOCB_WAKE_NOT	0
289#define RCU_NOCB_WAKE_BYPASS	1
290#define RCU_NOCB_WAKE_LAZY	2
291#define RCU_NOCB_WAKE		3
292#define RCU_NOCB_WAKE_FORCE	4
293
294#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
295					/* For jiffies_till_first_fqs and */
296					/*  jiffies_till_next_fqs. */
297
298#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
299					/*  delay between bouts of */
300					/*  quiescent-state forcing. */
301
302#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
303					/*  at least one scheduling clock */
304					/*  irq before ratting on them. */
305
306#define rcu_wait(cond)							\
307do {									\
308	for (;;) {							\
309		set_current_state(TASK_INTERRUPTIBLE);			\
310		if (cond)						\
311			break;						\
312		schedule();						\
313	}								\
314	__set_current_state(TASK_RUNNING);				\
315} while (0)
316
317/*
318 * Maximum number of synchronize_rcu() users that are awakened
319 * directly by the rcu_gp_kthread().  The remainder are deferred
320 * to the main worker.
321 */
322#define SR_MAX_USERS_WAKE_FROM_GP 5
323#define SR_NORMAL_GP_WAIT_HEAD_MAX 5
324
325struct sr_wait_node {
326	atomic_t inuse;
327	struct llist_node node;
328};
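
A hedged sketch of how such a fixed pool might be claimed (modeled on, but not identical to, the kernel's wait-head handling): the atomic inuse flag reserves a node before it is linked into the srs llist, and releases it afterward.

static struct sr_wait_node *example_get_wait_head(struct rcu_state *rsp)
{
	int i;

	for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
		if (!atomic_cmpxchg_acquire(&rsp->srs_wait_nodes[i].inuse,
					    0, 1))
			return &rsp->srs_wait_nodes[i];
	}
	return NULL;	/* All wait heads busy; caller must cope. */
}

static void example_put_wait_head(struct sr_wait_node *sr)
{
	atomic_set_release(&sr->inuse, 0);	/* Return node to the pool. */
}
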
329
330/*
331 * RCU global state, including node hierarchy.  This hierarchy is
332 * represented in "heap" form in a dense array.  The root (first level)
333 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
334 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
335 * and the third level in ->node[m+1] and following (->node[m+1] referenced
336 * by ->level[2]).  The number of levels is determined by the number of
337 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
338 * consisting of a single rcu_node.
339 */
340struct rcu_state {
341	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
342	struct rcu_node *level[RCU_NUM_LVLS + 1];
343						/* Hierarchy levels (+1 to */
344						/*  shut bogus gcc warning) */
345	int ncpus;				/* # CPUs seen so far. */
346	int n_online_cpus;			/* # CPUs online for RCU. */
347
348	/* The following fields are guarded by the root rcu_node's lock. */
349
350	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
351						/* Grace-period sequence #. */
352	unsigned long gp_max;			/* Maximum GP duration in */
353						/*  jiffies. */
354	struct task_struct *gp_kthread;		/* Task for grace periods. */
355	struct swait_queue_head gp_wq;		/* Where GP task waits. */
356	short gp_flags;				/* Commands for GP task. */
357	short gp_state;				/* GP kthread sleep state. */
358	unsigned long gp_wake_time;		/* Last GP kthread wake. */
359	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
360	unsigned long gp_seq_polled;		/* GP seq for polled API. */
361	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
362	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */
363
364	/* End of fields guarded by root rcu_node's lock. */
365
366	struct mutex barrier_mutex;		/* Guards barrier fields. */
367	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
368	struct completion barrier_completion;	/* Wake at barrier end. */
369	unsigned long barrier_sequence;		/* ++ at start and end of */
370						/*  rcu_barrier(). */
371	/* End of fields guarded by barrier_mutex. */
372
373	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */
374
375	struct mutex exp_mutex;			/* Serialize expedited GP. */
376	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
377	unsigned long expedited_sequence;	/* Take a ticket. */
378	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
379	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
380	int ncpus_snap;				/* # CPUs seen last time. */
381	u8 cbovld;				/* Callback overload now? */
382	u8 cbovldnext;				/* ^        ^  next time? */
383
384	unsigned long jiffies_force_qs;		/* Time at which to invoke */
385						/*  force_quiescent_state(). */
386	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
387						/*  kthreads, if configured. */
388	unsigned long n_force_qs;		/* Number of calls to */
389						/*  force_quiescent_state(). */
390	unsigned long gp_start;			/* Time at which GP started, */
391						/*  but in jiffies. */
392	unsigned long gp_end;			/* Time last GP ended, again */
393						/*  in jiffies. */
394	unsigned long gp_activity;		/* Time of last GP kthread */
395						/*  activity in jiffies. */
396	unsigned long gp_req_activity;		/* Time of last GP request */
397						/*  in jiffies. */
398	unsigned long jiffies_stall;		/* Time at which to check */
399						/*  for CPU stalls. */
400	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
401						 * which read jiffies and set
402						 * jiffies_stall. Stall
403						 * warnings disabled if !0. */
404	unsigned long jiffies_resched;		/* Time at which to resched */
405						/*  a reluctant CPU. */
406	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
407						/*  GP start. */
408	const char *name;			/* Name of structure. */
409	char abbr;				/* Abbreviated name. */
410
411	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
412						/* Synchronize offline with */
413						/*  GP pre-initialization. */
414
415	/* synchronize_rcu() part. */
416	struct llist_head srs_next;	/* Users requesting a GP. */
417	struct llist_node *srs_wait_tail; /* Tail of users waiting for a GP. */
418	struct llist_node *srs_done_tail; /* Tail of users whose GP is done. */
419	struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
420	struct work_struct srs_cleanup_work;
421	atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
422
423#ifdef CONFIG_RCU_NOCB_CPU
424	struct mutex nocb_mutex;		/* Guards (de-)offloading */
425	int nocb_is_setup;			/* nocb is set up at boot. */
426#endif
427};
428
429/* Values for rcu_state structure's gp_flags field. */
430#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
431#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
432#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */
433
434/* Values for rcu_state structure's gp_state field. */
435#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
436#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
437#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
438#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
439#define RCU_GP_INIT      4	/* Grace-period initialization. */
440#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
441#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
442#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
443#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */
444
445/*
446 * In order to export the rcu_state name to the tracing tools, it
447 * needs to be added in the __tracepoint_string section.
448 * This requires defining a separate variable tp_<sname>_varname
449 * that points to the string being used, which allows the
450 * tracing userspace tools to map the string address back to
451 * the matching string.
452 */
453#ifdef CONFIG_PREEMPT_RCU
454#define RCU_ABBR 'p'
455#define RCU_NAME_RAW "rcu_preempt"
456#else /* #ifdef CONFIG_PREEMPT_RCU */
457#define RCU_ABBR 's'
458#define RCU_NAME_RAW "rcu_sched"
459#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
460#ifndef CONFIG_TRACING
461#define RCU_NAME RCU_NAME_RAW
462#else /* #ifdef CONFIG_TRACING */
463static char rcu_name[] = RCU_NAME_RAW;
464static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
465#define RCU_NAME rcu_name
466#endif /* #else #ifdef CONFIG_TRACING */
467
468/* Forward declarations for tree_plugin.h */
469static void rcu_bootup_announce(void);
470static void rcu_qs(void);
471static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
472#ifdef CONFIG_HOTPLUG_CPU
473static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
474#endif /* #ifdef CONFIG_HOTPLUG_CPU */
475static int rcu_print_task_exp_stall(struct rcu_node *rnp);
476static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
477static void rcu_flavor_sched_clock_irq(int user);
478static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
479static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
480static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
481static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
482static void rcu_cpu_kthread_setup(unsigned int cpu);
483static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
484static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
485static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
486static void zero_cpu_stall_ticks(struct rcu_data *rdp);
487static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
488static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
489static void rcu_init_one_nocb(struct rcu_node *rnp);
490static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
491static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
492				  unsigned long j, bool lazy);
493static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
494			  rcu_callback_t func, unsigned long flags, bool lazy);
495static void __maybe_unused __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
496						unsigned long flags);
497static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
498static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
499static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
500static void rcu_spawn_cpu_nocb_kthread(int cpu);
501static void show_rcu_nocb_state(struct rcu_data *rdp);
502static void rcu_nocb_lock(struct rcu_data *rdp);
503static void rcu_nocb_unlock(struct rcu_data *rdp);
504static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
505				       unsigned long flags);
506static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
507#ifdef CONFIG_RCU_NOCB_CPU
508static void __init rcu_organize_nocb_kthreads(void);
509
510/*
511 * Disable IRQs before checking offloaded state so that local
512 * locking is safe against concurrent de-offloading.
513 */
514#define rcu_nocb_lock_irqsave(rdp, flags)			\
515do {								\
516	local_irq_save(flags);					\
517	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))	\
518		raw_spin_lock(&(rdp)->nocb_lock);		\
519} while (0)
520#else /* #ifdef CONFIG_RCU_NOCB_CPU */
521#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
522#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
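
A usage sketch (illustrative, not kernel code): the macro pairs with rcu_nocb_unlock_irqrestore(), and because interrupts are disabled before the offloaded check, the check cannot race with de-offloading on this CPU.

static void example_touch_cblist(struct rcu_data *rdp)
{
	unsigned long flags;

	rcu_nocb_lock_irqsave(rdp, flags);
	/* ->cblist may be manipulated safely here. */
	rcu_nocb_unlock_irqrestore(rdp, flags);
}
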
523
524static void rcu_bind_gp_kthread(void);
525static bool rcu_nohz_full_cpu(void);
526
527/* Forward declarations for tree_stall.h */
528static void record_gp_stall_check_time(void);
529static void rcu_iw_handler(struct irq_work *iwp);
530static void check_cpu_stall(struct rcu_data *rdp);
531static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
532				     const unsigned long gpssdelay);
533
534/* Forward declarations for tree_exp.h. */
535static void sync_rcu_do_polled_gp(struct work_struct *wp);