/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */
#define MAX_RCU_LVLS 4
#if CONFIG_RCU_FANOUT > 16
#define RCU_FANOUT_LEAF       16
#else /* #if CONFIG_RCU_FANOUT > 16 */
#define RCU_FANOUT_LEAF       (CONFIG_RCU_FANOUT)
#endif /* #else #if CONFIG_RCU_FANOUT > 16 */
#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define NUM_RCU_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (NR_CPUS)
#  define NUM_RCU_LVL_2	      0
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_2
#  define NUM_RCU_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_2	      (NR_CPUS)
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_3
#  define NUM_RCU_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_3	      (NR_CPUS)
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_4
#  define NUM_RCU_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_4	      (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
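
/*
 * Worked example (editor's addition, assuming NR_CPUS=64 and
 * CONFIG_RCU_FANOUT=16):
 *
 *   RCU_FANOUT_LEAF = 16, RCU_FANOUT_1 = 16, RCU_FANOUT_2 = 256
 *   NR_CPUS (64) <= RCU_FANOUT_2, so NUM_RCU_LVLS = 2:
 *     NUM_RCU_LVL_0 = 1                          (the root)
 *     NUM_RCU_LVL_1 = DIV_ROUND_UP(64, 16) = 4   (leaf rcu_node structures)
 *     NUM_RCU_LVL_2 = 64                         (one slot per CPU)
 *   RCU_SUM = 1 + 4 + 64 = 69
 *   NUM_RCU_NODES = 69 - 64 = 5, so ->node[] holds one root plus four leaves.
 */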

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	int dynticks_nesting;	/* Track irq/process nesting level. */
	int dynticks_nmi_nesting; /* Track NMI nesting level. */
	atomic_t dynticks;	/* Even value for dynticks-idle, else odd. */
};
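
/*
 * Sketch (editor's addition): per the even/odd convention noted above, a
 * CPU is in dynticks-idle whenever the low-order bit of ->dynticks is
 * clear.  example_cpu_is_dynticks_idle() is a hypothetical helper, not
 * part of the original file.
 */
static inline int example_cpu_is_dynticks_idle(struct rcu_dynticks *rdtp)
{
	return !(atomic_read(&rdtp->dynticks) & 0x1);
}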

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */
				/*  Since this has meaning only for leaf */
				/*  rcu_node structures, 32 bits suffices. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct task_struct *node_kthread_task;
				/* kthread that takes care of this rcu_node */
				/*  structure, for example, awakening the */
				/*  per-CPU kthreads as needed. */
	unsigned int node_kthread_status;
				/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;
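
/*
 * Sketch (editor's addition): a simplified view of how a quiescent state
 * reported against a leaf rcu_node propagates toward the root, following
 * the qsmask/grpmask comments above.  Once the root's qsmask reaches zero,
 * the current grace period can end.  The real walk is rcu_report_qs_rnp()
 * in rcutree.c, which also holds rnp->lock and checks for blocked readers;
 * example_report_qs() here is hypothetical.
 */
static inline void example_report_qs(struct rcu_node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;	/* this CPU/group is now done */
		if (rnp->qsmask != 0 || rnp->parent == NULL)
			break;		/* siblings still pending, or at root */
		mask = rnp->grpmask;	/* our single bit in the parent */
		rnp = rnp->parent;
	}
}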

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	passed_quiesc_completed;
					/* Value of completed at time of qs. */
	bool		passed_quiesc;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		preemptible;	/* Preemptible RCU? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	long		qlen;		/* # of queued callbacks */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

#ifdef CONFIG_NO_HZ
	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
#endif /* #ifdef CONFIG_NO_HZ */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
#ifdef CONFIG_NO_HZ
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
#endif /* #ifdef CONFIG_NO_HZ */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long resched_ipi;	/* Sent a resched IPI. */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_need_fqs;
	unsigned long n_rp_need_nothing;

	int cpu;
};
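
/*
 * Sketch (editor's addition): how a newly posted callback lands in the
 * segmented ->nxtlist described above.  This mirrors the enqueue step of
 * __call_rcu(); irq disabling and the rest of that function are omitted,
 * and example_enqueue_cb() itself is a hypothetical helper.
 */
static inline void example_enqueue_cb(struct rcu_data *rdp,
				      struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[RCU_NEXT_TAIL] = head;	   /* append at list tail */
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next; /* tail now follows new cb */
	rdp->qlen++;
}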

/* Values for signaled field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
#define RCU_SIGNAL_INIT		RCU_FORCE_QS
#endif /* #else #ifdef CONFIG_NO_HZ */

#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif

#define RCU_SECONDS_TILL_STALL_CHECK   (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
					RCU_STALL_DELAY_DELTA)
						/* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
						/* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */
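
/*
 * Worked example (editor's addition, assuming CONFIG_RCU_CPU_STALL_TIMEOUT=60
 * and CONFIG_PROVE_RCU=n, so that RCU_STALL_DELAY_DELTA is 0):
 *   RCU_SECONDS_TILL_STALL_CHECK   = 60 * HZ jiffies after grace-period start
 *   RCU_SECONDS_TILL_STALL_RECHECK = 3 * 60 * HZ + 30 jiffies between
 *                                    successive stall warnings.
 */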

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
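
/*
 * Usage sketch (editor's addition): a kthread typically parks itself on a
 * condition with rcu_wait(), waking only once the condition becomes true.
 * example_kthread_wait_for_work() and its workp parameter are hypothetical
 * and assume the caller is a kthread with <linux/sched.h> in scope.
 */
static inline void example_kthread_wait_for_work(unsigned long *workp)
{
	rcu_wait(*workp != 0);	/* sleep until someone posts work */
}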

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	signaled ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	fqs_active;			/* force_quiescent_state() */
						/*  is running. */
	u8	fqs_need_gp;			/* A CPU was prevented from */
						/*  starting a new grace */
						/*  period because */
						/*  force_quiescent_state() */
						/*  was running. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t onofflock;		/* exclude on/offline and */
						/*  starting new GP. */
	raw_spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	char *name;				/* Name of structure. */
};
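
/*
 * Usage sketch (editor's addition): the root of the hierarchy is always
 * &rsp->node[0], which ->level[0] also points at (this is what
 * rcu_get_root() in rcutree.c returns), and the leaves begin at
 * ->level[NUM_RCU_LVLS - 1].  As one illustration, a hypothetical helper
 * could count CPUs still blocking the current grace period with
 * rcu_for_each_leaf_node() from above; locking is omitted for brevity.
 */
static inline int example_count_blocking_cpus(struct rcu_state *rsp)
{
	struct rcu_node *rnp;
	int n = 0;

	rcu_for_each_leaf_node(rsp, rnp)
		n += hweight_long(rnp->qsmask);	/* one bit per pending CPU */
	return n;
}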

/* Return values for rcu_preempt_offline_tasks(). */

#define RCU_OFL_TASKS_NORM_GP	0x1		/* Tasks blocking normal */
						/*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP	0x2		/* Tasks blocking expedited */
						/*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
static void rcu_preempt_offline_cpu(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index);
static void invoke_rcu_node_kthread(struct rcu_node *rnp);
static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);

#endif /* #ifndef RCU_TREE_NONCORE */