v4.6
 
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Process-level increment to ->dynticks_nesting field.  This allows for
 * architectures that use half-interrupts and half-exceptions from
 * process context.
 *
 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
 * that counts the number of process-based reasons why RCU cannot
 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
 * is the value used to increment or decrement this field.
 *
 * The rest of the bits could in principle be used to count interrupts,
 * but this would mean that a negative-one value in the interrupt
 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
 * initial exit from idle.
 */
#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG	   ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK	   ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE	   (DYNTICK_TASK_NEST_VALUE + \
				    DYNTICK_TASK_FLAG)
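
To make the bit layout concrete, here is a small standalone sketch (an editor's
illustration, not part of the header) that evaluates these macros on a platform
where long long is 64 bits, so LLONG_MAX == 0x7fffffffffffffff: the nest count
occupies the seven bits just below the sign bit, with the two-bit guard field
directly beneath it.

	#include <stdio.h>
	#include <limits.h>

	#define DYNTICK_TASK_NEST_WIDTH 7
	#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
	#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
	#define DYNTICK_TASK_FLAG	((DYNTICK_TASK_NEST_VALUE / 8) * 2)
	#define DYNTICK_TASK_MASK	((DYNTICK_TASK_NEST_VALUE / 8) * 3)
	#define DYNTICK_TASK_EXIT_IDLE	(DYNTICK_TASK_NEST_VALUE + DYNTICK_TASK_FLAG)

	int main(void)
	{
		/* Expected output with a 64-bit long long:
		 * NEST_VALUE = 0x100000000000000   (increment for one task-level reason)
		 * NEST_MASK  = 0x7f00000000000000  (the 7-bit nest-count field)
		 * FLAG       = 0x40000000000000    (guard bit set on idle exit)
		 * TASK_MASK  = 0x60000000000000    (the 2-bit guard field)
		 * EXIT_IDLE  = 0x140000000000000   (initial value on idle exit)
		 */
		printf("NEST_VALUE = %#llx\n", (unsigned long long)DYNTICK_TASK_NEST_VALUE);
		printf("NEST_MASK  = %#llx\n", (unsigned long long)DYNTICK_TASK_NEST_MASK);
		printf("FLAG       = %#llx\n", (unsigned long long)DYNTICK_TASK_FLAG);
		printf("TASK_MASK  = %#llx\n", (unsigned long long)DYNTICK_TASK_MASK);
		printf("EXIT_IDLE  = %#llx\n", (unsigned long long)DYNTICK_TASK_EXIT_IDLE);
		return 0;
	}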

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
 * RCU API. They are in rcupdate.h because they are used by all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head));
		head->func(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
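
__rcu_reclaim() depends on kfree_rcu() storing a small structure offset, rather
than a real function pointer, in ->func: the kernel's __is_kfree_rcu_offset()
treats values below 4096 as offsets, identifying where the rcu_head sits inside
its container, which can then simply be freed. A minimal userspace model of that
encoding follows (an illustrative sketch; the type names and the 4096 cutoff
here are stand-ins, not the kernel's definitions):

	#include <stdlib.h>
	#include <stddef.h>

	struct rcu_head_model {
		void (*func)(struct rcu_head_model *);
	};

	struct item {
		int payload;
		struct rcu_head_model rh;
	};

	/* Small "function pointers" are really offsets into the container. */
	static int is_kfree_offset(unsigned long offset)
	{
		return offset < 4096;
	}

	static void reclaim(struct rcu_head_model *head)
	{
		unsigned long offset = (unsigned long)head->func;

		if (is_kfree_offset(offset))
			free((char *)head - offset);	/* lazy: free the container */
		else
			head->func(head);		/* non-lazy: run the callback */
	}

	int main(void)
	{
		struct item *p = malloc(sizeof(*p));

		/* Encode "free the enclosing struct item" as an offset. */
		p->rh.func = (void (*)(struct rcu_head_model *))offsetof(struct item, rh);
		reclaim(&p->rh);	/* frees p without calling through ->func */
		return 0;
	}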

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

void rcu_early_boot_tests(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#endif /* __LINUX_RCU_H */

v6.2
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
	unsigned long cur_s = READ_ONCE(*sp);

	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
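
The interplay of these helpers is easiest to see in a single-threaded model
(an illustrative sketch: barriers elided, plain assignments standing in for
WRITE_ONCE(), a plain >= standing in for the wrap-safe ULONG_CMP_GE()). It
also shows why rcu_seq_snap() reaches two grace periods ahead when the state
bits are nonzero: a snapshot taken while a grace period is in flight cannot be
satisfied until the end of the *following* grace period.

	#include <stdio.h>

	#define RCU_SEQ_CTR_SHIFT  2
	#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

	static unsigned long gp_seq;	/* the update side's sequence number */

	static void seq_start(void) { gp_seq++; }	/* state bits become 1 */
	static void seq_end(void)   { gp_seq = (gp_seq | RCU_SEQ_STATE_MASK) + 1; }

	static unsigned long seq_snap(void)
	{
		return (gp_seq + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	}

	int main(void)
	{
		unsigned long snap;

		seq_start();			/* GP #1 begins: gp_seq == 1 */
		snap = seq_snap();		/* mid-GP snapshot: snap == 8 */
		seq_end();			/* GP #1 ends:   gp_seq == 4 */
		printf("after GP #1: done=%d\n", gp_seq >= snap);	/* 0 */
		seq_start();			/* GP #2 begins: gp_seq == 5 */
		seq_end();			/* GP #2 ends:   gp_seq == 8 */
		printf("after GP #2: done=%d\n", gp_seq >= snap);	/* 1 */
		return 0;
	}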

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
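
The guard in this macro is a general once-per-callsite pattern: each expansion
gets its own static flag, the atomic read cheaply filters later callers, and
the exchange arbitrates any race so exactly one caller performs the dump. A
minimal C11 rendering of the same idea (an illustrative sketch, with
<stdatomic.h> standing in for the kernel's atomic_t):

	#include <stdatomic.h>
	#include <stdio.h>

	static void dump_once(void)
	{
		static atomic_int beenhere;	/* zero-initialized, like ATOMIC_INIT(0) */

		if (!atomic_load(&beenhere) && !atomic_exchange(&beenhere, 1))
			puts("dumping trace buffer (first caller only)");
	}

	int main(void)
	{
		dump_once();	/* performs the dump */
		dump_once();	/* filtered by the flag */
		return 0;
	}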

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
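
A worked example of the balanced branch (a hypothetical geometry: 64 possible
CPUs and a two-level tree of four leaves under one root): walking from the
leaves toward the root, each level's fanout is the ceiling of the unit count
below divided by the node count at that level.

	#include <stdio.h>

	int main(void)
	{
		int levelcnt[2] = { 1, 4 };	/* nodes per level, root first */
		int levelspread[2];
		int cprv = 64;			/* stand-in for nr_cpu_ids */
		int i;

		for (i = 1; i >= 0; i--) {
			int ccur = levelcnt[i];

			levelspread[i] = (cprv + ccur - 1) / ccur;	/* ceiling divide */
			cprv = ccur;
		}
		/* Prints "root fanout 4, leaf fanout 16": each leaf covers 16 CPUs. */
		printf("root fanout %d, leaf fanout %d\n",
		       levelspread[0], levelspread[1]);
		return 0;
	}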

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags);	\
} while (0)

#define raw_spin_trylock_rcu_node(p)					\
({									\
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define raw_lockdep_assert_held_rcu_node(p)				\
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)
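
An illustrative usage sketch follows (the function is hypothetical, kernel
context is assumed, and it is not part of this header): code that manipulates
an rcu_node takes the lock through the wrappers, so every acquisition carries
the extra barrier and lockdep can verify the lock is held.

	/* Hypothetical caller, kernel context assumed. */
	static void example_update_leaf(struct rcu_node *rnp)
	{
		unsigned long flags;

		raw_spin_lock_irqsave_rcu_node(rnp, flags);	/* lock + smp_mb__after_unlock_lock() */
		raw_lockdep_assert_held_rcu_node(rnp);
		/* ... update ->qsmask or other __private state ... */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}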

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_lazy_get_jiffies_till_flush(void);
void rcu_lazy_set_jiffies_till_flush(unsigned long j);
#else
static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */