/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
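
/*
 * Worked example (editorial note, not part of the upstream file): with
 * RCU_SEQ_CTR_SHIFT == 2, the bottom two bits of a sequence number hold
 * state and the remaining bits count grace periods.  Thus s == 0x15
 * (binary 10101) decomposes as rcu_seq_ctr(s) == 0x5 and
 * rcu_seq_state(s) == 0x1.
 */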

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
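
/*
 * Lifecycle sketch (editorial example with a hypothetical local counter):
 * starting from my_seq == 0x8 (counter 2, idle), rcu_seq_start(&my_seq)
 * yields 0x9 (state 1, grace period in progress), and rcu_seq_end(&my_seq)
 * then advances it to 0xc (counter 3, idle) via rcu_seq_endval().
 */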

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
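
/*
 * Worked example (editorial): with RCU_SEQ_STATE_MASK == 3, rcu_seq_snap()
 * computes (s + 7) & ~3.  From idle s == 0x8 (counter 2) it returns 0xc
 * (counter 3), so the next full grace period suffices.  From in-progress
 * s == 0x9 it returns 0x10 (counter 4), because the current grace period
 * must first end and then one more full grace period must elapse.
 */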

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
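
/*
 * Arithmetic trace (editorial): for idle values old == 0x8 and new == 0xc
 * (one full grace period apart), rnd_diff == 4, so rcu_seq_diff() returns
 * ((4 - 3 - 1) >> 2) + 2 == 2.  The result is intentionally approximate,
 * as the "Roughly" in the header comment warns.
 */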

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcu.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
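
/*
 * Usage sketch (editorial): the enqueue path calls debug_rcu_head_queue(head)
 * just before queuing a callback, with a nonzero return indicating a
 * duplicate call_rcu() on the same rcu_head, and the invocation path calls
 * debug_rcu_head_unqueue(head) just before invoking head->func(head).
 */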

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
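
/*
 * Editorial note: the atomic_read()/atomic_xchg() pair above makes the dump
 * a one-shot per callsite: the cheap read filters the common already-dumped
 * case, and the xchg ensures that at most one CPU racing on the same
 * callsite actually performs the dump.
 */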

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
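
/*
 * Worked example (editorial): with nr_cpu_ids == 96, two levels, and
 * levelcnt == {1, 6} (one root, six leaves), the balanced branch computes
 * levelspread[1] == (96 + 6 - 1) / 6 == 16 CPUs per leaf, and then
 * levelspread[0] == (6 + 1 - 1) / 1 == 6 leaves under the root.
 */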

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
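
/*
 * Usage sketch (editorial; do_something_with() is hypothetical):
 *
 *	int cpu;
 *
 *	for_each_leaf_node_possible_cpu(rnp, cpu)
 *		do_something_with(cpu);
 *
 * The WARN_ON_ONCE() in the iterators fires if rnp is not a leaf rcu_node.
 */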

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
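
/*
 * Usage sketch (editorial): these wrappers are drop-in replacements for
 * the corresponding raw_spin_lock*() calls on an rcu_node's ->lock:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	...update rnp state, fully ordered against the prior release...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */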

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */