1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Read-Copy Update definitions shared among RCU implementations.
4 *
5 * Copyright IBM Corporation, 2011
6 *
7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
8 */
9
10#ifndef __LINUX_RCU_H
11#define __LINUX_RCU_H
12
13#include <linux/slab.h>
14#include <trace/events/rcu.h>
15
16/*
17 * Grace-period counter management.
18 *
19 * The two least significant bits contain the control flags.
20 * The most significant bits contain the grace-period sequence counter.
21 *
22 * When both control flags are zero, no grace period is in progress.
23 * When either bit is non-zero, a grace period has started and is in
24 * progress. When the grace period completes, the control flags are reset
25 * to 0 and the grace-period sequence counter is incremented.
26 *
27 * However some specific RCU usages make use of custom values.
28 *
29 * SRCU special control values:
30 *
31 * SRCU_SNP_INIT_SEQ : Invalid/init value set when SRCU node
32 * is initialized.
33 *
34 * SRCU_STATE_IDLE : No SRCU gp is in progress
35 *
36 * SRCU_STATE_SCAN1 : State set by rcu_seq_start(). Indicates
37 * we are scanning the readers on the slot
38 * defined as inactive (there might well
39 * be pending readers that will use that
40 * index, but their number is bounded).
41 *
42 * SRCU_STATE_SCAN2 : State set manually via rcu_seq_set_state()
43 * Indicates we are flipping the readers
44 * index and then scanning the readers on the
45 * slot newly designated as inactive (again,
46 * the number of pending readers that will use
47 * this inactive index is bounded).
48 *
49 * RCU polled GP special control value:
50 *
51 * RCU_GET_STATE_COMPLETED : State value indicating an already-completed
52 * polled GP has completed. This value covers
53 * both the state and the counter of the
54 * grace-period sequence number.
55 */
56
57#define RCU_SEQ_CTR_SHIFT 2
58#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
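/*
 * Worked example of the layout above (illustrative, not part of the original
 * header): with RCU_SEQ_CTR_SHIFT == 2 and RCU_SEQ_STATE_MASK == 0x3, a
 * gp_seq value of 0x9 has counter 0x9 >> 2 == 2 and state 0x9 & 0x3 == 1,
 * i.e. a grace period is in progress.  When it ends, rcu_seq_end() advances
 * the value to (0x9 | 0x3) + 1 == 0xc: counter 3, state 0.
 */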
59
60/* Low-order bit definition for polled grace-period APIs. */
61#define RCU_GET_STATE_COMPLETED 0x1
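/*
 * Illustrative sketch (not part of the original header): how polled
 * grace-period cookies using this encoding are typically consumed.  The
 * get_state_synchronize_rcu()/poll_state_synchronize_rcu() calls are the
 * public RCU API; struct my_object, my_retire() and my_try_free() are
 * hypothetical names used only for this example.
 */
struct my_object {
	unsigned long gp_cookie;
	/* ... payload ... */
};

static inline void my_retire(struct my_object *p)
{
	p->gp_cookie = get_state_synchronize_rcu();	/* snapshot gp_seq */
}

static inline bool my_try_free(struct my_object *p)
{
	if (!poll_state_synchronize_rcu(p->gp_cookie))
		return false;	/* grace period not yet complete */
	kfree(p);
	return true;
}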
62
63extern int sysctl_sched_rt_runtime;
64
65/*
66 * Return the counter portion of a sequence number previously returned
67 * by rcu_seq_snap() or rcu_seq_current().
68 */
69static inline unsigned long rcu_seq_ctr(unsigned long s)
70{
71 return s >> RCU_SEQ_CTR_SHIFT;
72}
73
74/*
75 * Return the state portion of a sequence number previously returned
76 * by rcu_seq_snap() or rcu_seq_current().
77 */
78static inline int rcu_seq_state(unsigned long s)
79{
80 return s & RCU_SEQ_STATE_MASK;
81}
82
83/*
84 * Set the state portion of the pointed-to sequence number.
85 * The caller is responsible for preventing conflicting updates.
86 */
87static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
88{
89 WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
90 WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
91}
92
93/* Adjust sequence number for start of update-side operation. */
94static inline void rcu_seq_start(unsigned long *sp)
95{
96 WRITE_ONCE(*sp, *sp + 1);
97 smp_mb(); /* Ensure update-side operation after counter increment. */
98 WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
99}
100
101/* Compute the end-of-grace-period value for the specified sequence number. */
102static inline unsigned long rcu_seq_endval(unsigned long *sp)
103{
104 return (*sp | RCU_SEQ_STATE_MASK) + 1;
105}
106
107/* Adjust sequence number for end of update-side operation. */
108static inline void rcu_seq_end(unsigned long *sp)
109{
110 smp_mb(); /* Ensure update-side operation before counter increment. */
111 WARN_ON_ONCE(!rcu_seq_state(*sp));
112 WRITE_ONCE(*sp, rcu_seq_endval(sp));
113}
114
115/*
116 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
117 *
118 * This function returns the earliest value of the grace-period sequence number
119 * that will indicate that a full grace period has elapsed since the current
120 * time. Once the grace-period sequence number has reached this value, it will
121 * be safe to invoke all callbacks that have been registered prior to the
122 * current time. This value is the current grace-period number plus two to the
123 * power of the number of low-order bits reserved for state, then rounded up to
124 * the next value in which the state bits are all zero.
125 */
126static inline unsigned long rcu_seq_snap(unsigned long *sp)
127{
128 unsigned long s;
129
130 s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
131 smp_mb(); /* Above access must not bleed into critical section. */
132 return s;
133}
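/*
 * Worked example (illustrative): with RCU_SEQ_STATE_MASK == 0x3, if *sp is
 * 0x8 (counter 2, idle) then rcu_seq_snap() returns (0x8 + 7) & ~0x3 == 0xc,
 * the value reached once one further full grace period has ended.  If *sp is
 * 0x9 (counter 2, grace period in progress) the result is
 * (0x9 + 7) & ~0x3 == 0x10, allowing for the in-progress grace period plus
 * one more, since the current one may not wait on later updates.
 */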
134
135/* Return the current value of the update side's sequence number, no ordering. */
136static inline unsigned long rcu_seq_current(unsigned long *sp)
137{
138 return READ_ONCE(*sp);
139}
140
141/*
142 * Given a snapshot from rcu_seq_snap(), determine whether or not the
143 * corresponding update-side operation has started.
144 */
145static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
146{
147 return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
148}
149
150/*
151 * Given a snapshot from rcu_seq_snap(), determine whether or not a
152 * full update-side operation has occurred.
153 */
154static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
155{
156 return ULONG_CMP_GE(READ_ONCE(*sp), s);
157}
158
159/*
160 * Given a snapshot from rcu_seq_snap(), determine whether or not a
161 * full update-side operation has occurred, but do not allow the
162 * (ULONG_MAX / 2) safety-factor/guard-band.
163 */
164static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
165{
166 unsigned long cur_s = READ_ONCE(*sp);
167
168 return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
169}
170
171/*
172 * Has a grace period completed since the time the old gp_seq was collected?
173 */
174static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
175{
176 return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
177}
178
179/*
180 * Has a grace period started since the time the old gp_seq was collected?
181 */
182static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
183{
184 return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
185 new);
186}
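/*
 * Worked examples (illustrative): if old == 0x9 was collected while a grace
 * period was in progress, then new == 0xc (that grace period has ended) makes
 * rcu_seq_completed_gp() return true, while new == 0xa (same grace period,
 * different state) leaves it false.  For rcu_seq_new_gp(), old == 0x8 with
 * new == 0x9 returns true (a later grace period has started), whereas
 * old == 0x9 with new == 0xa returns false.
 */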
187
188/*
189 * Roughly how many full grace periods have elapsed between the collection
190 * of the two specified grace periods?
191 */
192static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
193{
194 unsigned long rnd_diff;
195
196 if (old == new)
197 return 0;
198 /*
199 * Compute the number of grace periods (still shifted up), plus
200 * one if either of new and old is not an exact grace period.
201 */
202 rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
203 ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
204 ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
205 if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
206 return 1; /* Definitely no grace period has elapsed. */
207 return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
208}
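/*
 * Worked example (illustrative): with old == 0x8 and new == 0xc (both idle,
 * one counter increment apart), rnd_diff == 4 and the function returns 2;
 * with old == 0x8 and new == 0x9 it returns 1.  The result is rough rather
 * than exact, as the comment above notes.
 */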
209
210/*
211 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
212 * by call_rcu() and rcu callback execution, and are therefore not part
213 * of the RCU API. These are in rcupdate.h because they are used by all
214 * RCU implementations.
215 */
216
217#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
218# define STATE_RCU_HEAD_READY 0
219# define STATE_RCU_HEAD_QUEUED 1
220
221extern const struct debug_obj_descr rcuhead_debug_descr;
222
223static inline int debug_rcu_head_queue(struct rcu_head *head)
224{
225 int r1;
226
227 r1 = debug_object_activate(head, &rcuhead_debug_descr);
228 debug_object_active_state(head, &rcuhead_debug_descr,
229 STATE_RCU_HEAD_READY,
230 STATE_RCU_HEAD_QUEUED);
231 return r1;
232}
233
234static inline void debug_rcu_head_unqueue(struct rcu_head *head)
235{
236 debug_object_active_state(head, &rcuhead_debug_descr,
237 STATE_RCU_HEAD_QUEUED,
238 STATE_RCU_HEAD_READY);
239 debug_object_deactivate(head, &rcuhead_debug_descr);
240}
241#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
242static inline int debug_rcu_head_queue(struct rcu_head *head)
243{
244 return 0;
245}
246
247static inline void debug_rcu_head_unqueue(struct rcu_head *head)
248{
249}
250#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
251
252static inline void debug_rcu_head_callback(struct rcu_head *rhp)
253{
254 if (unlikely(!rhp->func))
255 kmem_dump_obj(rhp);
256}
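/*
 * Illustrative sketch (not part of the original header) of how the helpers
 * above pair up around callback queuing and invocation, loosely modeled on
 * call_rcu() and callback-batch processing; my_queue_cb() and my_invoke_cb()
 * are hypothetical names used only for this example.
 */
static inline void my_queue_cb(struct rcu_head *head, rcu_callback_t func)
{
	if (debug_rcu_head_queue(head))	/* READY -> QUEUED */
		return;			/* probable double queuing of this head */
	head->func = func;
	/* ... link head onto a callback list ... */
}

static inline void my_invoke_cb(struct rcu_head *head)
{
	rcu_callback_t f = head->func;

	debug_rcu_head_unqueue(head);	/* QUEUED -> READY */
	debug_rcu_head_callback(head);	/* dump slab info if ->func looks bogus */
	f(head);
}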
257
258extern int rcu_cpu_stall_suppress_at_boot;
259
260static inline bool rcu_stall_is_suppressed_at_boot(void)
261{
262 return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
263}
264
265extern int rcu_cpu_stall_notifiers;
266
267#ifdef CONFIG_RCU_STALL_COMMON
268
269extern int rcu_cpu_stall_ftrace_dump;
270extern int rcu_cpu_stall_suppress;
271extern int rcu_cpu_stall_timeout;
272extern int rcu_exp_cpu_stall_timeout;
273extern int rcu_cpu_stall_cputime;
274extern bool rcu_exp_stall_task_details __read_mostly;
275int rcu_jiffies_till_stall_check(void);
276int rcu_exp_jiffies_till_stall_check(void);
277
278static inline bool rcu_stall_is_suppressed(void)
279{
280 return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
281}
282
283#define rcu_ftrace_dump_stall_suppress() \
284do { \
285 if (!rcu_cpu_stall_suppress) \
286 rcu_cpu_stall_suppress = 3; \
287} while (0)
288
289#define rcu_ftrace_dump_stall_unsuppress() \
290do { \
291 if (rcu_cpu_stall_suppress == 3) \
292 rcu_cpu_stall_suppress = 0; \
293} while (0)
294
295#else /* #ifdef CONFIG_RCU_STALL_COMMON */
296
297static inline bool rcu_stall_is_suppressed(void)
298{
299 return rcu_stall_is_suppressed_at_boot();
300}
301#define rcu_ftrace_dump_stall_suppress()
302#define rcu_ftrace_dump_stall_unsuppress()
303#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
304
305/*
306 * Strings used in tracepoints need to be exported via the
307 * tracing system such that tools like perf and trace-cmd can
308 * translate the string address pointers to actual text.
309 */
310#define TPS(x) tracepoint_string(x)
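/*
 * Example (illustrative): tracepoint strings are passed through TPS() so the
 * address-to-text mapping is exported, e.g.:
 *
 *	trace_rcu_utilization(TPS("Start scheduler-tick"));
 */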
311
312/*
313 * Dump the ftrace buffer, but only one time per callsite per boot.
314 */
315#define rcu_ftrace_dump(oops_dump_mode) \
316do { \
317 static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
318 \
319 if (!atomic_read(&___rfd_beenhere) && \
320 !atomic_xchg(&___rfd_beenhere, 1)) { \
321 tracing_off(); \
322 rcu_ftrace_dump_stall_suppress(); \
323 ftrace_dump(oops_dump_mode); \
324 rcu_ftrace_dump_stall_unsuppress(); \
325 } \
326} while (0)
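/*
 * Example (illustrative): a diagnostics path can dump the trace buffer at
 * most once per boot for its callsite with:
 *
 *	rcu_ftrace_dump(DUMP_ALL);
 *
 * where DUMP_ALL is the usual enum ftrace_dump_mode value.
 */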
327
328void rcu_early_boot_tests(void);
329void rcu_test_sync_prims(void);
330
331/*
332 * This function really isn't for public consumption, but RCU is special in
333 * that context switches can allow the state machine to make progress.
334 */
335extern void resched_cpu(int cpu);
336
337#if !defined(CONFIG_TINY_RCU)
338
339#include <linux/rcu_node_tree.h>
340
341extern int rcu_num_lvls;
342extern int num_rcu_lvl[];
343extern int rcu_num_nodes;
344static bool rcu_fanout_exact;
345static int rcu_fanout_leaf;
346
347/*
348 * Compute the per-level fanout, either using the exact fanout specified
349 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
350 */
351static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
352{
353 int i;
354
355 for (i = 0; i < RCU_NUM_LVLS; i++)
356 levelspread[i] = INT_MIN;
357 if (rcu_fanout_exact) {
358 levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
359 for (i = rcu_num_lvls - 2; i >= 0; i--)
360 levelspread[i] = RCU_FANOUT;
361 } else {
362 int ccur;
363 int cprv;
364
365 cprv = nr_cpu_ids;
366 for (i = rcu_num_lvls - 1; i >= 0; i--) {
367 ccur = levelcnt[i];
368 levelspread[i] = (cprv + ccur - 1) / ccur;
369 cprv = ccur;
370 }
371 }
372}
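/*
 * Worked example (illustrative, with made-up geometry): for a two-level tree
 * with levelcnt == {1, 4} and nr_cpu_ids == 64, the balancing branch yields
 * levelspread[1] == (64 + 4 - 1) / 4 == 16 and levelspread[0] == 4.  With
 * rcu_fanout_exact set, levelspread[1] is simply rcu_fanout_leaf and the
 * upper level gets RCU_FANOUT.
 */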
373
374extern void rcu_init_geometry(void);
375
376/* Returns a pointer to the first leaf rcu_node structure. */
377#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
378
379/* Is this rcu_node a leaf? */
380#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
381
382/* Is this rcu_node the last leaf? */
383#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
384
385/*
386 * Do a full breadth-first scan of the {s,}rcu_node structures for the
387 * specified state structure (for SRCU) or the only rcu_state structure
388 * (for RCU).
389 */
390#define _rcu_for_each_node_breadth_first(sp, rnp) \
391 for ((rnp) = &(sp)->node[0]; \
392 (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
393#define rcu_for_each_node_breadth_first(rnp) \
394 _rcu_for_each_node_breadth_first(&rcu_state, rnp)
395#define srcu_for_each_node_breadth_first(ssp, rnp) \
396 _rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)
397
398/*
399 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
400 * Note that if there is a singleton rcu_node tree with but one rcu_node
401 * structure, this loop -will- visit the rcu_node structure. It is still
402 * a leaf node, even if it is also the root node.
403 */
404#define rcu_for_each_leaf_node(rnp) \
405 for ((rnp) = rcu_first_leaf_node(); \
406 (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
407
408/*
409 * Iterate over all possible CPUs in a leaf RCU node.
410 */
411#define for_each_leaf_node_possible_cpu(rnp, cpu) \
412 for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
413 (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
414 (cpu) <= rnp->grphi; \
415 (cpu) = cpumask_next((cpu), cpu_possible_mask))
416
417/*
418 * Iterate over all CPUs in a leaf RCU node's specified mask.
419 */
420#define rcu_find_next_bit(rnp, cpu, mask) \
421 ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
422#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
423 for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
424 (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
425 (cpu) <= rnp->grphi; \
426 (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
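/*
 * Illustrative sketch (not part of the original header): walking each
 * possible CPU below each leaf rcu_node.  This assumes the struct rcu_node
 * definition from kernel/rcu/tree.h (->qsmask, ->grplo) is visible;
 * example_scan_leaves() is a hypothetical helper.
 */
static inline void example_scan_leaves(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_for_each_leaf_node(rnp) {
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			/* Report CPUs whose quiescent-state bit is still set. */
			if (READ_ONCE(rnp->qsmask) & BIT(cpu - rnp->grplo))
				pr_info("CPU %d still blocks the current GP\n", cpu);
		}
	}
}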
427
428#endif /* !defined(CONFIG_TINY_RCU) */
429
430#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)
431
432/*
433 * Wrappers for the rcu_node::lock acquire and release.
434 *
435 * Because the rcu_nodes form a tree, the tree traversal locking will observe
436 * different lock values; this in turn means that an UNLOCK of one level
437 * followed by a LOCK of another level does not imply a full memory barrier;
438 * and most importantly transitivity is lost.
439 *
440 * In order to restore full ordering between tree levels, augment the regular
441 * lock acquire functions with smp_mb__after_unlock_lock().
442 *
443 * Because ->lock of struct rcu_node is a __private field, one should use
444 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
445 */
446#define raw_spin_lock_rcu_node(p) \
447do { \
448 raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
449 smp_mb__after_unlock_lock(); \
450} while (0)
451
452#define raw_spin_unlock_rcu_node(p) \
453do { \
454 lockdep_assert_irqs_disabled(); \
455 raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
456} while (0)
457
458#define raw_spin_lock_irq_rcu_node(p) \
459do { \
460 raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
461 smp_mb__after_unlock_lock(); \
462} while (0)
463
464#define raw_spin_unlock_irq_rcu_node(p) \
465do { \
466 lockdep_assert_irqs_disabled(); \
467 raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
468} while (0)
469
470#define raw_spin_lock_irqsave_rcu_node(p, flags) \
471do { \
472 raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
473 smp_mb__after_unlock_lock(); \
474} while (0)
475
476#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
477do { \
478 lockdep_assert_irqs_disabled(); \
479 raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
480} while (0)
481
482#define raw_spin_trylock_rcu_node(p) \
483({ \
484 bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
485 \
486 if (___locked) \
487 smp_mb__after_unlock_lock(); \
488 ___locked; \
489})
490
491#define raw_lockdep_assert_held_rcu_node(p) \
492 lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
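/*
 * Illustrative sketch (not part of the original header): typical use of the
 * wrappers above.  It assumes rnp points at a struct rcu_node from
 * kernel/rcu/tree.h; example_clear_qsmask_bits() is a hypothetical helper.
 */
static inline void example_clear_qsmask_bits(struct rcu_node *rnp,
					     unsigned long mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);	/* acquire + full barrier */
	raw_lockdep_assert_held_rcu_node(rnp);
	WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);	/* field guarded by ->lock */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}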
493
494#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)
495
496#ifdef CONFIG_TINY_RCU
497/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
498static inline bool rcu_gp_is_normal(void) { return true; }
499static inline bool rcu_gp_is_expedited(void) { return false; }
500static inline bool rcu_async_should_hurry(void) { return false; }
501static inline void rcu_expedite_gp(void) { }
502static inline void rcu_unexpedite_gp(void) { }
503static inline void rcu_async_hurry(void) { }
504static inline void rcu_async_relax(void) { }
505static inline bool rcu_cpu_online(int cpu) { return true; }
506#else /* #ifdef CONFIG_TINY_RCU */
507bool rcu_gp_is_normal(void); /* Internal RCU use. */
508bool rcu_gp_is_expedited(void); /* Internal RCU use. */
509bool rcu_async_should_hurry(void); /* Internal RCU use. */
510void rcu_expedite_gp(void);
511void rcu_unexpedite_gp(void);
512void rcu_async_hurry(void);
513void rcu_async_relax(void);
514void rcupdate_announce_bootup_oddness(void);
515bool rcu_cpu_online(int cpu);
516#ifdef CONFIG_TASKS_RCU_GENERIC
517void show_rcu_tasks_gp_kthreads(void);
518#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
519static inline void show_rcu_tasks_gp_kthreads(void) {}
520#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
521#endif /* #else #ifdef CONFIG_TINY_RCU */
522
523#ifdef CONFIG_TASKS_RCU
524struct task_struct *get_rcu_tasks_gp_kthread(void);
525#endif // # ifdef CONFIG_TASKS_RCU
526
527#ifdef CONFIG_TASKS_RUDE_RCU
528struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
529#endif // # ifdef CONFIG_TASKS_RUDE_RCU
530
531#define RCU_SCHEDULER_INACTIVE 0
532#define RCU_SCHEDULER_INIT 1
533#define RCU_SCHEDULER_RUNNING 2
534
535enum rcutorture_type {
536 RCU_FLAVOR,
537 RCU_TASKS_FLAVOR,
538 RCU_TASKS_RUDE_FLAVOR,
539 RCU_TASKS_TRACING_FLAVOR,
540 RCU_TRIVIAL_FLAVOR,
541 SRCU_FLAVOR,
542 INVALID_RCU_FLAVOR
543};
544
545#if defined(CONFIG_RCU_LAZY)
546unsigned long rcu_lazy_get_jiffies_till_flush(void);
547void rcu_lazy_set_jiffies_till_flush(unsigned long j);
548#else
549static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
550static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
551#endif
552
553#if defined(CONFIG_TREE_RCU)
554void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
555 unsigned long *gp_seq);
556void do_trace_rcu_torture_read(const char *rcutorturename,
557 struct rcu_head *rhp,
558 unsigned long secs,
559 unsigned long c_old,
560 unsigned long c);
561void rcu_gp_set_torture_wait(int duration);
562#else
563static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
564 int *flags, unsigned long *gp_seq)
565{
566 *flags = 0;
567 *gp_seq = 0;
568}
569#ifdef CONFIG_RCU_TRACE
570void do_trace_rcu_torture_read(const char *rcutorturename,
571 struct rcu_head *rhp,
572 unsigned long secs,
573 unsigned long c_old,
574 unsigned long c);
575#else
576#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
577 do { } while (0)
578#endif
579static inline void rcu_gp_set_torture_wait(int duration) { }
580#endif
581
582#ifdef CONFIG_TINY_SRCU
583
584static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
585 struct srcu_struct *sp, int *flags,
586 unsigned long *gp_seq)
587{
588 if (test_type != SRCU_FLAVOR)
589 return;
590 *flags = 0;
591 *gp_seq = sp->srcu_idx;
592}
593
594#elif defined(CONFIG_TREE_SRCU)
595
596void srcutorture_get_gp_data(enum rcutorture_type test_type,
597 struct srcu_struct *sp, int *flags,
598 unsigned long *gp_seq);
599
600#endif
601
602#ifdef CONFIG_TINY_RCU
603static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
604static inline unsigned long rcu_get_gp_seq(void) { return 0; }
605static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
606static inline unsigned long
607srcu_batches_completed(struct srcu_struct *sp) { return 0; }
608static inline void rcu_force_quiescent_state(void) { }
609static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
610static inline void show_rcu_gp_kthreads(void) { }
611static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
612static inline void rcu_fwd_progress_check(unsigned long j) { }
613static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
614static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
615#else /* #ifdef CONFIG_TINY_RCU */
616bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
617unsigned long rcu_get_gp_seq(void);
618unsigned long rcu_exp_batches_completed(void);
619unsigned long srcu_batches_completed(struct srcu_struct *sp);
620bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
621void show_rcu_gp_kthreads(void);
622int rcu_get_gp_kthreads_prio(void);
623void rcu_fwd_progress_check(unsigned long j);
624void rcu_force_quiescent_state(void);
625extern struct workqueue_struct *rcu_gp_wq;
626#ifdef CONFIG_RCU_EXP_KTHREAD
627extern struct kthread_worker *rcu_exp_gp_kworker;
628extern struct kthread_worker *rcu_exp_par_gp_kworker;
629#else /* !CONFIG_RCU_EXP_KTHREAD */
630extern struct workqueue_struct *rcu_par_gp_wq;
631#endif /* CONFIG_RCU_EXP_KTHREAD */
632void rcu_gp_slow_register(atomic_t *rgssp);
633void rcu_gp_slow_unregister(atomic_t *rgssp);
634#endif /* #else #ifdef CONFIG_TINY_RCU */
635
636#ifdef CONFIG_RCU_NOCB_CPU
637void rcu_bind_current_to_nocb(void);
638#else
639static inline void rcu_bind_current_to_nocb(void) { }
640#endif
641
642#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
643void show_rcu_tasks_classic_gp_kthread(void);
644#else
645static inline void show_rcu_tasks_classic_gp_kthread(void) {}
646#endif
647#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
648void show_rcu_tasks_rude_gp_kthread(void);
649#else
650static inline void show_rcu_tasks_rude_gp_kthread(void) {}
651#endif
652#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
653void show_rcu_tasks_trace_gp_kthread(void);
654#else
655static inline void show_rcu_tasks_trace_gp_kthread(void) {}
656#endif
657
658#ifdef CONFIG_TINY_RCU
659static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
660#else
661bool rcu_cpu_beenfullyonline(int cpu);
662#endif
663
664#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
665int rcu_stall_notifier_call_chain(unsigned long val, void *v);
666#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
667static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
668#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
669
670#endif /* __LINUX_RCU_H */