/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Process-level increment to ->dynticks_nesting field.  This allows for
 * architectures that use half-interrupts and half-exceptions from
 * process context.
 *
 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
 * that counts the number of process-based reasons why RCU cannot
 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
 * is the value used to increment or decrement this field.
 *
 * The rest of the bits could in principle be used to count interrupts,
 * but this would mean that a negative-one value in the interrupt
 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
 * initial exit from idle.
 */
#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG       ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK       ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE  (DYNTICK_TASK_NEST_VALUE + \
                                 DYNTICK_TASK_FLAG)
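
/*
 * Worked example (illustrative only, added for clarity; assumes long long
 * is 64 bits): DYNTICK_TASK_NEST_VALUE is then 1 << 56,
 * DYNTICK_TASK_NEST_MASK covers bits 56-62, DYNTICK_TASK_FLAG is 1 << 54,
 * DYNTICK_TASK_MASK covers bits 53-54, and DYNTICK_TASK_EXIT_IDLE is
 * 0x0140000000000000, that is, one process-level nesting count plus the
 * guard flag.
 */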

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
 * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
        if (__is_kfree_rcu_offset(offset)) {
                RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
                kfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        } else {
                RCU_TRACE(trace_rcu_invoke_callback(rn, head));
                head->func(head);
                rcu_lock_release(&rcu_callback_map);
                return false;
        }
}
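
/*
 * Illustration (added for clarity, not part of the kernel API): kfree_rcu()
 * stores the byte offset of the rcu_head within its enclosing structure in
 * ->func, which is why the offset test above works.  For a hypothetical
 * structure:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 * kfree_rcu(p, rh) queues p->rh with ->func set to offsetof(struct foo, rh),
 * so __rcu_reclaim() frees (void *)&p->rh - offsetof(struct foo, rh), which
 * is p itself.
 */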

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

void rcu_early_boot_tests(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#endif /* __LINUX_RCU_H */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <linux/slab.h>
#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 *
 * The two least significant bits contain the control flags.
 * The most significant bits contain the grace-period sequence counter.
 *
 * When both control flags are zero, no grace period is in progress.
 * When either bit is non-zero, a grace period has started and is in
 * progress. When the grace period completes, the control flags are reset
 * to 0 and the grace-period sequence counter is incremented.
 *
 * However some specific RCU usages make use of custom values.
 *
 * SRCU special control values:
 *
 *	SRCU_SNP_INIT_SEQ	:	Invalid/init value set when SRCU node
 *					is initialized.
 *
 *	SRCU_STATE_IDLE		:	No SRCU gp is in progress
 *
 *	SRCU_STATE_SCAN1	:	State set by rcu_seq_start(). Indicates
 *					we are scanning the readers on the slot
 *					defined as inactive (there might well
 *					be pending readers that will use that
 *					index, but their number is bounded).
 *
 *	SRCU_STATE_SCAN2	:	State set manually via rcu_seq_set_state()
 *					Indicates we are flipping the readers
 *					index and then scanning the readers on the
 *					slot newly designated as inactive (again,
 *					the number of pending readers that will use
 *					this inactive index is bounded).
 *
 * RCU polled GP special control value:
 *
 *	RCU_GET_STATE_COMPLETED	:	State value indicating an already-completed
 *					polled GP has completed. This value covers
 *					both the state and the counter of the
 *					grace-period sequence number.
 */

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1
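
/*
 * Worked example (added for clarity; assumes RCU_SEQ_CTR_SHIFT == 2 and
 * RCU_SEQ_STATE_MASK == 0x3 as defined in include/linux/rcupdate.h):
 *
 *	gp_seq == 0x8:	rcu_seq_ctr() == 2, rcu_seq_state() == 0 (idle)
 *	gp_seq == 0x9:	rcu_seq_ctr() == 2, rcu_seq_state() == 1 (GP in progress)
 *	rcu_seq_end():	(0x9 | 0x3) + 1 == 0xc, i.e. counter 3, state 0 again
 */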

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
        WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
        WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
        WRITE_ONCE(*sp, *sp + 1);
        smp_mb(); /* Ensure update-side operation after counter increment. */
        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
        return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
        smp_mb(); /* Ensure update-side operation before counter increment. */
        WARN_ON_ONCE(!rcu_seq_state(*sp));
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
        unsigned long s;

        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        smp_mb(); /* Above access must not bleed into critical section. */
        return s;
}
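
/*
 * Usage sketch (added for clarity; gp_seq_var and snap are hypothetical
 * local names, not kernel identifiers):
 *
 *	unsigned long snap = rcu_seq_snap(&gp_seq_var);
 *
 *	... later, possibly from another context ...
 *
 *	if (rcu_seq_done(&gp_seq_var, snap))
 *		;	// a full grace period has elapsed since the snapshot
 *
 * This is roughly the pattern underlying get_state_synchronize_rcu() and
 * poll_state_synchronize_rcu().
 */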

/* Return the current value of the update side's sequence number, with no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
        return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
        unsigned long cur_s = READ_ONCE(*sp);

        return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
                            new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
        unsigned long rnd_diff;

        if (old == new)
                return 0;
        /*
         * Compute the number of grace periods (still shifted up), plus
         * one if either of new and old is not an exact grace period.
         */
        rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
                   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
                   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
        if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
                return 1; /* Definitely no grace period has elapsed. */
        return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
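
/*
 * Worked example (added for clarity; assumes RCU_SEQ_STATE_MASK == 0x3 and
 * RCU_SEQ_CTR_SHIFT == 2): for old == 0x8 and new == 0x14,
 * rnd_diff == (0x14 - 0x8) + 0 == 0xc, which exceeds RCU_SEQ_STATE_MASK,
 * so the function returns ((0xc - 0x3 - 1) >> 2) + 2 == 4.  As the comment
 * above says, this is a rough estimate, not an exact count.
 */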

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

static inline void debug_rcu_head_callback(struct rcu_head *rhp)
{
        if (unlikely(!rhp->func))
                kmem_dump_obj(rhp);
}

static inline bool rcu_barrier_cb_is_done(struct rcu_head *rhp)
{
        return rhp->next == rhp;
}

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
        return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

extern int rcu_cpu_stall_notifiers;

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
extern int rcu_cpu_stall_cputime;
extern bool rcu_exp_stall_task_details __read_mostly;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
        return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
        if (!rcu_cpu_stall_suppress) \
                rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
        if (rcu_cpu_stall_suppress == 3) \
                rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #endif #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
        return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
        static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
        \
        if (!atomic_read(&___rfd_beenhere) && \
            !atomic_xchg(&___rfd_beenhere, 1)) { \
                tracing_off(); \
                rcu_ftrace_dump_stall_suppress(); \
                ftrace_dump(oops_dump_mode); \
                rcu_ftrace_dump_stall_unsuppress(); \
        } \
} while (0)
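
/*
 * Example invocation (illustrative only): rcu_ftrace_dump(DUMP_ALL) from a
 * stall-warning path dumps the ftrace buffer once, no matter how many times
 * that callsite is subsequently reached during this boot.
 */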

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
        int i;

        for (i = 0; i < RCU_NUM_LVLS; i++)
                levelspread[i] = INT_MIN;
        if (rcu_fanout_exact) {
                levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
                        levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;

                cprv = nr_cpu_ids;
                for (i = rcu_num_lvls - 1; i >= 0; i--) {
                        ccur = levelcnt[i];
                        levelspread[i] = (cprv + ccur - 1) / ccur;
                        cprv = ccur;
                }
        }
}
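
/*
 * Worked example (hypothetical numbers, added for clarity): with
 * nr_cpu_ids == 96 and a two-level tree described by levelcnt == {1, 4},
 * the balanced branch computes levelspread[1] == DIV_ROUND_UP(96, 4) == 24
 * (CPUs per leaf rcu_node) and levelspread[0] == DIV_ROUND_UP(4, 1) == 4
 * (leaf nodes under the root).
 */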

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define _rcu_for_each_node_breadth_first(sp, rnp) \
        for ((rnp) = &(sp)->node[0]; \
             (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
        _rcu_for_each_node_breadth_first(&rcu_state, rnp)
#define srcu_for_each_node_breadth_first(ssp, rnp) \
        _rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
        for ((rnp) = rcu_first_leaf_node(); \
             (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
        for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
             (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
             (cpu) <= rnp->grphi; \
             (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
        ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
        for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
             (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
             (cpu) <= rnp->grphi; \
             (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
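
/*
 * Usage sketch (added for clarity; "rnp" and "cpu" are local variables in a
 * hypothetical caller):
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			;	// e.g. inspect per-CPU state for this leaf
 *
 * This is the general shape of the scans performed by the grace-period
 * kthread and the stall-warning code in kernel/rcu/tree.c.
 */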

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
        smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
        lockdep_assert_irqs_disabled(); \
        raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
        smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
        lockdep_assert_irqs_disabled(); \
        raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
        smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
        lockdep_assert_irqs_disabled(); \
        raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
        bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
        \
        if (___locked) \
                smp_mb__after_unlock_lock(); \
        ___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
        lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
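
/*
 * Usage sketch (added for clarity; "rnp" and "flags" are the hypothetical
 * caller's locals):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ... update rnp fields under the lock, fully ordered against
 *	// earlier critical sections on other rcu_node structures ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */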

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
bool rcu_async_should_hurry(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU
struct task_struct *get_rcu_tasks_gp_kthread(void);
void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq);
#endif // # ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU
struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq);
#endif // # ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU
void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq);
#endif

#ifdef CONFIG_TASKS_RCU_GENERIC
void tasks_cblist_init_generic(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void tasks_cblist_init_generic(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_TASKS_FLAVOR,
        RCU_TASKS_RUDE_FLAVOR,
        RCU_TASKS_TRACING_FLAVOR,
        RCU_TRIVIAL_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_get_jiffies_lazy_flush(void);
void rcu_set_jiffies_lazy_flush(unsigned long j);
#else
static inline unsigned long rcu_get_jiffies_lazy_flush(void) { return 0; }
static inline void rcu_set_jiffies_lazy_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
        *flags = 0;
        *gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags,
                                           unsigned long *gp_seq)
{
        *flags = 0;
        *gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags,
                             unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_watching_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_watching_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct kthread_worker *rcu_exp_gp_kworker;
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
int rcu_stall_notifier_call_chain(unsigned long val, void *v);
#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)

#endif /* __LINUX_RCU_H */