[Scraped source-browser page chrome: "Linux Audio" sidebar link, training-course banner, "Loading..." placeholder.]

include/linux/srcu.h — Linux v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0+ */
  2/*
  3 * Sleepable Read-Copy Update mechanism for mutual exclusion
  4 *
  5 * Copyright (C) IBM Corporation, 2006
  6 * Copyright (C) Fujitsu, 2012
  7 *
  8 * Author: Paul McKenney <paulmck@linux.ibm.com>
  9 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 10 *
 11 * For detailed explanation of Read-Copy Update mechanism see -
 12 *		Documentation/RCU/ *.txt
 13 *
 14 */
 15
 16#ifndef _LINUX_SRCU_H
 17#define _LINUX_SRCU_H
 18
 19#include <linux/mutex.h>
 20#include <linux/rcupdate.h>
 21#include <linux/workqueue.h>
 22#include <linux/rcu_segcblist.h>
 23
 24struct srcu_struct;
 25
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key);

/*
 * Initialize @ssp, giving each init_srcu_struct() call site its own
 * static lockdep class key (the stringized argument becomes the lock name).
 */
#define init_srcu_struct(ssp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})

/* Static initializer fragment for the embedded lockdep map. */
#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *ssp);

/* No lockdep: the dep_map member does not exist, so initialize nothing. */
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 45
/* Pull in the configured SRCU implementation's srcu_struct definition. */
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
#elif defined(CONFIG_SRCU)
/* CONFIG_SRCU set but neither TINY nor TREE selected: broken Kconfig. */
#error "Unknown SRCU implementation specified to kernel configuration"
#else
/* Dummy definition for things like notifiers.  Actual use gets link error. */
struct srcu_struct { };
#endif
 56
 57void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
 58		void (*func)(struct rcu_head *head));
 59void cleanup_srcu_struct(struct srcu_struct *ssp);
 60int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
 61void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
 
 
 
 
 
 
 
 62void synchronize_srcu(struct srcu_struct *ssp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 63unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
 64unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 65bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 66
#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
/* No SRCU implementation configured: boot-time initialization is a no-op. */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */
 72
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @ssp: The srcu_struct structure to check
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and does not
 * rely on normal RCU, so it can be called from a CPU which
 * is in the idle loop from an RCU point of view or offline.
 */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&ssp->dep_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Without lockdep, conservatively assume we are inside a read-side section. */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
106
 
/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, ssp, c) \
	__rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)

/**
 * srcu_dereference_notrace - no tracing and no lockdep calls from here
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Passing @c == 1 suppresses the lockdep check entirely, which is what
 * makes this variant safe to call from tracing callbacks.
 */
#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
141
/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
 * was invoked in process context.
 *
 * Return: index that must be passed to the matching srcu_read_unlock().
 */
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	retval = __srcu_read_lock(ssp);
	/* Tell lockdep only after the read-side lock is actually held. */
	rcu_lock_acquire(&(ssp)->dep_map);
	return retval;
}
167
/*
 * Used by tracing, cannot be traced and cannot invoke lockdep.
 * Same as srcu_read_lock() except that the rcu_lock_acquire()
 * lockdep annotation is omitted.
 */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	retval = __srcu_read_lock(ssp);
	return retval;
}
177
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	/* __srcu_read_lock() only ever returns 0 or 1. */
	WARN_ON_ONCE(idx & ~0x1);
	/* Drop the lockdep annotation before actually releasing the lock. */
	rcu_lock_release(&(ssp)->dep_map);
	__srcu_read_unlock(ssp, idx);
}
192
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Used by tracing, cannot be traced and cannot call lockdep.
 * Same as srcu_read_unlock() except that the index sanity check and
 * the rcu_lock_release() lockdep annotation are omitted.
 */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
	__srcu_read_unlock(ssp, idx);
}
199
/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213
214#endif
include/linux/srcu.h — Linux v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0+ */
  2/*
  3 * Sleepable Read-Copy Update mechanism for mutual exclusion
  4 *
  5 * Copyright (C) IBM Corporation, 2006
  6 * Copyright (C) Fujitsu, 2012
  7 *
  8 * Author: Paul McKenney <paulmck@linux.ibm.com>
  9 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 10 *
 11 * For detailed explanation of Read-Copy Update mechanism see -
 12 *		Documentation/RCU/ *.txt
 13 *
 14 */
 15
 16#ifndef _LINUX_SRCU_H
 17#define _LINUX_SRCU_H
 18
 19#include <linux/mutex.h>
 20#include <linux/rcupdate.h>
 21#include <linux/workqueue.h>
 22#include <linux/rcu_segcblist.h>
 23
 24struct srcu_struct;
 25
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key);

/*
 * Initialize @ssp, giving each init_srcu_struct() call site its own
 * static lockdep class key (the stringized argument becomes the lock name).
 */
#define init_srcu_struct(ssp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})

/* Static initializer fragment for the embedded lockdep map. */
#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *ssp);

/* No lockdep: the dep_map member does not exist, so initialize nothing. */
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 45
/* Pull in the configured SRCU implementation's srcu_struct definition. */
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
#else
#error "Unknown SRCU implementation specified to kernel configuration"
#endif

void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
		void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
#ifdef CONFIG_TINY_SRCU
/* Tiny SRCU has no separate _lite flavor; map it onto the plain readers. */
#define __srcu_read_lock_lite __srcu_read_lock
#define __srcu_read_unlock_lite __srcu_read_unlock
#else // #ifdef CONFIG_TINY_SRCU
int __srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) __releases(ssp);
#endif // #else // #ifdef CONFIG_TINY_SRCU
 66void synchronize_srcu(struct srcu_struct *ssp);
 67
/* Cookie value reserved to mean "grace period already completed". */
#define SRCU_GET_STATE_COMPLETED 0x1

/**
 * get_completed_synchronize_srcu - Return a pre-completed polled state cookie
 *
 * Returns a value that poll_state_synchronize_srcu() will always treat
 * as a cookie whose grace period has already completed.
 */
static inline unsigned long get_completed_synchronize_srcu(void)
{
	return SRCU_GET_STATE_COMPLETED;
}

unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);

// Maximum number of unsigned long values corresponding to
// not-yet-completed SRCU grace periods.
#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2
 88
 89/**
 90 * same_state_synchronize_srcu - Are two old-state values identical?
 91 * @oldstate1: First old-state value.
 92 * @oldstate2: Second old-state value.
 93 *
 94 * The two old-state values must have been obtained from either
 95 * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
 96 * get_completed_synchronize_srcu().  Returns @true if the two values are
 97 * identical and @false otherwise.  This allows structures whose lifetimes
 98 * are tracked by old-state values to push these values to a list header,
 99 * allowing those structures to be slightly smaller.
100 */
101static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
102{
103	return oldstate1 == oldstate2;
104}
105
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
#else
/*
 * CONFIG_NEED_SRCU_NMI_SAFE=n: fall back to the ordinary readers —
 * presumably these are already NMI-safe on such configurations.
 */
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	__srcu_read_unlock(ssp, idx);
}
#endif /* CONFIG_NEED_SRCU_NMI_SAFE */
119
120void srcu_init(void);
 
 
 
121
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @ssp: The srcu_struct structure to check
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and does not
 * rely on normal RCU, so it can be called from a CPU which
 * is in the idle loop from an RCU point of view or offline.
 */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&ssp->dep_map);
}

/*
 * Annotations provide deadlock detection for SRCU.
 *
 * Similar to other lockdep annotations, except there is an additional
 * srcu_lock_sync(), which is basically an empty *write*-side critical section,
 * see lock_sync() for more information.
 */

/* Annotates a srcu_read_lock() */
static inline void srcu_lock_acquire(struct lockdep_map *map)
{
	lock_map_acquire_read(map);
}

/* Annotates a srcu_read_unlock() */
static inline void srcu_lock_release(struct lockdep_map *map)
{
	lock_map_release(map);
}

/* Annotates a synchronize_srcu() */
static inline void srcu_lock_sync(struct lockdep_map *map)
{
	lock_map_sync(map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Without lockdep, conservatively assume we are inside a read-side section. */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	return 1;
}

/* Without lockdep, all annotations are no-ops. */
#define srcu_lock_acquire(m) do { } while (0)
#define srcu_lock_release(m) do { } while (0)
#define srcu_lock_sync(m) do { } while (0)

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
185
186
/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, ssp, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || srcu_read_lock_held(ssp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)

/**
 * srcu_dereference_notrace - no tracing and no lockdep calls from here
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Passing @c == 1 suppresses the lockdep check entirely, which is what
 * makes this variant safe to call from tracing callbacks.
 */
#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
222
/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * The return value from srcu_read_lock() must be passed unaltered
 * to the matching srcu_read_unlock().  Note that srcu_read_lock() and
 * the matching srcu_read_unlock() must occur in the same context, for
 * example, it is illegal to invoke srcu_read_unlock() in an irq handler
 * if the matching srcu_read_lock() was invoked in process context.  Or,
 * for that matter to invoke srcu_read_unlock() from one task and the
 * matching srcu_read_lock() from another.
 */
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	/* Enforce that this srcu_struct is only used with the normal flavor. */
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	retval = __srcu_read_lock(ssp);
	/* Tell lockdep only after the read-side lock is actually held. */
	srcu_lock_acquire(&ssp->dep_map);
	return retval;
}
252
/**
 * srcu_read_lock_lite - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but for a light-weight
 * smp_mb()-free reader.  See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_lite() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after.  Note that grace-period auto-expediting is disabled for _lite
 * srcu_struct structures because auto-expedited grace periods invoke
 * synchronize_rcu_expedited(), IPIs and all.
 *
 * Note that srcu_read_lock_lite() can be invoked only from those contexts
 * where RCU is watching, that is, from contexts where it would be legal
 * to invoke rcu_read_lock().  Otherwise, lockdep will complain.
 */
static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	/* Mark (and verify) this srcu_struct as lite-flavor-only. */
	srcu_check_read_flavor_lite(ssp);
	retval = __srcu_read_lock_lite(ssp);
	/* Trylock-style annotation: lite readers may run where lock checking is limited. */
	rcu_try_lock_acquire(&ssp->dep_map);
	return retval;
}
279
/**
 * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but in an NMI-safe manner.
 * See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after.
 */
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	/* Enforce that this srcu_struct is only used with the NMI-safe flavor. */
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
	retval = __srcu_read_lock_nmisafe(ssp);
	/* Trylock-style annotation: this may be called from NMI context. */
	rcu_try_lock_acquire(&ssp->dep_map);
	return retval;
}
300
/*
 * Used by tracing, cannot be traced and cannot invoke lockdep.
 * Same as srcu_read_lock() except that the srcu_lock_acquire()
 * lockdep annotation is omitted.
 */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	retval = __srcu_read_lock(ssp);
	return retval;
}
311
/**
 * srcu_down_read - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter a semaphore-like SRCU read-side critical section.  Note that
 * SRCU read-side critical sections may be nested.  However, it is
 * illegal to call anything that waits on an SRCU grace period for the
 * same srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().  But if you want lockdep to help you
 * keep this stuff straight, you should instead use srcu_read_lock().
 *
 * The semaphore-like nature of srcu_down_read() means that the matching
 * srcu_up_read() can be invoked from some other context, for example,
 * from some other task or from an irq handler.  However, neither
 * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
 *
 * Calls to srcu_down_read() may be nested, similar to the manner in
 * which calls to down_read() may be nested.
 *
 * Return: index that must be passed to the matching srcu_up_read().
 */
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
	/* NMI handlers must use srcu_read_lock_nmisafe() instead. */
	WARN_ON_ONCE(in_nmi());
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	/* No lockdep annotation: the matching srcu_up_read() may run elsewhere. */
	return __srcu_read_lock(ssp);
}
339
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	/* __srcu_read_lock() only ever returns 0 or 1. */
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	/* Drop the lockdep annotation before actually releasing the lock. */
	srcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock(ssp, idx);
}
355
/**
 * srcu_read_unlock_lite - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock_lite().
 *
 * Exit a light-weight SRCU read-side critical section.
 */
static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	/* __srcu_read_lock_lite() only ever returns 0 or 1. */
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
	/* Drop the lockdep annotation before actually releasing the lock. */
	srcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock_lite(ssp, idx);
}
371
/**
 * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock_nmisafe().
 *
 * Exit an SRCU read-side critical section, but in an NMI-safe manner.
 */
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	/* __srcu_read_lock_nmisafe() only ever returns 0 or 1. */
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
	rcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock_nmisafe(ssp, idx);
}
387
/*
 * Used by tracing, cannot be traced and cannot call lockdep.
 * Same as srcu_read_unlock() except that the index sanity check and
 * the srcu_lock_release() lockdep annotation are omitted.
 */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	__srcu_read_unlock(ssp, idx);
}
395
/**
 * srcu_up_read - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_down_read().
 *
 * Exit an SRCU read-side critical section, but not necessarily from
 * the same context as the matching srcu_down_read().
 */
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	/* __srcu_read_lock() only ever returns 0 or 1. */
	WARN_ON_ONCE(idx & ~0x1);
	/* Like srcu_down_read(), this must not run in NMI context. */
	WARN_ON_ONCE(in_nmi());
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	__srcu_read_unlock(ssp, idx);
}
412
/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
426
/**
 * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock
 *
 * Converts the preceding srcu_read_lock into a two-way memory barrier.
 *
 * Call this after srcu_read_lock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_lock will appear to happen after
 * the preceding srcu_read_lock.
 */
static inline void smp_mb__after_srcu_read_lock(void)
{
	/* __srcu_read_lock has smp_mb() internally so nothing to do here. */
}
440
/*
 * Scope-based guard for SRCU readers: the constructor stashes
 * srcu_read_lock()'s return value in _T->idx, and the destructor
 * passes it back to srcu_read_unlock() when the guard goes out of scope.
 */
DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
		    _T->idx = srcu_read_lock(_T->lock),
		    srcu_read_unlock(_T->lock, _T->idx),
		    int idx)
445
446#endif