v6.9.4
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
	return READ_ONCE(rclp->len);
}

long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg);

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp);

void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !READ_ONCE(rsclp->head);
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return atomic_long_read(&rsclp->len);
#else
	return READ_ONCE(rsclp->len);
#endif
}
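
/*
 * Illustrative sketch, not part of the kernel source: as the comment
 * above rcu_segcblist_empty() warns, ->head can transiently be NULL
 * while callbacks are still accounted to the list, so code that cares
 * about outstanding work should usually test the count instead of the
 * head pointer.  The function name below is hypothetical.
 */
static inline bool example_has_outstanding_cbs(struct rcu_segcblist *rsclp)
{
	/* The count stays nonzero even while invocation is preempted. */
	return rcu_segcblist_n_cbs(rsclp) != 0;
}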

static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
					   int flags)
{
	WRITE_ONCE(rsclp->flags, rsclp->flags | flags);
}

static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
					     int flags)
{
	WRITE_ONCE(rsclp->flags, rsclp->flags & ~flags);
}

static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
					    int flags)
{
	return READ_ONCE(rsclp->flags) & flags;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Is the specified rcu_segcblist NOCB offloaded (or in the middle of the
 * [de]offloading process)?
 */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
	    rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
		return true;

	return false;
}
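
/*
 * Illustrative sketch, not part of the kernel source: a caller might
 * combine the predicates above to decide whether callbacks can be
 * handled by this CPU's own RCU core processing.  The function name
 * is hypothetical and locking is ignored.
 */
static inline bool example_handle_cbs_locally(struct rcu_segcblist *rsclp)
{
	/* Offline CPUs have their segcblist disabled. */
	if (!rcu_segcblist_is_enabled(rsclp))
		return false;
	/* Offloaded lists are serviced by rcuo kthreads instead. */
	return !rcu_segcblist_is_offloaded(rsclp);
}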

static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
	    !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
		return true;

	return false;
}

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}

/*
 * Is the specified segment of the specified rcu_segcblist structure
 * empty of callbacks?
 */
static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
{
	if (seg == RCU_DONE_TAIL)
		return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
	return rsclp->tails[seg - 1] == rsclp->tails[seg];
}
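
/*
 * Illustrative sketch, not part of the kernel source: counts how many
 * segments currently hold callbacks using the helper above.  The
 * segment indices RCU_DONE_TAIL through RCU_NEXT_TAIL and the
 * RCU_CBLIST_NSEGS bound come from <linux/rcu_segcblist.h>; the
 * function name is hypothetical.
 */
static inline int example_count_nonempty_segments(struct rcu_segcblist *rsclp)
{
	int i, n = 0;

	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		if (!rcu_segcblist_segempty(rsclp, i))
			n++;
	return n;
}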

void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
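
/*
 * Illustrative sketch, not part of the kernel source: a heavily
 * simplified view of how the functions declared above fit together,
 * ignoring locking, NOCB offloading, and grace-period bookkeeping.
 * "example_invoke_ready_cbs" and "gp_seq" are hypothetical names.
 */
static inline void example_invoke_ready_cbs(struct rcu_segcblist *rsclp,
					    unsigned long gp_seq)
{
	struct rcu_cblist ready;
	struct rcu_head *rhp;
	long count;

	/* Move callbacks whose grace period has completed to RCU_DONE_TAIL. */
	rcu_segcblist_advance(rsclp, gp_seq);

	/* Pull the ready callbacks off into a plain rcu_cblist. */
	rcu_cblist_init(&ready);
	rcu_segcblist_extract_done_cbs(rsclp, &ready);
	count = rcu_cblist_n_cbs(&ready);

	/* Invoke them one at a time. */
	while ((rhp = rcu_cblist_dequeue(&ready)) != NULL)
		rhp->func(rhp);

	/* Account for the callbacks that were just invoked. */
	rcu_segcblist_add_len(rsclp, -count);
}
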
v5.4
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
	return READ_ONCE(rclp->len);
}

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
	rclp->len_lazy--;
}

void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
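
/*
 * Illustrative sketch, not part of the kernel source: after dequeuing
 * a callback from an rcu_cblist, the invoker fixes up the lazy count
 * if that callback turns out to be lazy (a kfree_rcu()-style callback
 * in this era).  "example_cb_is_lazy" is a hypothetical placeholder
 * for that test, and "example_invoke_one" is a hypothetical name.
 */
static inline bool example_cb_is_lazy(struct rcu_head *rhp)
{
	return false;	/* Placeholder; the real test lives elsewhere in RCU. */
}

static inline void example_invoke_one(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp = rcu_cblist_dequeue(rclp);

	if (!rhp)
		return;
	if (example_cb_is_lazy(rhp))
		rcu_cblist_dequeued_lazy(rclp);
	rhp->func(rhp);
}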

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !READ_ONCE(rsclp->head);
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return atomic_long_read(&rsclp->len);
#else
	return READ_ONCE(rsclp->len);
#endif
}

/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_n_cbs(rsclp) - rsclp->len_lazy;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rsclp->enabled;
}

/* Is the specified rcu_segcblist offloaded?  */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	return rsclp->offloaded;
}

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}

void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
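
/*
 * Illustrative sketch, not part of the kernel source: in this version
 * the enqueue and entrain prototypes still carry a "lazy" flag for
 * kfree_rcu()-style callbacks; later kernels (compare the v6.9.4
 * prototypes above) dropped it.  The function name is hypothetical
 * and locking is ignored.
 */
static inline void example_queue_cb(struct rcu_segcblist *rsclp,
				    struct rcu_head *rhp,
				    rcu_callback_t func)
{
	rhp->func = func;
	rcu_segcblist_enqueue(rsclp, rhp, false);	/* not a lazy callback */
}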