v6.2: kernel/rcu/tiny.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
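
The two tail pointers are the heart of Tiny RCU's bookkeeping: ->curtail always holds the address of the last ->next field in the list (initially the address of ->rcucblist itself), so enqueueing needs no empty-list special case, while ->donetail marks the boundary between callbacks whose grace period has elapsed and those still waiting. The following standalone sketch (ordinary userspace C, not part of the kernel file) illustrates the pointer-to-pointer enqueue idiom:

/*
 * Standalone sketch of the tail-pointer idiom used by rcu_ctrlblk:
 * "tail" always holds the address of the last ->next field, so enqueue
 * is O(1) and an empty list needs no special case.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static struct node *head;		/* analogous to ->rcucblist */
static struct node **tail = &head;	/* analogous to ->curtail */

static void enqueue(int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	n->next = NULL;
	*tail = n;		/* link after current last element (or head) */
	tail = &n->next;	/* tail now points at the new ->next field */
}

int main(void)
{
	enqueue(1);
	enqueue(2);
	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->val);
	return 0;
}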

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
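
rcu_barrier() waits until every callback queued before the call has been invoked. A hedged usage sketch of the common pattern: a module calls it on its exit path so that no callback can run after the code or data it touches is gone. The names "my_cache" and "my_module_exit" are hypothetical.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

static struct kmem_cache *my_cache;	/* objects freed via call_rcu() */

static void __exit my_module_exit(void)
{
	rcu_barrier();			/* all queued callbacks have now run */
	kmem_cache_destroy(my_cache);	/* safe: no callback can touch it */
}
module_exit(my_module_exit);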

/* Record an rcu quiescent state.  */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
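
The kvfree branch decodes a trick: for kfree_rcu()-style requests, ->func holds not a function pointer but the byte offset of the rcu_head within the enclosing object, and such small values cannot be valid kernel function addresses, which is what __is_kvfree_rcu_offset() relies on. A hedged sketch of the encoding side; "struct my_obj" and "my_obj_retire" are hypothetical:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rh;
};

static void my_obj_retire(struct my_obj *p)
{
	/*
	 * Roughly equivalent to call_rcu() with ->func set to
	 * offsetof(struct my_obj, rh).  rcu_reclaim_tiny() later computes
	 * (void *)&p->rh - offset == p and passes that to kvfree().
	 */
	kfree_rcu(p, rh);
}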

/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
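
A hedged sketch of the classic updater pattern that synchronize_rcu() supports: unpublish the old version, wait out pre-existing readers, then free. The names (struct foo, gp, gp_lock, update_foo) are hypothetical:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
};

static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */
static DEFINE_MUTEX(gp_lock);	/* serializes updaters */

static void update_foo(struct foo *newp)
{
	struct foo *oldp;

	mutex_lock(&gp_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);	/* publish the replacement */
	mutex_unlock(&gp_lock);
	synchronize_rcu();		/* wait for pre-existing readers */
	kfree(oldp);			/* no reader can still see oldp */
}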

static void tiny_rcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;

	if (debug_rcu_head_queue(head)) {
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
			mem_dump_obj(head);
		}

		if (!__is_kvfree_rcu_offset((unsigned long)head->func))
			WRITE_ONCE(head->func, tiny_rcu_leak_callback);
		return;
	}

	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
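
call_rcu() is the asynchronous counterpart of the pattern above: the updater embeds an rcu_head in the object and lets a callback free it after a grace period. A hedged sketch with hypothetical names (struct bar, bar_free_cb, bar_retire):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
	int data;
	struct rcu_head rh;	/* embedded so the callback can find us */
};

static void bar_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct bar, rh));
}

static void bar_retire(struct bar *p)
{
	call_rcu(&p->rh, bar_free_cb);	/* returns immediately */
}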

/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
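
A hedged sketch of how the polled API fits together: record a cookie with start_poll_synchronize_rcu(), then test it later with poll_state_synchronize_rcu() without ever blocking. The names (gp_cookie, begin_retire, try_reclaim) are hypothetical:

#include <linux/rcupdate.h>

static unsigned long gp_cookie;	/* hypothetical saved cookie */

static void begin_retire(void)
{
	gp_cookie = start_poll_synchronize_rcu();
}

static bool try_reclaim(void)
{
	if (!poll_state_synchronize_rcu(gp_cookie))
		return false;	/* grace period still in flight; retry later */
	/* Everything unpublished before the cookie was taken is now safe. */
	return true;
}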

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		void *ptr = (void *) head - (unsigned long) func;

		kasan_record_aux_stack_noalloc(ptr);
	}

	__kvfree_call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}
v4.17: kernel/rcu/tiny.c
 
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
};

void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL(rcu_barrier_bh);

void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL(rcu_barrier_sched);

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
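
The comment above rcu_sched_qs() notes that "+" is used instead of "||" to defeat short-circuiting: both rcu_qsctr_help() calls must run for their side effect of advancing ->donetail, and "||" would skip the second whenever the first returned 1. A standalone sketch (ordinary userspace C) of the difference:

#include <stdio.h>

static int calls;

static int help(void)
{
	calls++;		/* side effect that must always happen */
	return 1;
}

int main(void)
{
	calls = 0;
	(void)(help() || help());	/* "||" short-circuits: one call */
	printf("||: %d call(s)\n", calls);

	calls = 0;
	(void)(help() + help());	/* "+" evaluates both: two calls */
	printf("+ : %d call(s)\n", calls);
	return 0;
}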

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim("", list);
		local_bh_enable();
		list = next;
	}
}

static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}