v6.2
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
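/*
 * Illustration (not part of the original source): with two queued
 * callbacks A and B, where A's grace period has already elapsed and
 * B's has not, the fields above line up as follows:
 *
 *	rcucblist -> A -> B -> NULL
 *	donetail == &A->next	(A is "done": ready to invoke)
 *	curtail  == &B->next	(B is the most recently queued CB)
 *
 * When the list is empty, both tail pointers point back at rcucblist,
 * which is how the empty case is detected throughout this file.  The
 * unusual .gp_seq initializer starts the counter close to wraparound,
 * presumably so that sequence-wrap bugs surface early in testing.
 */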
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
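/*
 * Usage sketch (illustrative, not from this file; all names are
 * hypothetical): module-exit code commonly calls rcu_barrier() so that
 * every callback queued by call_rcu() has finished running before the
 * callback code itself is unloaded:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_foo();	// stop queueing new callbacks
 *		rcu_barrier();		// wait for in-flight callbacks
 *		kmem_cache_destroy(foo_cache);
 *	}
 */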
/* Record an rcu quiescent state.  */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}
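/*
 * Note (inferred from the code above): on a uniprocessor any quiescent
 * state ends the current grace period, so rcu_qs() both readies the
 * pending callbacks for the RCU softirq and advances ->gp_seq.  The
 * polled APIs below compare cookies only for inequality, so each
 * advance of ->gp_seq expires every outstanding cookie.
 */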
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}
/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
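/*
 * Background (illustrative; "struct foo" is hypothetical): kvfree_rcu()
 * stores the byte offset of the rcu_head within its enclosing object in
 * ->func instead of a real function pointer.  Such offsets are small,
 * so __is_kvfree_rcu_offset() can tell the two cases apart, and the
 * object's base address falls out by subtraction, as above:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	offset == offsetof(struct foo, rh);
 *	ptr    == (void *)head - offset;	// the enclosing struct foo
 */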
/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}
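/*
 * Note (inferred from the code above): the handler runs in two phases
 * so that interrupts are disabled only for the brief list splice, while
 * the potentially long callback-invocation loop runs with only BH
 * disabled around each individual callback.
 */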
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
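/*
 * Typical updater pattern (illustrative; gp, lock, and struct foo are
 * hypothetical):
 *
 *	struct foo *old;
 *
 *	spin_lock(&lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&lock));
 *	rcu_assign_pointer(gp, new);	// publish the replacement
 *	spin_unlock(&lock);
 *	synchronize_rcu();		// wait out pre-existing readers
 *	kfree(old);			// now safe to reclaim
 */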
static void tiny_rcu_leak_callback(struct rcu_head *rhp)
{
}
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;

	if (debug_rcu_head_queue(head)) {
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
			mem_dump_obj(head);
		}

		if (!__is_kvfree_rcu_offset((unsigned long)head->func))
			WRITE_ONCE(head->func, tiny_rcu_leak_callback);
		return;
	}

	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
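/*
 * Usage sketch (illustrative; struct and callback names are made up):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu(&p->rh, foo_reclaim);	// free p after a grace period
 */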
/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
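/*
 * Polled grace-period usage (illustrative):
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *
 *	// ... do other work while the grace period elapses ...
 *
 *	if (!poll_state_synchronize_rcu(cookie))
 *		synchronize_rcu();	// not yet expired, wait directly
 */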
#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		void *ptr = (void *) head - (unsigned long) func;

		kasan_record_aux_stack_noalloc(ptr);
	}

	__kvfree_call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif
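/*
 * kvfree_rcu() usage sketch (illustrative; struct foo is hypothetical).
 * The double-argument form needs no callback function at all:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kvfree_rcu(p, rh);	// frees p after a grace period
 */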
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}
v4.6
 
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/trace_events.h>

#include "rcu.h"
/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp);

#include "tiny_plugin.h"

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
bool notrace __rcu_is_watching(void)
{
	return true;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
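/*
 * Usage sketch for this kernel era (illustrative; gp and the foo names
 * are hypothetical).  RCU-sched readers are preempt-disabled regions:
 *
 *	// Reader
 *	preempt_disable();
 *	p = rcu_dereference_sched(gp);
 *	// ... use p ...
 *	preempt_enable();
 *
 *	// Updater
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();	// wait for preempt-disabled readers
 *	kfree(old);
 */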
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
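/*
 * Usage sketch (illustrative; names are hypothetical): call_rcu_bh()
 * pairs with readers in rcu_read_lock_bh() sections, just as
 * call_rcu_sched() pairs with preempt-disabled readers.  Later kernels
 * consolidated these flavors into plain call_rcu(), as the v6.2
 * listing above shows:
 *
 *	call_rcu_bh(&p->rh, foo_reclaim);
 */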
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));

	rcu_early_boot_tests();
}