kernel/rcu/update.c, v4.6
  1/*
  2 * Read-Copy Update mechanism for mutual exclusion
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, you can access it online at
 16 * http://www.gnu.org/licenses/gpl-2.0.html.
 17 *
 18 * Copyright IBM Corporation, 2001
 19 *
 20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 21 *	    Manfred Spraul <manfred@colorfullife.com>
 22 *
 23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 25 * Papers:
 26 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 27 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 28 *
 29 * For detailed explanation of Read-Copy Update mechanism see -
 30 *		http://lse.sourceforge.net/locking/rcupdate.html
 31 *
 32 */
 33#include <linux/types.h>
 34#include <linux/kernel.h>
 35#include <linux/init.h>
 36#include <linux/spinlock.h>
 37#include <linux/smp.h>
 38#include <linux/interrupt.h>
 39#include <linux/sched.h>
 40#include <linux/atomic.h>
 41#include <linux/bitops.h>
 42#include <linux/percpu.h>
 43#include <linux/notifier.h>
 44#include <linux/cpu.h>
 45#include <linux/mutex.h>
 46#include <linux/export.h>
 47#include <linux/hardirq.h>
 48#include <linux/delay.h>
 49#include <linux/module.h>
 50#include <linux/kthread.h>
 51#include <linux/tick.h>
 52
 53#define CREATE_TRACE_POINTS
 54
 55#include "rcu.h"
 56
 57MODULE_ALIAS("rcupdate");
 58#ifdef MODULE_PARAM_PREFIX
 59#undef MODULE_PARAM_PREFIX
 60#endif
 61#define MODULE_PARAM_PREFIX "rcupdate."
 62
 63#ifndef CONFIG_TINY_RCU
 64module_param(rcu_expedited, int, 0);
 65module_param(rcu_normal, int, 0);
 66static int rcu_normal_after_boot;
 67module_param(rcu_normal_after_boot, int, 0);
 68#endif /* #ifndef CONFIG_TINY_RCU */
 69
 70#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
 71/**
 72 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 73 *
 74 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 75 * RCU-sched read-side critical section.  In absence of
 76 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 77 * critical section unless it can prove otherwise.  Note that disabling
 78 * of preemption (including disabling irqs) counts as an RCU-sched
 79 * read-side critical section.  This is useful for debug checks in functions
 80 * that require that they be called within an RCU-sched read-side
 81 * critical section.
 82 *
 83 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 84 * and while lockdep is disabled.
 85 *
 86 * Note that if the CPU is in the idle loop from an RCU point of
 87 * view (ie: that we are in the section between rcu_idle_enter() and
 88 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 89 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 90 * that are in such a section, considering these as in extended quiescent
 91 * state, so such a CPU is effectively never in an RCU read-side critical
 92 * section regardless of what RCU primitives it invokes.  This state of
 93 * affairs is required --- we need to keep an RCU-free window in idle
 94 * where the CPU may possibly enter into low power mode. This way we can
 95 * notice an extended quiescent state to other CPUs that started a grace
 96 * period. Otherwise we would delay any grace period as long as we run in
 97 * the idle task.
 98 *
 99 * Similarly, we avoid claiming an SRCU read lock held if the current
100 * CPU is offline.
101 */
102int rcu_read_lock_sched_held(void)
103{
104	int lockdep_opinion = 0;
105
106	if (!debug_lockdep_rcu_enabled())
107		return 1;
108	if (!rcu_is_watching())
109		return 0;
110	if (!rcu_lockdep_current_cpu_online())
111		return 0;
112	if (debug_locks)
113		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
114	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
115}
116EXPORT_SYMBOL(rcu_read_lock_sched_held);
117#endif
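As a hedged illustration (not part of this file; struct foo, global_foo, and read_foo_value() are hypothetical), a caller would typically feed this predicate into a lockdep assertion before an RCU-sched dereference:

#include <linux/rcupdate.h>

struct foo {
	int value;
};

static struct foo __rcu *global_foo;

/* Hypothetical reader: complain via lockdep unless preemption is off
 * or rcu_read_lock_sched() is held, then do the protected dereference. */
static int read_foo_value(void)
{
	struct foo *p;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "read_foo_value() needs rcu_read_lock_sched()");
	p = rcu_dereference_sched(global_foo);
	return p ? p->value : -1;
}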
118
119#ifndef CONFIG_TINY_RCU
120
121/*
122 * Should expedited grace-period primitives always fall back to their
123 * non-expedited counterparts?  Intended for use within RCU.  Note
124 * that if the user specifies both rcu_expedited and rcu_normal, then
125 * rcu_normal wins.
126 */
127bool rcu_gp_is_normal(void)
128{
129	return READ_ONCE(rcu_normal);
130}
131EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
132
133static atomic_t rcu_expedited_nesting =
134	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
135
136/*
137 * Should normal grace-period primitives be expedited?  Intended for
138 * use within RCU.  Note that this function takes the rcu_expedited
139 * sysfs/boot variable into account as well as the rcu_expedite_gp()
140 * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
141 * returns false is a -really- bad idea.
142 */
143bool rcu_gp_is_expedited(void)
144{
145	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
146}
147EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
148
149/**
150 * rcu_expedite_gp - Expedite future RCU grace periods
151 *
152 * After a call to this function, future calls to synchronize_rcu() and
153 * friends act as the corresponding synchronize_rcu_expedited() function
154 * had instead been called.
155 */
156void rcu_expedite_gp(void)
157{
158	atomic_inc(&rcu_expedited_nesting);
159}
160EXPORT_SYMBOL_GPL(rcu_expedite_gp);
161
162/**
163 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
164 *
165 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
166 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
167 * and if the rcu_expedited sysfs/boot parameter is not set, then all
168 * subsequent calls to synchronize_rcu() and friends will return to
169 * their normal non-expedited behavior.
170 */
171void rcu_unexpedite_gp(void)
172{
173	atomic_dec(&rcu_expedited_nesting);
174}
175EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
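A minimal sketch of the intended pairing, assuming a hypothetical latency-sensitive setup path that wants expedited grace periods only while it runs; the two calls must balance, since they simply bump and drop rcu_expedited_nesting:

#include <linux/rcupdate.h>

static void fast_setup_phase(void)
{
	rcu_expedite_gp();	/* subsequent synchronize_rcu() acts expedited */
	synchronize_rcu();	/* completes quickly while expedited */
	rcu_unexpedite_gp();	/* drop the nesting count; back to normal
				 * unless rcu_expedited is also set */
}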
176
177/*
178 * Inform RCU of the end of the in-kernel boot sequence.
179 */
180void rcu_end_inkernel_boot(void)
181{
182	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
183		rcu_unexpedite_gp();
184	if (rcu_normal_after_boot)
185		WRITE_ONCE(rcu_normal, 1);
186}
187
188#endif /* #ifndef CONFIG_TINY_RCU */
189
190#ifdef CONFIG_PREEMPT_RCU
191
192/*
193 * Preemptible RCU implementation for rcu_read_lock().
194 * Just increment ->rcu_read_lock_nesting, shared state will be updated
195 * if we block.
196 */
197void __rcu_read_lock(void)
198{
199	current->rcu_read_lock_nesting++;
200	barrier();  /* critical section after entry code. */
201}
202EXPORT_SYMBOL_GPL(__rcu_read_lock);
203
204/*
205 * Preemptible RCU implementation for rcu_read_unlock().
206 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
207 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
208 * invoke rcu_read_unlock_special() to clean up after a context switch
209 * in an RCU read-side critical section and other special cases.
210 */
211void __rcu_read_unlock(void)
212{
213	struct task_struct *t = current;
214
215	if (t->rcu_read_lock_nesting != 1) {
216		--t->rcu_read_lock_nesting;
217	} else {
218		barrier();  /* critical section before exit code. */
219		t->rcu_read_lock_nesting = INT_MIN;
220		barrier();  /* assign before ->rcu_read_unlock_special load */
221		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
222			rcu_read_unlock_special(t);
223		barrier();  /* ->rcu_read_unlock_special load before assign */
224		t->rcu_read_lock_nesting = 0;
225	}
226#ifdef CONFIG_PROVE_LOCKING
227	{
228		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
229
230		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
231	}
232#endif /* #ifdef CONFIG_PROVE_LOCKING */
233}
234EXPORT_SYMBOL_GPL(__rcu_read_unlock);
235
236#endif /* #ifdef CONFIG_PREEMPT_RCU */
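For context, a hedged sketch of the reader/updater pair that __rcu_read_lock() and __rcu_read_unlock() back on CONFIG_PREEMPT_RCU kernels (struct gadget, cur_gadget, and the two helpers are hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct gadget {
	int cfg;
	struct rcu_head rcu;
};

static struct gadget __rcu *cur_gadget;
static DEFINE_SPINLOCK(gadget_lock);

/* Reader: may be preempted inside the critical section under PREEMPT_RCU. */
static int gadget_cfg(void)
{
	struct gadget *g;
	int cfg = -1;

	rcu_read_lock();
	g = rcu_dereference(cur_gadget);
	if (g)
		cfg = g->cfg;
	rcu_read_unlock();
	return cfg;
}

/* Updater: publish the new version, then free the old one after a GP. */
static void gadget_replace(struct gadget *newg)
{
	struct gadget *old;

	spin_lock(&gadget_lock);
	old = rcu_dereference_protected(cur_gadget,
					lockdep_is_held(&gadget_lock));
	rcu_assign_pointer(cur_gadget, newg);
	spin_unlock(&gadget_lock);
	if (old)
		kfree_rcu(old, rcu);
}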
237
238#ifdef CONFIG_DEBUG_LOCK_ALLOC
239static struct lock_class_key rcu_lock_key;
240struct lockdep_map rcu_lock_map =
241	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
242EXPORT_SYMBOL_GPL(rcu_lock_map);
243
244static struct lock_class_key rcu_bh_lock_key;
245struct lockdep_map rcu_bh_lock_map =
246	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
247EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
248
249static struct lock_class_key rcu_sched_lock_key;
250struct lockdep_map rcu_sched_lock_map =
251	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
252EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
253
254static struct lock_class_key rcu_callback_key;
255struct lockdep_map rcu_callback_map =
256	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
257EXPORT_SYMBOL_GPL(rcu_callback_map);
258
259int notrace debug_lockdep_rcu_enabled(void)
260{
261	return rcu_scheduler_active && debug_locks &&
262	       current->lockdep_recursion == 0;
263}
264EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
265
266/**
267 * rcu_read_lock_held() - might we be in RCU read-side critical section?
268 *
269 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
270 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
271 * this assumes we are in an RCU read-side critical section unless it can
272 * prove otherwise.  This is useful for debug checks in functions that
273 * require that they be called within an RCU read-side critical section.
274 *
275 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
276 * and while lockdep is disabled.
277 *
278 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
279 * occur in the same context, for example, it is illegal to invoke
280 * rcu_read_unlock() in process context if the matching rcu_read_lock()
281 * was invoked from within an irq handler.
282 *
283 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
284 * offline from an RCU perspective, so check for those as well.
285 */
286int rcu_read_lock_held(void)
287{
288	if (!debug_lockdep_rcu_enabled())
289		return 1;
290	if (!rcu_is_watching())
291		return 0;
292	if (!rcu_lockdep_current_cpu_online())
293		return 0;
294	return lock_is_held(&rcu_lock_map);
295}
296EXPORT_SYMBOL_GPL(rcu_read_lock_held);
297
298/**
299 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
300 *
301 * Check for bottom half being disabled, which covers both the
302 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
303 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
304 * will show the situation.  This is useful for debug checks in functions
305 * that require that they be called within an RCU read-side critical
306 * section.
307 *
308 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
309 *
310 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
311 * offline from an RCU perspective, so check for those as well.
312 */
313int rcu_read_lock_bh_held(void)
314{
315	if (!debug_lockdep_rcu_enabled())
316		return 1;
317	if (!rcu_is_watching())
318		return 0;
319	if (!rcu_lockdep_current_cpu_online())
320		return 0;
321	return in_softirq() || irqs_disabled();
322}
323EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
324
325#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
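A hedged sketch of the kind of caller this check is meant for (struct stats and cur_stats are hypothetical): a reader that relies on BH being disabled rather than on rcu_read_lock():

#include <linux/rcupdate.h>

struct stats {
	unsigned long rx;
	unsigned long tx;
};

static struct stats __rcu *cur_stats;

/* Hypothetical softirq-side reader: rcu_read_lock_bh() disables BH,
 * which is exactly the condition rcu_read_lock_bh_held() tests for. */
static unsigned long rx_count(void)
{
	struct stats *s;
	unsigned long rx = 0;

	rcu_read_lock_bh();
	s = rcu_dereference_bh(cur_stats);
	if (s)
		rx = s->rx;
	rcu_read_unlock_bh();
	return rx;
}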
326
327/**
328 * wakeme_after_rcu() - Callback function to awaken a task after grace period
329 * @head: Pointer to rcu_head member within rcu_synchronize structure
330 *
331 * Awaken the corresponding task now that a grace period has elapsed.
332 */
333void wakeme_after_rcu(struct rcu_head *head)
334{
335	struct rcu_synchronize *rcu;
336
337	rcu = container_of(head, struct rcu_synchronize, head);
338	complete(&rcu->completion);
339}
340EXPORT_SYMBOL_GPL(wakeme_after_rcu);
341
342void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
343		   struct rcu_synchronize *rs_array)
344{
345	int i;
346
347	/* Initialize and register callbacks for each flavor specified. */
348	for (i = 0; i < n; i++) {
349		if (checktiny &&
350		    (crcu_array[i] == call_rcu ||
351		     crcu_array[i] == call_rcu_bh)) {
352			might_sleep();
353			continue;
354		}
355		init_rcu_head_on_stack(&rs_array[i].head);
356		init_completion(&rs_array[i].completion);
357		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
358	}
359
360	/* Wait for all callbacks to be invoked. */
361	for (i = 0; i < n; i++) {
362		if (checktiny &&
363		    (crcu_array[i] == call_rcu ||
364		     crcu_array[i] == call_rcu_bh))
365			continue;
366		wait_for_completion(&rs_array[i].completion);
367		destroy_rcu_head_on_stack(&rs_array[i].head);
368	}
369}
370EXPORT_SYMBOL_GPL(__wait_rcu_gp);
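A hedged sketch of the single-flavor pattern that __wait_rcu_gp() generalizes, and roughly what wait_rcu_gp(call_rcu) boils down to: register wakeme_after_rcu() as the callback, then sleep on the completion.

#include <linux/rcupdate.h>
#include <linux/completion.h>

/* Illustration only; use synchronize_rcu() in real code. */
static void example_wait_one_gp(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);	/* appease debugobjects */
	init_completion(&rcu.completion);
	call_rcu(&rcu.head, wakeme_after_rcu);	/* fires after a grace period */
	wait_for_completion(&rcu.completion);	/* block until it has fired */
	destroy_rcu_head_on_stack(&rcu.head);
}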
371
372#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
373void init_rcu_head(struct rcu_head *head)
374{
375	debug_object_init(head, &rcuhead_debug_descr);
376}
377
378void destroy_rcu_head(struct rcu_head *head)
379{
380	debug_object_free(head, &rcuhead_debug_descr);
381}
382
383/*
384 * fixup_activate is called when:
385 * - an active object is activated
386 * - an unknown object is activated (might be a statically initialized object)
387 * Activation is performed internally by call_rcu().
388 */
389static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
390{
391	struct rcu_head *head = addr;
392
393	switch (state) {
394
395	case ODEBUG_STATE_NOTAVAILABLE:
396		/*
397		 * This is not really a fixup. We just make sure that it is
398		 * tracked in the object tracker.
399		 */
400		debug_object_init(head, &rcuhead_debug_descr);
401		debug_object_activate(head, &rcuhead_debug_descr);
402		return 0;
403	default:
404		return 1;
405	}
406}
407
408/**
409 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
410 * @head: pointer to rcu_head structure to be initialized
411 *
412 * This function informs debugobjects of a new rcu_head structure that
413 * has been allocated as an auto variable on the stack.  This function
414 * is not required for rcu_head structures that are statically defined or
415 * that are dynamically allocated on the heap.  This function has no
416 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
417 */
418void init_rcu_head_on_stack(struct rcu_head *head)
419{
420	debug_object_init_on_stack(head, &rcuhead_debug_descr);
421}
422EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
423
424/**
425 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
426 * @head: pointer to rcu_head structure to be initialized
427 *
428 * This function informs debugobjects that an on-stack rcu_head structure
429 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
430 * function is not required for rcu_head structures that are statically
431 * defined or that are dynamically allocated on the heap.  Also as with
432 * init_rcu_head_on_stack(), this function has no effect for
433 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
434 */
435void destroy_rcu_head_on_stack(struct rcu_head *head)
436{
437	debug_object_free(head, &rcuhead_debug_descr);
438}
439EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
440
441struct debug_obj_descr rcuhead_debug_descr = {
442	.name = "rcu_head",
443	.fixup_activate = rcuhead_fixup_activate,
444};
445EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
446#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
447
448#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
449void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
450			       unsigned long secs,
451			       unsigned long c_old, unsigned long c)
452{
453	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
454}
455EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
456#else
457#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
458	do { } while (0)
459#endif
460
461#ifdef CONFIG_RCU_STALL_COMMON
462
463#ifdef CONFIG_PROVE_RCU
464#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
465#else
466#define RCU_STALL_DELAY_DELTA	       0
467#endif
468
469int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
470static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
471
472module_param(rcu_cpu_stall_suppress, int, 0644);
473module_param(rcu_cpu_stall_timeout, int, 0644);
474
475int rcu_jiffies_till_stall_check(void)
476{
477	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
478
479	/*
480	 * Limit check must be consistent with the Kconfig limits
481	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
482	 */
483	if (till_stall_check < 3) {
484		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
485		till_stall_check = 3;
486	} else if (till_stall_check > 300) {
487		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
488		till_stall_check = 300;
489	}
490	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
491}
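As a worked example (assuming the typical CONFIG_RCU_CPU_STALL_TIMEOUT default of 21): this returns 21 * HZ jiffies, i.e. a 21-second stall check, or 26 * HZ once CONFIG_PROVE_RCU adds the 5 * HZ slack. A sysfs write of rcu_cpu_stall_timeout=500 is clamped back to 300, and anything below 3 is raised to 3.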
492
493void rcu_sysrq_start(void)
494{
495	if (!rcu_cpu_stall_suppress)
496		rcu_cpu_stall_suppress = 2;
497}
498
499void rcu_sysrq_end(void)
500{
501	if (rcu_cpu_stall_suppress == 2)
502		rcu_cpu_stall_suppress = 0;
503}
504
505static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
506{
507	rcu_cpu_stall_suppress = 1;
508	return NOTIFY_DONE;
509}
510
511static struct notifier_block rcu_panic_block = {
512	.notifier_call = rcu_panic,
513};
514
515static int __init check_cpu_stall_init(void)
516{
517	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
518	return 0;
519}
520early_initcall(check_cpu_stall_init);
521
522#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
523
524#ifdef CONFIG_TASKS_RCU
525
526/*
527 * Simple variant of RCU whose quiescent states are voluntary context switch,
528 * user-space execution, and idle.  As such, grace periods can take one good
529 * long time.  There are no read-side primitives similar to rcu_read_lock()
530 * and rcu_read_unlock() because this implementation is intended to get
531 * the system into a safe state for some of the manipulations involved in
532 * tracing and the like.  Finally, this implementation does not support
533 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
534 * per-CPU callback lists will be needed.
535 */
536
537/* Global list of callbacks and associated lock. */
538static struct rcu_head *rcu_tasks_cbs_head;
539static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
540static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
541static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
542
543/* Track exiting tasks in order to allow them to be waited for. */
544DEFINE_SRCU(tasks_rcu_exit_srcu);
545
546/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
547static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
548module_param(rcu_task_stall_timeout, int, 0644);
549
550static void rcu_spawn_tasks_kthread(void);
551
552/*
553 * Post an RCU-tasks callback.  First call must be from process context
554 * after the scheduler is fully operational.
555 */
556void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
557{
558	unsigned long flags;
559	bool needwake;
560
561	rhp->next = NULL;
562	rhp->func = func;
563	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
564	needwake = !rcu_tasks_cbs_head;
565	*rcu_tasks_cbs_tail = rhp;
566	rcu_tasks_cbs_tail = &rhp->next;
567	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
568	if (needwake) {
569		rcu_spawn_tasks_kthread();
570		wake_up(&rcu_tasks_cbs_wq);
571	}
572}
573EXPORT_SYMBOL_GPL(call_rcu_tasks);
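A hedged sketch of a typical caller, loosely modeled on how tracing code retires instruction trampolines only after every task has passed through a voluntary context switch; struct old_trampoline and the helpers are hypothetical, not part of this file.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct old_trampoline {
	struct rcu_head rh;
	void *text;
};

/* Runs from rcu_tasks_kthread() once no task can still be executing
 * inside the retired trampoline. */
static void free_old_trampoline(struct rcu_head *rhp)
{
	struct old_trampoline *otp;

	otp = container_of(rhp, struct old_trampoline, rh);
	kfree(otp);
}

static void retire_trampoline(struct old_trampoline *otp)
{
	call_rcu_tasks(&otp->rh, free_old_trampoline);
}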
574
575/**
576 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
577 *
578 * Control will return to the caller some time after a full rcu-tasks
579 * grace period has elapsed, in other words after all currently
580 * executing rcu-tasks read-side critical sections have elapsed.  These
581 * read-side critical sections are delimited by calls to schedule(),
582 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
583 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
584 *
585 * This is a very specialized primitive, intended only for a few uses in
586 * tracing and other situations requiring manipulation of function
587 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
588 * is not (yet) intended for heavy use from multiple CPUs.
589 *
590 * Note that this guarantee implies further memory-ordering guarantees.
591 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
592 * each CPU is guaranteed to have executed a full memory barrier since the
593 * end of its last RCU-tasks read-side critical section whose beginning
594 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
595 * having an RCU-tasks read-side critical section that extends beyond
596 * the return from synchronize_rcu_tasks() is guaranteed to have executed
597 * a full memory barrier after the beginning of synchronize_rcu_tasks()
598 * and before the beginning of that RCU-tasks read-side critical section.
599 * Note that these guarantees include CPUs that are offline, idle, or
600 * executing in user mode, as well as CPUs that are executing in the kernel.
601 *
602 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
603 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
604 * to have executed a full memory barrier during the execution of
605 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
606 * (but again only if the system has more than one CPU).
607 */
608void synchronize_rcu_tasks(void)
609{
610	/* Complain if the scheduler has not started.  */
611	RCU_LOCKDEP_WARN(!rcu_scheduler_active,
612			 "synchronize_rcu_tasks called too soon");
613
614	/* Wait for the grace period. */
615	wait_rcu_gp(call_rcu_tasks);
616}
617EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
618
619/**
620 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
621 *
622 * Although the current implementation is guaranteed to wait, it is not
623 * obligated to, for example, if there are no pending callbacks.
624 */
625void rcu_barrier_tasks(void)
626{
627	/* There is only one callback queue, so this is easy.  ;-) */
628	synchronize_rcu_tasks();
629}
630EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
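Continuing the hypothetical trampoline sketch above, the synchronous form fits a teardown path: unhook first, wait for a full RCU-tasks grace period so no task is still running the old code, then free it. (disconnect_trampoline() is assumed for illustration, not a real kernel API.)

/* Hypothetical unhooking primitive, declared here only for illustration. */
void disconnect_trampoline(struct old_trampoline *otp);

static void unregister_trampoline(struct old_trampoline *otp)
{
	disconnect_trampoline(otp);	/* no new entries into the old text */
	synchronize_rcu_tasks();	/* wait out every current "reader" task */
	kfree(otp);			/* now safe to release the text */
}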
631
632/* See if tasks are still holding out, complain if so. */
633static void check_holdout_task(struct task_struct *t,
634			       bool needreport, bool *firstreport)
635{
636	int cpu;
637
638	if (!READ_ONCE(t->rcu_tasks_holdout) ||
639	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
640	    !READ_ONCE(t->on_rq) ||
641	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
642	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
643		WRITE_ONCE(t->rcu_tasks_holdout, false);
644		list_del_init(&t->rcu_tasks_holdout_list);
645		put_task_struct(t);
646		return;
647	}
648	if (!needreport)
649		return;
650	if (*firstreport) {
651		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
652		*firstreport = false;
653	}
654	cpu = task_cpu(t);
655	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
656		 t, ".I"[is_idle_task(t)],
657		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
658		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
659		 t->rcu_tasks_idle_cpu, cpu);
660	sched_show_task(t);
661}
662
663/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
664static int __noreturn rcu_tasks_kthread(void *arg)
665{
666	unsigned long flags;
667	struct task_struct *g, *t;
668	unsigned long lastreport;
669	struct rcu_head *list;
670	struct rcu_head *next;
671	LIST_HEAD(rcu_tasks_holdouts);
672
673	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
674	housekeeping_affine(current);
675
676	/*
677	 * Each pass through the following loop makes one check for
678	 * newly arrived callbacks, and, if there are some, waits for
679	 * one RCU-tasks grace period and then invokes the callbacks.
680	 * This loop is terminated by the system going down.  ;-)
681	 */
682	for (;;) {
683
684		/* Pick up any new callbacks. */
685		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
686		list = rcu_tasks_cbs_head;
687		rcu_tasks_cbs_head = NULL;
688		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
689		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
690
691		/* If there were none, wait a bit and start over. */
692		if (!list) {
693			wait_event_interruptible(rcu_tasks_cbs_wq,
694						 rcu_tasks_cbs_head);
695			if (!rcu_tasks_cbs_head) {
696				WARN_ON(signal_pending(current));
697				schedule_timeout_interruptible(HZ/10);
698			}
699			continue;
700		}
701
702		/*
703		 * Wait for all pre-existing t->on_rq and t->nvcsw
704		 * transitions to complete.  Invoking synchronize_sched()
705		 * suffices because all these transitions occur with
706		 * interrupts disabled.  Without this synchronize_sched(),
707		 * a read-side critical section that started before the
708		 * grace period might be incorrectly seen as having started
709		 * after the grace period.
710		 *
711		 * This synchronize_sched() also dispenses with the
712		 * need for a memory barrier on the first store to
713		 * ->rcu_tasks_holdout, as it forces the store to happen
714		 * after the beginning of the grace period.
715		 */
716		synchronize_sched();
717
718		/*
719		 * There were callbacks, so we need to wait for an
720		 * RCU-tasks grace period.  Start off by scanning
721		 * the task list for tasks that are not already
722		 * voluntarily blocked.  Mark these tasks and make
723		 * a list of them in rcu_tasks_holdouts.
724		 */
725		rcu_read_lock();
726		for_each_process_thread(g, t) {
727			if (t != current && READ_ONCE(t->on_rq) &&
728			    !is_idle_task(t)) {
729				get_task_struct(t);
730				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
731				WRITE_ONCE(t->rcu_tasks_holdout, true);
732				list_add(&t->rcu_tasks_holdout_list,
733					 &rcu_tasks_holdouts);
734			}
735		}
736		rcu_read_unlock();
737
738		/*
739		 * Wait for tasks that are in the process of exiting.
740		 * This does only part of the job, ensuring that all
741		 * tasks that were previously exiting reach the point
742		 * where they have disabled preemption, allowing the
743		 * later synchronize_sched() to finish the job.
744		 */
745		synchronize_srcu(&tasks_rcu_exit_srcu);
746
747		/*
748		 * Each pass through the following loop scans the list
749		 * of holdout tasks, removing any that are no longer
750		 * holdouts.  When the list is empty, we are done.
751		 */
752		lastreport = jiffies;
753		while (!list_empty(&rcu_tasks_holdouts)) {
754			bool firstreport;
755			bool needreport;
756			int rtst;
757			struct task_struct *t1;
758
759			schedule_timeout_interruptible(HZ);
760			rtst = READ_ONCE(rcu_task_stall_timeout);
761			needreport = rtst > 0 &&
762				     time_after(jiffies, lastreport + rtst);
763			if (needreport)
764				lastreport = jiffies;
765			firstreport = true;
766			WARN_ON(signal_pending(current));
767			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
768						rcu_tasks_holdout_list) {
769				check_holdout_task(t, needreport, &firstreport);
770				cond_resched();
771			}
772		}
773
774		/*
775		 * Because ->on_rq and ->nvcsw are not guaranteed
776		 * to have full memory barriers prior to them in the
777		 * schedule() path, memory reordering on other CPUs could
778		 * cause their RCU-tasks read-side critical sections to
779		 * extend past the end of the grace period.  However,
780		 * because these ->nvcsw updates are carried out with
781		 * interrupts disabled, we can use synchronize_sched()
782		 * to force the needed ordering on all such CPUs.
783		 *
784		 * This synchronize_sched() also confines all
785		 * ->rcu_tasks_holdout accesses to be within the grace
786		 * period, avoiding the need for memory barriers for
787		 * ->rcu_tasks_holdout accesses.
788		 *
789		 * In addition, this synchronize_sched() waits for exiting
790		 * tasks to complete their final preempt_disable() region
791		 * of execution, cleaning up after the synchronize_srcu()
792		 * above.
793		 */
794		synchronize_sched();
795
796		/* Invoke the callbacks. */
797		while (list) {
798			next = list->next;
799			local_bh_disable();
800			list->func(list);
801			local_bh_enable();
802			list = next;
803			cond_resched();
804		}
805		schedule_timeout_uninterruptible(HZ/10);
806	}
807}
808
809/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
810static void rcu_spawn_tasks_kthread(void)
811{
812	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
813	static struct task_struct *rcu_tasks_kthread_ptr;
814	struct task_struct *t;
815
816	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
817		smp_mb(); /* Ensure caller sees full kthread. */
818		return;
819	}
820	mutex_lock(&rcu_tasks_kthread_mutex);
821	if (rcu_tasks_kthread_ptr) {
822		mutex_unlock(&rcu_tasks_kthread_mutex);
823		return;
824	}
825	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
826	BUG_ON(IS_ERR(t));
827	smp_mb(); /* Ensure others see full kthread. */
828	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
829	mutex_unlock(&rcu_tasks_kthread_mutex);
830}
831
832#endif /* #ifdef CONFIG_TASKS_RCU */
833
834#ifdef CONFIG_PROVE_RCU
835
836/*
837 * Early boot self test parameters, one for each flavor
838 */
839static bool rcu_self_test;
840static bool rcu_self_test_bh;
841static bool rcu_self_test_sched;
842
843module_param(rcu_self_test, bool, 0444);
844module_param(rcu_self_test_bh, bool, 0444);
845module_param(rcu_self_test_sched, bool, 0444);
846
847static int rcu_self_test_counter;
848
849static void test_callback(struct rcu_head *r)
850{
851	rcu_self_test_counter++;
852	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
853}
854
855static void early_boot_test_call_rcu(void)
856{
857	static struct rcu_head head;
858
859	call_rcu(&head, test_callback);
860}
861
862static void early_boot_test_call_rcu_bh(void)
863{
864	static struct rcu_head head;
865
866	call_rcu_bh(&head, test_callback);
867}
868
869static void early_boot_test_call_rcu_sched(void)
870{
871	static struct rcu_head head;
872
873	call_rcu_sched(&head, test_callback);
874}
875
876void rcu_early_boot_tests(void)
877{
878	pr_info("Running RCU self tests\n");
879
880	if (rcu_self_test)
881		early_boot_test_call_rcu();
882	if (rcu_self_test_bh)
883		early_boot_test_call_rcu_bh();
884	if (rcu_self_test_sched)
885		early_boot_test_call_rcu_sched();
886}
887
888static int rcu_verify_early_boot_tests(void)
889{
890	int ret = 0;
891	int early_boot_test_counter = 0;
892
893	if (rcu_self_test) {
894		early_boot_test_counter++;
895		rcu_barrier();
896	}
897	if (rcu_self_test_bh) {
898		early_boot_test_counter++;
899		rcu_barrier_bh();
900	}
901	if (rcu_self_test_sched) {
902		early_boot_test_counter++;
903		rcu_barrier_sched();
904	}
905
906	if (rcu_self_test_counter != early_boot_test_counter) {
907		WARN_ON(1);
908		ret = -1;
909	}
910
911	return ret;
912}
913late_initcall(rcu_verify_early_boot_tests);
914#else
915void rcu_early_boot_tests(void) {}
916#endif /* CONFIG_PROVE_RCU */
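Because MODULE_PARAM_PREFIX is redefined to "rcupdate." near the top of the file, the knobs defined here are set under that prefix on the kernel command line. A hedged example (parameter names come from this file, the values are purely illustrative):

	rcupdate.rcu_self_test=1 rcupdate.rcu_normal_after_boot=1 rcupdate.rcu_cpu_stall_timeout=60 rcupdate.rcu_task_stall_timeout=0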
kernel/rcu/update.c, v4.10.11
  1/*
  2 * Read-Copy Update mechanism for mutual exclusion
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, you can access it online at
 16 * http://www.gnu.org/licenses/gpl-2.0.html.
 17 *
 18 * Copyright IBM Corporation, 2001
 19 *
 20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 21 *	    Manfred Spraul <manfred@colorfullife.com>
 22 *
 23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 25 * Papers:
 26 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 27 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 28 *
 29 * For detailed explanation of Read-Copy Update mechanism see -
 30 *		http://lse.sourceforge.net/locking/rcupdate.html
 31 *
 32 */
 33#include <linux/types.h>
 34#include <linux/kernel.h>
 35#include <linux/init.h>
 36#include <linux/spinlock.h>
 37#include <linux/smp.h>
 38#include <linux/interrupt.h>
 39#include <linux/sched.h>
 40#include <linux/atomic.h>
 41#include <linux/bitops.h>
 42#include <linux/percpu.h>
 43#include <linux/notifier.h>
 44#include <linux/cpu.h>
 45#include <linux/mutex.h>
 46#include <linux/export.h>
 47#include <linux/hardirq.h>
 48#include <linux/delay.h>
 49#include <linux/moduleparam.h>
 50#include <linux/kthread.h>
 51#include <linux/tick.h>
 52
 53#define CREATE_TRACE_POINTS
 54
 55#include "rcu.h"
 56
 57#ifdef MODULE_PARAM_PREFIX
 58#undef MODULE_PARAM_PREFIX
 59#endif
 60#define MODULE_PARAM_PREFIX "rcupdate."
 61
 62#ifndef CONFIG_TINY_RCU
 63module_param(rcu_expedited, int, 0);
 64module_param(rcu_normal, int, 0);
 65static int rcu_normal_after_boot;
 66module_param(rcu_normal_after_boot, int, 0);
 67#endif /* #ifndef CONFIG_TINY_RCU */
 68
 69#ifdef CONFIG_DEBUG_LOCK_ALLOC
 70/**
 71 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 72 *
 73 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 74 * RCU-sched read-side critical section.  In absence of
 75 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 76 * critical section unless it can prove otherwise.  Note that disabling
 77 * of preemption (including disabling irqs) counts as an RCU-sched
 78 * read-side critical section.  This is useful for debug checks in functions
 79 * that require that they be called within an RCU-sched read-side
 80 * critical section.
 81 *
 82 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 83 * and while lockdep is disabled.
 84 *
 85 * Note that if the CPU is in the idle loop from an RCU point of
 86 * view (ie: that we are in the section between rcu_idle_enter() and
 87 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 88 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 89 * that are in such a section, considering these as in extended quiescent
 90 * state, so such a CPU is effectively never in an RCU read-side critical
 91 * section regardless of what RCU primitives it invokes.  This state of
 92 * affairs is required --- we need to keep an RCU-free window in idle
 93 * where the CPU may possibly enter into low power mode. This way we can
 94 * notice an extended quiescent state to other CPUs that started a grace
 95 * period. Otherwise we would delay any grace period as long as we run in
 96 * the idle task.
 97 *
 98 * Similarly, we avoid claiming an SRCU read lock held if the current
 99 * CPU is offline.
100 */
101int rcu_read_lock_sched_held(void)
102{
103	int lockdep_opinion = 0;
104
105	if (!debug_lockdep_rcu_enabled())
106		return 1;
107	if (!rcu_is_watching())
108		return 0;
109	if (!rcu_lockdep_current_cpu_online())
110		return 0;
111	if (debug_locks)
112		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
113	return lockdep_opinion || !preemptible();
114}
115EXPORT_SYMBOL(rcu_read_lock_sched_held);
116#endif
117
118#ifndef CONFIG_TINY_RCU
119
120/*
121 * Should expedited grace-period primitives always fall back to their
122 * non-expedited counterparts?  Intended for use within RCU.  Note
123 * that if the user specifies both rcu_expedited and rcu_normal, then
124 * rcu_normal wins.  (Except during the time period during boot from
125 * when the first task is spawned until the rcu_exp_runtime_mode()
126 * core_initcall() is invoked, at which point everything is expedited.)
127 */
128bool rcu_gp_is_normal(void)
129{
130	return READ_ONCE(rcu_normal) &&
131	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
132}
133EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
134
135static atomic_t rcu_expedited_nesting =
136	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
137
138/*
139 * Should normal grace-period primitives be expedited?  Intended for
140 * use within RCU.  Note that this function takes the rcu_expedited
141 * sysfs/boot variable and rcu_scheduler_active into account as well
142 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
143 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
144 */
145bool rcu_gp_is_expedited(void)
146{
147	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
148	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
149}
150EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
151
152/**
153 * rcu_expedite_gp - Expedite future RCU grace periods
154 *
155 * After a call to this function, future calls to synchronize_rcu() and
156 * friends act as the corresponding synchronize_rcu_expedited() function
157 * had instead been called.
158 */
159void rcu_expedite_gp(void)
160{
161	atomic_inc(&rcu_expedited_nesting);
162}
163EXPORT_SYMBOL_GPL(rcu_expedite_gp);
164
165/**
166 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
167 *
168 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
169 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
170 * and if the rcu_expedited sysfs/boot parameter is not set, then all
171 * subsequent calls to synchronize_rcu() and friends will return to
172 * their normal non-expedited behavior.
173 */
174void rcu_unexpedite_gp(void)
175{
176	atomic_dec(&rcu_expedited_nesting);
177}
178EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
179
180/*
181 * Inform RCU of the end of the in-kernel boot sequence.
182 */
183void rcu_end_inkernel_boot(void)
184{
185	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
186		rcu_unexpedite_gp();
187	if (rcu_normal_after_boot)
188		WRITE_ONCE(rcu_normal, 1);
189}
190
191#endif /* #ifndef CONFIG_TINY_RCU */
192
193#ifdef CONFIG_PREEMPT_RCU
194
195/*
196 * Preemptible RCU implementation for rcu_read_lock().
197 * Just increment ->rcu_read_lock_nesting, shared state will be updated
198 * if we block.
199 */
200void __rcu_read_lock(void)
201{
202	current->rcu_read_lock_nesting++;
203	barrier();  /* critical section after entry code. */
204}
205EXPORT_SYMBOL_GPL(__rcu_read_lock);
206
207/*
208 * Preemptible RCU implementation for rcu_read_unlock().
209 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
210 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
211 * invoke rcu_read_unlock_special() to clean up after a context switch
212 * in an RCU read-side critical section and other special cases.
213 */
214void __rcu_read_unlock(void)
215{
216	struct task_struct *t = current;
217
218	if (t->rcu_read_lock_nesting != 1) {
219		--t->rcu_read_lock_nesting;
220	} else {
221		barrier();  /* critical section before exit code. */
222		t->rcu_read_lock_nesting = INT_MIN;
223		barrier();  /* assign before ->rcu_read_unlock_special load */
224		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
225			rcu_read_unlock_special(t);
226		barrier();  /* ->rcu_read_unlock_special load before assign */
227		t->rcu_read_lock_nesting = 0;
228	}
229#ifdef CONFIG_PROVE_LOCKING
230	{
231		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
232
233		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
234	}
235#endif /* #ifdef CONFIG_PROVE_LOCKING */
236}
237EXPORT_SYMBOL_GPL(__rcu_read_unlock);
238
239#endif /* #ifdef CONFIG_PREEMPT_RCU */
240
241#ifdef CONFIG_DEBUG_LOCK_ALLOC
242static struct lock_class_key rcu_lock_key;
243struct lockdep_map rcu_lock_map =
244	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
245EXPORT_SYMBOL_GPL(rcu_lock_map);
246
247static struct lock_class_key rcu_bh_lock_key;
248struct lockdep_map rcu_bh_lock_map =
249	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
250EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
251
252static struct lock_class_key rcu_sched_lock_key;
253struct lockdep_map rcu_sched_lock_map =
254	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
255EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
256
257static struct lock_class_key rcu_callback_key;
258struct lockdep_map rcu_callback_map =
259	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
260EXPORT_SYMBOL_GPL(rcu_callback_map);
261
262int notrace debug_lockdep_rcu_enabled(void)
263{
264	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
265	       current->lockdep_recursion == 0;
266}
267EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
268
269/**
270 * rcu_read_lock_held() - might we be in RCU read-side critical section?
271 *
272 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
273 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
274 * this assumes we are in an RCU read-side critical section unless it can
275 * prove otherwise.  This is useful for debug checks in functions that
276 * require that they be called within an RCU read-side critical section.
277 *
278 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
279 * and while lockdep is disabled.
280 *
281 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
282 * occur in the same context, for example, it is illegal to invoke
283 * rcu_read_unlock() in process context if the matching rcu_read_lock()
284 * was invoked from within an irq handler.
285 *
286 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
287 * offline from an RCU perspective, so check for those as well.
288 */
289int rcu_read_lock_held(void)
290{
291	if (!debug_lockdep_rcu_enabled())
292		return 1;
293	if (!rcu_is_watching())
294		return 0;
295	if (!rcu_lockdep_current_cpu_online())
296		return 0;
297	return lock_is_held(&rcu_lock_map);
298}
299EXPORT_SYMBOL_GPL(rcu_read_lock_held);
300
301/**
302 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
303 *
304 * Check for bottom half being disabled, which covers both the
305 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
306 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
307 * will show the situation.  This is useful for debug checks in functions
308 * that require that they be called within an RCU read-side critical
309 * section.
310 *
311 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
312 *
313 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
314 * offline from an RCU perspective, so check for those as well.
315 */
316int rcu_read_lock_bh_held(void)
317{
318	if (!debug_lockdep_rcu_enabled())
319		return 1;
320	if (!rcu_is_watching())
321		return 0;
322	if (!rcu_lockdep_current_cpu_online())
323		return 0;
324	return in_softirq() || irqs_disabled();
325}
326EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
327
328#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
329
330/**
331 * wakeme_after_rcu() - Callback function to awaken a task after grace period
332 * @head: Pointer to rcu_head member within rcu_synchronize structure
333 *
334 * Awaken the corresponding task now that a grace period has elapsed.
335 */
336void wakeme_after_rcu(struct rcu_head *head)
337{
338	struct rcu_synchronize *rcu;
339
340	rcu = container_of(head, struct rcu_synchronize, head);
341	complete(&rcu->completion);
342}
343EXPORT_SYMBOL_GPL(wakeme_after_rcu);
344
345void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
346		   struct rcu_synchronize *rs_array)
347{
348	int i;
349
350	/* Initialize and register callbacks for each flavor specified. */
351	for (i = 0; i < n; i++) {
352		if (checktiny &&
353		    (crcu_array[i] == call_rcu ||
354		     crcu_array[i] == call_rcu_bh)) {
355			might_sleep();
356			continue;
357		}
358		init_rcu_head_on_stack(&rs_array[i].head);
359		init_completion(&rs_array[i].completion);
360		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
361	}
362
363	/* Wait for all callbacks to be invoked. */
364	for (i = 0; i < n; i++) {
365		if (checktiny &&
366		    (crcu_array[i] == call_rcu ||
367		     crcu_array[i] == call_rcu_bh))
368			continue;
369		wait_for_completion(&rs_array[i].completion);
370		destroy_rcu_head_on_stack(&rs_array[i].head);
371	}
372}
373EXPORT_SYMBOL_GPL(__wait_rcu_gp);
374
375#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
376void init_rcu_head(struct rcu_head *head)
377{
378	debug_object_init(head, &rcuhead_debug_descr);
379}
380
381void destroy_rcu_head(struct rcu_head *head)
382{
383	debug_object_free(head, &rcuhead_debug_descr);
384}
385
386static bool rcuhead_is_static_object(void *addr)
387{
388	return true;
389}
390
391/**
392 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
393 * @head: pointer to rcu_head structure to be initialized
394 *
395 * This function informs debugobjects of a new rcu_head structure that
396 * has been allocated as an auto variable on the stack.  This function
397 * is not required for rcu_head structures that are statically defined or
398 * that are dynamically allocated on the heap.  This function has no
399 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
400 */
401void init_rcu_head_on_stack(struct rcu_head *head)
402{
403	debug_object_init_on_stack(head, &rcuhead_debug_descr);
404}
405EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
406
407/**
408 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
409 * @head: pointer to rcu_head structure to be initialized
410 *
411 * This function informs debugobjects that an on-stack rcu_head structure
412 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
413 * function is not required for rcu_head structures that are statically
414 * defined or that are dynamically allocated on the heap.  Also as with
415 * init_rcu_head_on_stack(), this function has no effect for
416 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
417 */
418void destroy_rcu_head_on_stack(struct rcu_head *head)
419{
420	debug_object_free(head, &rcuhead_debug_descr);
421}
422EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
423
424struct debug_obj_descr rcuhead_debug_descr = {
425	.name = "rcu_head",
426	.is_static_object = rcuhead_is_static_object,
427};
428EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
429#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
430
431#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
432void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
433			       unsigned long secs,
434			       unsigned long c_old, unsigned long c)
435{
436	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
437}
438EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
439#else
440#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
441	do { } while (0)
442#endif
443
444#ifdef CONFIG_RCU_STALL_COMMON
445
446#ifdef CONFIG_PROVE_RCU
447#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
448#else
449#define RCU_STALL_DELAY_DELTA	       0
450#endif
451
452int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
453static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
454
455module_param(rcu_cpu_stall_suppress, int, 0644);
456module_param(rcu_cpu_stall_timeout, int, 0644);
457
458int rcu_jiffies_till_stall_check(void)
459{
460	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
461
462	/*
463	 * Limit check must be consistent with the Kconfig limits
464	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
465	 */
466	if (till_stall_check < 3) {
467		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
468		till_stall_check = 3;
469	} else if (till_stall_check > 300) {
470		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
471		till_stall_check = 300;
472	}
473	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
474}
475
476void rcu_sysrq_start(void)
477{
478	if (!rcu_cpu_stall_suppress)
479		rcu_cpu_stall_suppress = 2;
480}
481
482void rcu_sysrq_end(void)
483{
484	if (rcu_cpu_stall_suppress == 2)
485		rcu_cpu_stall_suppress = 0;
486}
487
488static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
489{
490	rcu_cpu_stall_suppress = 1;
491	return NOTIFY_DONE;
492}
493
494static struct notifier_block rcu_panic_block = {
495	.notifier_call = rcu_panic,
496};
497
498static int __init check_cpu_stall_init(void)
499{
500	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
501	return 0;
502}
503early_initcall(check_cpu_stall_init);
504
505#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
506
507#ifdef CONFIG_TASKS_RCU
508
509/*
510 * Simple variant of RCU whose quiescent states are voluntary context switch,
511 * user-space execution, and idle.  As such, grace periods can take one good
512 * long time.  There are no read-side primitives similar to rcu_read_lock()
513 * and rcu_read_unlock() because this implementation is intended to get
514 * the system into a safe state for some of the manipulations involved in
515 * tracing and the like.  Finally, this implementation does not support
516 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
517 * per-CPU callback lists will be needed.
518 */
519
520/* Global list of callbacks and associated lock. */
521static struct rcu_head *rcu_tasks_cbs_head;
522static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
523static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
524static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
525
526/* Track exiting tasks in order to allow them to be waited for. */
527DEFINE_SRCU(tasks_rcu_exit_srcu);
528
529/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
530static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
531module_param(rcu_task_stall_timeout, int, 0644);
532
533static void rcu_spawn_tasks_kthread(void);
534static struct task_struct *rcu_tasks_kthread_ptr;
535
536/*
537 * Post an RCU-tasks callback.  First call must be from process context
538 * after the scheduler is fully operational.
539 */
540void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
541{
542	unsigned long flags;
543	bool needwake;
544	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);
545
546	rhp->next = NULL;
547	rhp->func = func;
548	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
549	needwake = !rcu_tasks_cbs_head;
550	*rcu_tasks_cbs_tail = rhp;
551	rcu_tasks_cbs_tail = &rhp->next;
552	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
553	/* We can't create the thread unless interrupts are enabled. */
554	if ((needwake && havetask) ||
555	    (!havetask && !irqs_disabled_flags(flags))) {
556		rcu_spawn_tasks_kthread();
557		wake_up(&rcu_tasks_cbs_wq);
558	}
559}
560EXPORT_SYMBOL_GPL(call_rcu_tasks);
561
562/**
563 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
564 *
565 * Control will return to the caller some time after a full rcu-tasks
566 * grace period has elapsed, in other words after all currently
567 * executing rcu-tasks read-side critical sections have elapsed.  These
568 * read-side critical sections are delimited by calls to schedule(),
569 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
570 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
571 *
572 * This is a very specialized primitive, intended only for a few uses in
573 * tracing and other situations requiring manipulation of function
574 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
575 * is not (yet) intended for heavy use from multiple CPUs.
576 *
577 * Note that this guarantee implies further memory-ordering guarantees.
578 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
579 * each CPU is guaranteed to have executed a full memory barrier since the
580 * end of its last RCU-tasks read-side critical section whose beginning
581 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
582 * having an RCU-tasks read-side critical section that extends beyond
583 * the return from synchronize_rcu_tasks() is guaranteed to have executed
584 * a full memory barrier after the beginning of synchronize_rcu_tasks()
585 * and before the beginning of that RCU-tasks read-side critical section.
586 * Note that these guarantees include CPUs that are offline, idle, or
587 * executing in user mode, as well as CPUs that are executing in the kernel.
588 *
589 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
590 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
591 * to have executed a full memory barrier during the execution of
592 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
593 * (but again only if the system has more than one CPU).
594 */
595void synchronize_rcu_tasks(void)
596{
597	/* Complain if the scheduler has not started.  */
598	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
599			 "synchronize_rcu_tasks called too soon");
600
601	/* Wait for the grace period. */
602	wait_rcu_gp(call_rcu_tasks);
603}
604EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
605
606/**
607 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
608 *
609 * Although the current implementation is guaranteed to wait, it is not
610 * obligated to, for example, if there are no pending callbacks.
611 */
612void rcu_barrier_tasks(void)
613{
614	/* There is only one callback queue, so this is easy.  ;-) */
615	synchronize_rcu_tasks();
616}
617EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
618
619/* See if tasks are still holding out, complain if so. */
620static void check_holdout_task(struct task_struct *t,
621			       bool needreport, bool *firstreport)
622{
623	int cpu;
624
625	if (!READ_ONCE(t->rcu_tasks_holdout) ||
626	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
627	    !READ_ONCE(t->on_rq) ||
628	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
629	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
630		WRITE_ONCE(t->rcu_tasks_holdout, false);
631		list_del_init(&t->rcu_tasks_holdout_list);
632		put_task_struct(t);
633		return;
634	}
635	if (!needreport)
636		return;
637	if (*firstreport) {
638		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
639		*firstreport = false;
640	}
641	cpu = task_cpu(t);
642	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
643		 t, ".I"[is_idle_task(t)],
644		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
645		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
646		 t->rcu_tasks_idle_cpu, cpu);
647	sched_show_task(t);
648}
649
650/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
651static int __noreturn rcu_tasks_kthread(void *arg)
652{
653	unsigned long flags;
654	struct task_struct *g, *t;
655	unsigned long lastreport;
656	struct rcu_head *list;
657	struct rcu_head *next;
658	LIST_HEAD(rcu_tasks_holdouts);
659
660	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
661	housekeeping_affine(current);
662
663	/*
664	 * Each pass through the following loop makes one check for
665	 * newly arrived callbacks, and, if there are some, waits for
666	 * one RCU-tasks grace period and then invokes the callbacks.
667	 * This loop is terminated by the system going down.  ;-)
668	 */
669	for (;;) {
670
671		/* Pick up any new callbacks. */
672		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
673		list = rcu_tasks_cbs_head;
674		rcu_tasks_cbs_head = NULL;
675		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
676		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
677
678		/* If there were none, wait a bit and start over. */
679		if (!list) {
680			wait_event_interruptible(rcu_tasks_cbs_wq,
681						 rcu_tasks_cbs_head);
682			if (!rcu_tasks_cbs_head) {
683				WARN_ON(signal_pending(current));
684				schedule_timeout_interruptible(HZ/10);
685			}
686			continue;
687		}
688
689		/*
690		 * Wait for all pre-existing t->on_rq and t->nvcsw
691		 * transitions to complete.  Invoking synchronize_sched()
692		 * suffices because all these transitions occur with
693		 * interrupts disabled.  Without this synchronize_sched(),
694		 * a read-side critical section that started before the
695		 * grace period might be incorrectly seen as having started
696		 * after the grace period.
697		 *
698		 * This synchronize_sched() also dispenses with the
699		 * need for a memory barrier on the first store to
700		 * ->rcu_tasks_holdout, as it forces the store to happen
701		 * after the beginning of the grace period.
702		 */
703		synchronize_sched();
704
705		/*
706		 * There were callbacks, so we need to wait for an
707		 * RCU-tasks grace period.  Start off by scanning
708		 * the task list for tasks that are not already
709		 * voluntarily blocked.  Mark these tasks and make
710		 * a list of them in rcu_tasks_holdouts.
711		 */
712		rcu_read_lock();
713		for_each_process_thread(g, t) {
714			if (t != current && READ_ONCE(t->on_rq) &&
715			    !is_idle_task(t)) {
716				get_task_struct(t);
717				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
718				WRITE_ONCE(t->rcu_tasks_holdout, true);
719				list_add(&t->rcu_tasks_holdout_list,
720					 &rcu_tasks_holdouts);
721			}
722		}
723		rcu_read_unlock();
724
725		/*
726		 * Wait for tasks that are in the process of exiting.
727		 * This does only part of the job, ensuring that all
728		 * tasks that were previously exiting reach the point
729		 * where they have disabled preemption, allowing the
730		 * later synchronize_sched() to finish the job.
731		 */
732		synchronize_srcu(&tasks_rcu_exit_srcu);
733
734		/*
735		 * Each pass through the following loop scans the list
736		 * of holdout tasks, removing any that are no longer
737		 * holdouts.  When the list is empty, we are done.
738		 */
739		lastreport = jiffies;
740		while (!list_empty(&rcu_tasks_holdouts)) {
741			bool firstreport;
742			bool needreport;
743			int rtst;
744			struct task_struct *t1;
745
746			schedule_timeout_interruptible(HZ);
747			rtst = READ_ONCE(rcu_task_stall_timeout);
748			needreport = rtst > 0 &&
749				     time_after(jiffies, lastreport + rtst);
750			if (needreport)
751				lastreport = jiffies;
752			firstreport = true;
753			WARN_ON(signal_pending(current));
754			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
755						rcu_tasks_holdout_list) {
756				check_holdout_task(t, needreport, &firstreport);
757				cond_resched();
758			}
759		}
760
761		/*
762		 * Because ->on_rq and ->nvcsw are not guaranteed
763		 * to have full memory barriers prior to them in the
764		 * schedule() path, memory reordering on other CPUs could
765		 * cause their RCU-tasks read-side critical sections to
766		 * extend past the end of the grace period.  However,
767		 * because these ->nvcsw updates are carried out with
768		 * interrupts disabled, we can use synchronize_sched()
769		 * to force the needed ordering on all such CPUs.
770		 *
771		 * This synchronize_sched() also confines all
772		 * ->rcu_tasks_holdout accesses to be within the grace
773		 * period, avoiding the need for memory barriers for
774		 * ->rcu_tasks_holdout accesses.
775		 *
776		 * In addition, this synchronize_sched() waits for exiting
777		 * tasks to complete their final preempt_disable() region
778		 * of execution, cleaning up after the synchronize_srcu()
779		 * above.
780		 */
781		synchronize_sched();
782
783		/* Invoke the callbacks. */
784		while (list) {
785			next = list->next;
786			local_bh_disable();
787			list->func(list);
788			local_bh_enable();
789			list = next;
790			cond_resched();
791		}
792		schedule_timeout_uninterruptible(HZ/10);
793	}
794}
795
796/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
797static void rcu_spawn_tasks_kthread(void)
798{
799	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
800	struct task_struct *t;
801
802	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
803		smp_mb(); /* Ensure caller sees full kthread. */
804		return;
805	}
806	mutex_lock(&rcu_tasks_kthread_mutex);
807	if (rcu_tasks_kthread_ptr) {
808		mutex_unlock(&rcu_tasks_kthread_mutex);
809		return;
810	}
811	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
812	BUG_ON(IS_ERR(t));
813	smp_mb(); /* Ensure others see full kthread. */
814	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
815	mutex_unlock(&rcu_tasks_kthread_mutex);
816}
817
818#endif /* #ifdef CONFIG_TASKS_RCU */
819
820/*
821 * Test each non-SRCU synchronous grace-period wait API.  This is
822 * useful just after a change in mode for these primitives, and
823 * during early boot.
824 */
825void rcu_test_sync_prims(void)
826{
827	if (!IS_ENABLED(CONFIG_PROVE_RCU))
828		return;
829	synchronize_rcu();
830	synchronize_rcu_bh();
831	synchronize_sched();
832	synchronize_rcu_expedited();
833	synchronize_rcu_bh_expedited();
834	synchronize_sched_expedited();
835}
836
837#ifdef CONFIG_PROVE_RCU
838
839/*
840 * Early boot self test parameters, one for each flavor
841 */
842static bool rcu_self_test;
843static bool rcu_self_test_bh;
844static bool rcu_self_test_sched;
845
846module_param(rcu_self_test, bool, 0444);
847module_param(rcu_self_test_bh, bool, 0444);
848module_param(rcu_self_test_sched, bool, 0444);
849
850static int rcu_self_test_counter;
851
852static void test_callback(struct rcu_head *r)
853{
854	rcu_self_test_counter++;
855	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
856}
857
858static void early_boot_test_call_rcu(void)
859{
860	static struct rcu_head head;
861
862	call_rcu(&head, test_callback);
863}
864
865static void early_boot_test_call_rcu_bh(void)
866{
867	static struct rcu_head head;
868
869	call_rcu_bh(&head, test_callback);
870}
871
872static void early_boot_test_call_rcu_sched(void)
873{
874	static struct rcu_head head;
875
876	call_rcu_sched(&head, test_callback);
877}
878
879void rcu_early_boot_tests(void)
880{
881	pr_info("Running RCU self tests\n");
882
883	if (rcu_self_test)
884		early_boot_test_call_rcu();
885	if (rcu_self_test_bh)
886		early_boot_test_call_rcu_bh();
887	if (rcu_self_test_sched)
888		early_boot_test_call_rcu_sched();
889	rcu_test_sync_prims();
890}
891
892static int rcu_verify_early_boot_tests(void)
893{
894	int ret = 0;
895	int early_boot_test_counter = 0;
896
897	if (rcu_self_test) {
898		early_boot_test_counter++;
899		rcu_barrier();
900	}
901	if (rcu_self_test_bh) {
902		early_boot_test_counter++;
903		rcu_barrier_bh();
904	}
905	if (rcu_self_test_sched) {
906		early_boot_test_counter++;
907		rcu_barrier_sched();
908	}
909
910	if (rcu_self_test_counter != early_boot_test_counter) {
911		WARN_ON(1);
912		ret = -1;
913	}
914
915	return ret;
916}
917late_initcall(rcu_verify_early_boot_tests);
918#else
919void rcu_early_boot_tests(void) {}
920#endif /* CONFIG_PROVE_RCU */