// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

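/*
 * Illustrative sketch, not part of the original source: the extendables
 * module parameter below is a bitmask built from the RCUTORTURE_RDR_*
 * bits above, so restricting reader extension to bh-disable plus
 * preempt-disable sections would look like:
 *
 *	extendables_example = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT;
 */
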
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
		    "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
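/*
 * Illustrative usage sketch, not part of the original source: like any
 * module parameters, these can be supplied at load time or on the kernel
 * boot line (the values below are arbitrary examples):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *	rcutorture.torture_type=rcu rcutorture.test_boost=2
 */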

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_completed)(void);
	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
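/*
 * Illustrative sketch, an assumption rather than code from this file:
 * the smp_store_release() above pairs with a reader-side acquire load,
 * roughly:
 *
 *	if (smp_load_acquire(&rtrcp->rtc_ready))
 *		;  // All writes preceding the store_release are visible.
 */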

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.same_gp_state		= same_state_synchronize_rcu,
	.same_gp_state_full	= same_state_synchronize_rcu_full,
	.get_comp_state		= get_completed_synchronize_rcu,
	.get_comp_state_full	= get_completed_synchronize_rcu_full,
	.get_gp_state		= get_state_synchronize_rcu,
	.get_gp_state_full	= get_state_synchronize_rcu_full,
	.get_gp_completed	= get_completed_synchronize_rcu,
	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
	.poll_need_2gp		= rcu_poll_need_2gp,
	.cond_sync		= cond_synchronize_rcu,
	.cond_sync_full		= cond_synchronize_rcu_full,
	.get_gp_state_exp	= get_state_synchronize_rcu,
	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp	= poll_state_synchronize_rcu,
	.cond_sync_exp		= cond_synchronize_rcu_expedited,
	.call			= call_rcu_hurry,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.stats			= NULL,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};

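/*
 * Illustrative sketch, not part of the original source: typical use of
 * the polled grace-period API wired into rcu_ops above, for a caller
 * that can sleep (the helper name is made up).
 */
static void __maybe_unused example_polled_gp_wait(void)
{
	unsigned long gp_snap = get_state_synchronize_rcu();

	/* ... unlink an element from an RCU-protected structure ... */
	while (!poll_state_synchronize_rcu(gp_snap))
		schedule_timeout_uninterruptible(1);
	/* A full grace period has elapsed; the element may now be freed. */
}
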
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static int srcu_torture_read_lock(void)
{
	if (cur_ops == &srcud_ops)
		return srcu_read_lock_nmisafe(srcu_ctlp);
	else
		return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	if (cur_ops == &srcud_ops)
		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
	else
		srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcu"
};
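/*
 * Illustrative sketch, not part of the original source: the SRCU reader
 * pattern that the readlock/readunlock hooks above exercise (the helper
 * name is made up).
 */
static void __maybe_unused example_srcu_reader(void)
{
	int idx = srcu_read_lock(srcu_ctlp);

	/* ... dereference data protected by srcu_ctlp here ... */
	srcu_read_unlock(srcu_ctlp, idx);
}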

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
1387		 * The resulting test won't be testing much, hence the
1388		 * above WARN_ONCE().
1389		 */
1390		rcu_torture_writer_state = RTWS_STOPPING;
1391		torture_kthread_stopping("rcu_torture_writer");
1392		return 0;
1393	}
1394
1395	do {
1396		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1397		torture_hrtimeout_us(500, 1000, &rand);
1398		rp = rcu_torture_alloc();
1399		if (rp == NULL)
1400			continue;
1401		rp->rtort_pipe_count = 0;
1402		rcu_torture_writer_state = RTWS_DELAY;
1403		udelay(torture_random(&rand) & 0x3ff);
1404		rcu_torture_writer_state = RTWS_REPLACE;
1405		old_rp = rcu_dereference_check(rcu_torture_current,
1406					       current == writer_task);
1407		rp->rtort_mbtest = 1;
1408		rcu_assign_pointer(rcu_torture_current, rp);
1409		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1410		if (old_rp) {
1411			i = old_rp->rtort_pipe_count;
1412			if (i > RCU_TORTURE_PIPE_LEN)
1413				i = RCU_TORTURE_PIPE_LEN;
1414			atomic_inc(&rcu_torture_wcount[i]);
1415			WRITE_ONCE(old_rp->rtort_pipe_count,
1416				   old_rp->rtort_pipe_count + 1);
1417
1418			// Make sure readers block polled grace periods.
1419			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1420				idx = cur_ops->readlock();
1421				cookie = cur_ops->get_gp_state();
1422				WARN_ONCE(cur_ops->poll_gp_state(cookie),
1423					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1424					  __func__,
1425					  rcu_torture_writer_state_getname(),
1426					  rcu_torture_writer_state,
1427					  cookie, cur_ops->get_gp_state());
1428				if (cur_ops->get_gp_completed) {
1429					cookie = cur_ops->get_gp_completed();
1430					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1431				}
1432				cur_ops->readunlock(idx);
1433			}
1434			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1435				idx = cur_ops->readlock();
1436				cur_ops->get_gp_state_full(&cookie_full);
1437				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1438					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1439					  __func__,
1440					  rcu_torture_writer_state_getname(),
1441					  rcu_torture_writer_state,
1442					  cpumask_pr_args(cpu_online_mask));
1443				if (cur_ops->get_gp_completed_full) {
1444					cur_ops->get_gp_completed_full(&cookie_full);
1445					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1446				}
1447				cur_ops->readunlock(idx);
1448			}
1449			switch (synctype[torture_random(&rand) % nsynctypes]) {
1450			case RTWS_DEF_FREE:
1451				rcu_torture_writer_state = RTWS_DEF_FREE;
1452				cur_ops->deferred_free(old_rp);
1453				break;
1454			case RTWS_EXP_SYNC:
1455				rcu_torture_writer_state = RTWS_EXP_SYNC;
1456				do_rtws_sync(&rand, cur_ops->exp_sync);
1457				rcu_torture_pipe_update(old_rp);
1458				break;
1459			case RTWS_COND_GET:
1460				rcu_torture_writer_state = RTWS_COND_GET;
1461				gp_snap = cur_ops->get_gp_state();
1462				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
 
 
 
1463				rcu_torture_writer_state = RTWS_COND_SYNC;
1464				cur_ops->cond_sync(gp_snap);
1465				rcu_torture_pipe_update(old_rp);
1466				break;
1467			case RTWS_COND_GET_EXP:
1468				rcu_torture_writer_state = RTWS_COND_GET_EXP;
1469				gp_snap = cur_ops->get_gp_state_exp();
1470				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1471				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1472				cur_ops->cond_sync_exp(gp_snap);
1473				rcu_torture_pipe_update(old_rp);
1474				break;
1475			case RTWS_COND_GET_FULL:
1476				rcu_torture_writer_state = RTWS_COND_GET_FULL;
1477				cur_ops->get_gp_state_full(&gp_snap_full);
1478				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1479				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
1480				cur_ops->cond_sync_full(&gp_snap_full);
1481				rcu_torture_pipe_update(old_rp);
1482				break;
1483			case RTWS_COND_GET_EXP_FULL:
1484				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1485				cur_ops->get_gp_state_full(&gp_snap_full);
1486				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1487				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1488				cur_ops->cond_sync_exp_full(&gp_snap_full);
1489				rcu_torture_pipe_update(old_rp);
1490				break;
1491			case RTWS_POLL_GET:
1492				rcu_torture_writer_state = RTWS_POLL_GET;
1493				for (i = 0; i < ARRAY_SIZE(ulo); i++)
1494					ulo[i] = cur_ops->get_comp_state();
1495				gp_snap = cur_ops->start_gp_poll();
1496				rcu_torture_writer_state = RTWS_POLL_WAIT;
1497				while (!cur_ops->poll_gp_state(gp_snap)) {
1498					gp_snap1 = cur_ops->get_gp_state();
1499					for (i = 0; i < ARRAY_SIZE(ulo); i++)
1500						if (cur_ops->poll_gp_state(ulo[i]) ||
1501						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1502							ulo[i] = gp_snap1;
1503							break;
1504						}
1505					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
1506					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1507								  &rand);
1508				}
1509				rcu_torture_pipe_update(old_rp);
1510				break;
1511			case RTWS_POLL_GET_FULL:
1512				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1513				for (i = 0; i < ARRAY_SIZE(rgo); i++)
1514					cur_ops->get_comp_state_full(&rgo[i]);
1515				cur_ops->start_gp_poll_full(&gp_snap_full);
1516				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1517				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1518					cur_ops->get_gp_state_full(&gp_snap1_full);
1519					for (i = 0; i < ARRAY_SIZE(rgo); i++)
1520						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1521						    cur_ops->same_gp_state_full(&rgo[i],
1522										&gp_snap1_full)) {
1523							rgo[i] = gp_snap1_full;
1524							break;
1525						}
1526					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
1527					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1528								  &rand);
1529				}
1530				rcu_torture_pipe_update(old_rp);
1531				break;
1532			case RTWS_POLL_GET_EXP:
1533				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1534				gp_snap = cur_ops->start_gp_poll_exp();
1535				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1536				while (!cur_ops->poll_gp_state_exp(gp_snap))
1537					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1538								  &rand);
1539				rcu_torture_pipe_update(old_rp);
1540				break;
1541			case RTWS_POLL_GET_EXP_FULL:
1542				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1543				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1544				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1545				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1546					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1547								  &rand);
1548				rcu_torture_pipe_update(old_rp);
1549				break;
1550			case RTWS_SYNC:
1551				rcu_torture_writer_state = RTWS_SYNC;
1552				do_rtws_sync(&rand, cur_ops->sync);
1553				rcu_torture_pipe_update(old_rp);
1554				break;
1555			default:
1556				WARN_ON_ONCE(1);
1557				break;
1558			}
1559		}
1560		WRITE_ONCE(rcu_torture_current_version,
1561			   rcu_torture_current_version + 1);
1562		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1563		if (can_expedite &&
1564		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1565			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1566			if (expediting >= 0)
1567				rcu_expedite_gp();
1568			else
1569				rcu_unexpedite_gp();
1570			if (++expediting > 3)
1571				expediting = -expediting;
1572		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1573			can_expedite = !rcu_gp_is_expedited() &&
1574				       !rcu_gp_is_normal();
1575		}
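    		/*
    		 * Note that (!!expediting - 1) makes the random gate apply
    		 * only when expediting == 0: a new cycle starts with
    		 * probability 1/256 per pass, but once started it runs to
    		 * completion.  The counter climbs 1..4, flips to -4, and
    		 * counts back up to 0, pairing each rcu_expedite_gp() with
    		 * an rcu_unexpedite_gp().
    		 */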
1576		rcu_torture_writer_state = RTWS_STUTTER;
1577		boot_ended = rcu_inkernel_boot_has_ended();
1578		stutter_waited = stutter_wait("rcu_torture_writer");
1579		if (stutter_waited &&
1580		    !atomic_read(&rcu_fwd_cb_nodelay) &&
1581		    !cur_ops->slow_gps &&
1582		    !torture_must_stop() &&
1583		    boot_ended &&
1584		    time_after(jiffies, stallsdone))
1585			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1586				if (list_empty(&rcu_tortures[i].rtort_free) &&
1587				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
1588					tracing_off();
1589					show_rcu_gp_kthreads();
1590					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1591					rcu_ftrace_dump(DUMP_ALL);
1592				}
1593		if (stutter_waited)
1594			sched_set_normal(current, oldnice);
1595	} while (!torture_must_stop());
1596	rcu_torture_current = NULL;  // Let stats task know that we are done.
1597	/* Reset expediting back to unexpedited. */
1598	if (expediting > 0)
1599		expediting = -expediting;
1600	while (can_expedite && expediting++ < 0)
1601		rcu_unexpedite_gp();
1602	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1603	if (!can_expedite)
1604		pr_alert("%s" TORTURE_FLAG
1605			 " Dynamic grace-period expediting was disabled.\n",
1606			 torture_type);
1607	rcu_torture_writer_state = RTWS_STOPPING;
1608	torture_kthread_stopping("rcu_torture_writer");
1609	return 0;
1610}
1611
1612/*
1614 * RCU torture fake writer kthread.  Repeatedly invokes a randomly
1615 * chosen grace-period primitive, with a random delay between invocations.
1615 */
1616static int
1617rcu_torture_fakewriter(void *arg)
1618{
1619	unsigned long gp_snap;
1620	struct rcu_gp_oldstate gp_snap_full;
1621	DEFINE_TORTURE_RANDOM(rand);
1622
1623	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1624	set_user_nice(current, MAX_NICE);
1625
1626	if (WARN_ONCE(nsynctypes == 0,
1627		      "%s: No update-side primitives.\n", __func__)) {
1628		/*
1629		 * No update-side primitives, so don't try updating.
1630		 * The resulting test won't be testing much, hence the
1631		 * above WARN_ONCE().
1632		 */
1633		torture_kthread_stopping("rcu_torture_fakewriter");
1634		return 0;
1635	}
1636
1637	do {
1638		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1639		if (cur_ops->cb_barrier != NULL &&
1640		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1641			cur_ops->cb_barrier();
1642		} else {
1643			switch (synctype[torture_random(&rand) % nsynctypes]) {
1644			case RTWS_DEF_FREE:
1645				break;
1646			case RTWS_EXP_SYNC:
1647				cur_ops->exp_sync();
1648				break;
1649			case RTWS_COND_GET:
1650				gp_snap = cur_ops->get_gp_state();
1651				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1652				cur_ops->cond_sync(gp_snap);
1653				break;
1654			case RTWS_COND_GET_EXP:
1655				gp_snap = cur_ops->get_gp_state_exp();
1656				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1657				cur_ops->cond_sync_exp(gp_snap);
1658				break;
1659			case RTWS_COND_GET_FULL:
1660				cur_ops->get_gp_state_full(&gp_snap_full);
1661				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1662				cur_ops->cond_sync_full(&gp_snap_full);
1663				break;
1664			case RTWS_COND_GET_EXP_FULL:
1665				cur_ops->get_gp_state_full(&gp_snap_full);
1666				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1667				cur_ops->cond_sync_exp_full(&gp_snap_full);
1668				break;
1669			case RTWS_POLL_GET:
1670				gp_snap = cur_ops->start_gp_poll();
1671				while (!cur_ops->poll_gp_state(gp_snap)) {
1672					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1673								  &rand);
1674				}
1675				break;
1676			case RTWS_POLL_GET_FULL:
1677				cur_ops->start_gp_poll_full(&gp_snap_full);
1678				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1679					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1680								  &rand);
1681				}
1682				break;
1683			case RTWS_POLL_GET_EXP:
1684				gp_snap = cur_ops->start_gp_poll_exp();
1685				while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1686					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1687								  &rand);
1688				}
1689				break;
1690			case RTWS_POLL_GET_EXP_FULL:
1691				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1692				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1693					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1694								  &rand);
1695				}
1696				break;
1697			case RTWS_SYNC:
1698				cur_ops->sync();
1699				break;
1700			default:
1701				WARN_ON_ONCE(1);
1702				break;
1703			}
1704		}
1705		stutter_wait("rcu_torture_fakewriter");
1706	} while (!torture_must_stop());
1707
1708	torture_kthread_stopping("rcu_torture_fakewriter");
1709	return 0;
1710}
1711
1712static void rcu_torture_timer_cb(struct rcu_head *rhp)
1713{
1714	kfree(rhp);
1715}
1716
1717// Set up and carry out testing of RCU's global memory ordering
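    // Each reader bumps its own rtc_myloops counter on every read and
    // occasionally assigns a randomly chosen "checker" reader to examine a
    // third "checked" reader.  Once the grace period protecting rtp ends,
    // the checker verifies that its view of the checked reader's counter
    // has reached the snapshot taken at assignment time; a stale value
    // would mean that RCU's grace-period memory ordering failed.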
1718static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1719					struct torture_random_state *trsp)
1720{
1721	unsigned long loops;
1722	int noc = torture_num_online_cpus();
1723	int rdrchked;
1724	int rdrchker;
1725	struct rcu_torture_reader_check *rtrcp; // Me.
1726	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1727	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1728	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1729
1730	if (myid < 0)
1731		return; // Don't try this from timer handlers.
1732
1733	// Increment my counter.
1734	rtrcp = &rcu_torture_reader_mbchk[myid];
1735	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1736
1737	// Attempt to assign someone else some checking work.
1738	rdrchked = torture_random(trsp) % nrealreaders;
1739	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1740	rdrchker = torture_random(trsp) % nrealreaders;
1741	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1742	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1743	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1744	    !READ_ONCE(rtp->rtort_chkp) &&
1745	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1746		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1747		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1748		rtrcp->rtc_chkrdr = rdrchked;
1749		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1750		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1751		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1752			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1753	}
1754
1755	// If assigned some completed work, do it!
1756	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1757	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1758		return; // No work or work not yet ready.
1759	rdrchked = rtrcp_assigner->rtc_chkrdr;
1760	if (WARN_ON_ONCE(rdrchked < 0))
1761		return;
1762	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1763	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1764	atomic_inc(&n_rcu_torture_mbchk_tries);
1765	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1766		atomic_inc(&n_rcu_torture_mbchk_fail);
1767	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1768	rtrcp_assigner->rtc_ready = 0;
1769	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1770	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1771}
1772
1773/*
1774 * Do one extension of an RCU read-side critical section using the
1775 * current reader state in readstate (set to zero for initial entry
1776 * to extended critical section), set the new state as specified by
1777 * newstate (set to zero for final exit from extended critical section),
1778 * and random-number-generator state in trsp.  If this is neither the
1779 * beginning nor the end of the critical section and if there was
1780 * actually a change, do a ->read_delay().
1781 */
1782static void rcutorture_one_extend(int *readstate, int newstate,
1783				  struct torture_random_state *trsp,
1784				  struct rt_read_seg *rtrsp)
1785{
1786	unsigned long flags;
1787	int idxnew1 = -1;
1788	int idxnew2 = -1;
1789	int idxold1 = *readstate;
1790	int idxold2 = idxold1;
1791	int statesnew = ~*readstate & newstate;
1792	int statesold = *readstate & ~newstate;
1793
1794	WARN_ON_ONCE(idxold2 < 0);
1795	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1796	rtrsp->rt_readstate = newstate;
1797
1798	/* First, put new protection in place to avoid critical-section gap. */
1799	if (statesnew & RCUTORTURE_RDR_BH)
1800		local_bh_disable();
1801	if (statesnew & RCUTORTURE_RDR_RBH)
1802		rcu_read_lock_bh();
1803	if (statesnew & RCUTORTURE_RDR_IRQ)
1804		local_irq_disable();
1805	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1806		preempt_disable();
1807	if (statesnew & RCUTORTURE_RDR_SCHED)
1808		rcu_read_lock_sched();
1809	if (statesnew & RCUTORTURE_RDR_RCU_1)
1810		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1811	if (statesnew & RCUTORTURE_RDR_RCU_2)
1812		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1813
1814	/*
1815	 * Next, remove old protection, in decreasing order of strength
1816	 * to avoid unlock paths that aren't safe in the stronger
1817	 * context.  Namely: BH cannot be enabled while interrupts are
1818	 * disabled.  Additionally, PREEMPT_RT requires that BH be enabled
1819	 * from preemptible context.
1820	 */
1821	if (statesold & RCUTORTURE_RDR_IRQ)
1822		local_irq_enable();
1823	if (statesold & RCUTORTURE_RDR_PREEMPT)
1824		preempt_enable();
1825	if (statesold & RCUTORTURE_RDR_SCHED)
1826		rcu_read_unlock_sched();
1827	if (statesold & RCUTORTURE_RDR_BH)
1828		local_bh_enable();
1829	if (statesold & RCUTORTURE_RDR_RBH)
1830		rcu_read_unlock_bh();
1831	if (statesold & RCUTORTURE_RDR_RCU_2) {
1832		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1833		WARN_ON_ONCE(idxnew2 != -1);
1834		idxold2 = 0;
1835	}
1836	if (statesold & RCUTORTURE_RDR_RCU_1) {
1837		bool lockit;
1838
1839		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1840		if (lockit)
1841			raw_spin_lock_irqsave(&current->pi_lock, flags);
1842		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1843		WARN_ON_ONCE(idxnew1 != -1);
1844		idxold1 = 0;
1845		if (lockit)
1846			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1847	}
1848
1849	/* Delay if neither beginning nor end and there was a change. */
1850	if ((statesnew || statesold) && *readstate && newstate)
1851		cur_ops->read_delay(trsp, rtrsp);
1852
1853	/* Update the reader state. */
1854	if (idxnew1 == -1)
1855		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1856	WARN_ON_ONCE(idxnew1 < 0);
1857	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1858		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1859	if (idxnew2 == -1)
1860		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1861	WARN_ON_ONCE(idxnew2 < 0);
1862	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1863	*readstate = idxnew1 | idxnew2 | newstate;
1864	WARN_ON_ONCE(*readstate < 0);
1865	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1866		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1867}
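    /*
     * Worked example: with *readstate == RCUTORTURE_RDR_RCU_1 and
     * newstate == (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH), statesnew
     * is RCUTORTURE_RDR_BH and statesold is zero, so the only action is
     * local_bh_disable().  A later call with newstate == 0 then yields
     * statesold == (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH), which
     * re-enables BH before exiting the outer RCU reader, per the
     * strongest-first unlock ordering described above.
     */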
1868
1869/* Return the biggest extendables mask given current RCU and boot parameters. */
1870static int rcutorture_extend_mask_max(void)
1871{
1872	int mask;
1873
1874	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1875	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1876	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1877	return mask;
1878}
1879
1880/* Return a random protection state mask, but with at least one bit set. */
1881static int
1882rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1883{
1884	int mask = rcutorture_extend_mask_max();
1885	unsigned long randmask1 = torture_random(trsp);
1886	unsigned long randmask2 = randmask1 >> 3;
1887	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1888	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1889	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1890
1891	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1892	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1893	if (!(randmask1 & 0x7))
1894		mask = mask & randmask2;
1895	else
1896		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
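    	/* The 0x7 test passes with probability 1/8, so about one reader
    	 * segment in eight gets a multi-bit protection mask. */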
1897
1898	// Can't have nested RCU reader without outer RCU reader.
1899	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1900		if (oldmask & RCUTORTURE_RDR_RCU_1)
1901			mask &= ~RCUTORTURE_RDR_RCU_2;
1902		else
1903			mask |= RCUTORTURE_RDR_RCU_1;
1904	}
1905
1906	/*
1907	 * Can't enable bh with irqs disabled.
1908	 */
1909	if (mask & RCUTORTURE_RDR_IRQ)
1910		mask |= oldmask & bhs;
1911
1912	/*
1913	 * Ideally these sequences would be detected in debug builds
1914	 * (regardless of RT), but until then don't stop testing
1915	 * them on non-RT.
1916	 */
1917	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1918		/* Can't modify BH in atomic context */
1919		if (oldmask & preempts_irq)
1920			mask &= ~bhs;
1921		if ((oldmask | mask) & preempts_irq)
1922			mask |= oldmask & bhs;
1923	}
1924
1925	return mask ?: RCUTORTURE_RDR_RCU_1;
1926}
1927
1928/*
1929 * Do a randomly selected number of extensions of an existing RCU read-side
1930 * critical section.
1931 */
1932static struct rt_read_seg *
1933rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1934		       struct rt_read_seg *rtrsp)
1935{
1936	int i;
1937	int j;
1938	int mask = rcutorture_extend_mask_max();
1939
1940	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1941	if (!((mask - 1) & mask))
1942		return rtrsp;  /* Current RCU reader not extendable. */
1943	/* Bias towards larger numbers of loops: ORing two random samples sets more bits. */
1944	i = torture_random(trsp);
1945	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1946	for (j = 0; j < i; j++) {
1947		mask = rcutorture_extend_mask(*readstate, trsp);
1948		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1949	}
1950	return &rtrsp[j];
1951}
1952
1953/*
1954 * Do one read-side critical section, returning false if there was
1955 * no data to read.  Can be invoked both from process context and
1956 * from a timer handler.
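     * About one read in 4096 also samples polled grace-period cookies on
     * entry and warns on exit if a cookie has already expired, which would
     * mean that a grace period completed despite this running reader.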
1957 */
1958static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1959{
1960	bool checkpolling = !(torture_random(trsp) & 0xfff);
1961	unsigned long cookie;
1962	struct rcu_gp_oldstate cookie_full;
1963	int i;
1964	unsigned long started;
1965	unsigned long completed;
1966	int newstate;
1967	struct rcu_torture *p;
1968	int pipe_count;
1969	int readstate = 0;
1970	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1971	struct rt_read_seg *rtrsp = &rtseg[0];
1972	struct rt_read_seg *rtrsp1;
1973	unsigned long long ts;
1974
1975	WARN_ON_ONCE(!rcu_is_watching());
1976	newstate = rcutorture_extend_mask(readstate, trsp);
1977	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1978	if (checkpolling) {
1979		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1980			cookie = cur_ops->get_gp_state();
1981		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
1982			cur_ops->get_gp_state_full(&cookie_full);
1983	}
1984	started = cur_ops->get_gp_seq();
1985	ts = rcu_trace_clock_local();
1986	p = rcu_dereference_check(rcu_torture_current,
1987				  !cur_ops->readlock_held || cur_ops->readlock_held());
1988	if (p == NULL) {
1989		/* Wait for rcu_torture_writer to get underway */
1990		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1991		return false;
1992	}
1993	if (p->rtort_mbtest == 0)
1994		atomic_inc(&n_rcu_torture_mberror);
1995	rcu_torture_reader_do_mbchk(myid, p, trsp);
1996	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1997	preempt_disable();
1998	pipe_count = READ_ONCE(p->rtort_pipe_count);
1999	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
2000		/* Should not happen, but... */
2001		pipe_count = RCU_TORTURE_PIPE_LEN;
2002	}
2003	completed = cur_ops->get_gp_seq();
2004	if (pipe_count > 1) {
2005		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
2006					  ts, started, completed);
2007		rcu_ftrace_dump(DUMP_ALL);
2008	}
2009	__this_cpu_inc(rcu_torture_count[pipe_count]);
2010	completed = rcutorture_seq_diff(completed, started);
2011	if (completed > RCU_TORTURE_PIPE_LEN) {
2012		/* Should not happen, but... */
2013		completed = RCU_TORTURE_PIPE_LEN;
2014	}
2015	__this_cpu_inc(rcu_torture_batch[completed]);
2016	preempt_enable();
2017	if (checkpolling) {
2018		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2019			WARN_ONCE(cur_ops->poll_gp_state(cookie),
2020				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2021				  __func__,
2022				  rcu_torture_writer_state_getname(),
2023				  rcu_torture_writer_state,
2024				  cookie, cur_ops->get_gp_state());
2025		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2026			WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2027				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2028				  __func__,
2029				  rcu_torture_writer_state_getname(),
2030				  rcu_torture_writer_state,
2031				  cpumask_pr_args(cpu_online_mask));
2032	}
2033	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2034	WARN_ON_ONCE(readstate);
2035	// This next splat is expected behavior if leakpointer is set, especially
2036	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2037	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2038
2039	/* If error or close call, record the sequence of reader protections. */
2040	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2041		i = 0;
2042		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2043			err_segs[i++] = *rtrsp1;
2044		rt_read_nsegs = i;
2045	}
2046
2047	return true;
2048}
2049
2050static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2051
2052/*
2053 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
2054 * incrementing the corresponding element of the pipeline array.  The
2055 * counter in the element should never be greater than 1; otherwise, the
2056 * RCU implementation is broken.
2057 */
2058static void rcu_torture_timer(struct timer_list *unused)
2059{
2060	atomic_long_inc(&n_rcu_torture_timers);
2061	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2062
2063	/* Test call_rcu() invocation from interrupt handler. */
2064	if (cur_ops->call) {
2065		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2066
2067		if (rhp)
2068			cur_ops->call(rhp, rcu_torture_timer_cb);
2069	}
2070}
2071
2072/*
2073 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
2074 * incrementing the corresponding element of the pipeline array.  The
2075 * counter in the element should never be greater than 1; otherwise, the
2076 * RCU implementation is broken.
2077 */
2078static int
2079rcu_torture_reader(void *arg)
2080{
2081	unsigned long lastsleep = jiffies;
2082	long myid = (long)arg;
2083	int mynumonline = myid;
2084	DEFINE_TORTURE_RANDOM(rand);
2085	struct timer_list t;
2086
2087	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2088	set_user_nice(current, MAX_NICE);
2089	if (irqreader && cur_ops->irq_capable)
2090		timer_setup_on_stack(&t, rcu_torture_timer, 0);
2091	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2092	do {
2093		if (irqreader && cur_ops->irq_capable) {
2094			if (!timer_pending(&t))
2095				mod_timer(&t, jiffies + 1);
2096		}
2097		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2098			schedule_timeout_interruptible(HZ);
2099		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2100			torture_hrtimeout_us(500, 1000, &rand);
2101			lastsleep = jiffies + 10;
2102		}
2103		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2104			schedule_timeout_interruptible(HZ / 5);
2105		stutter_wait("rcu_torture_reader");
2106	} while (!torture_must_stop());
2107	if (irqreader && cur_ops->irq_capable) {
2108		del_timer_sync(&t);
2109		destroy_timer_on_stack(&t);
2110	}
2111	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2112	torture_kthread_stopping("rcu_torture_reader");
2113	return 0;
2114}
2115
2116/*
2117 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
2118 * increase race probabilities and fuzzes the interval between toggling.
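     * The delay between toggles is the nocbs_toggle interval (milliseconds)
     * plus a random fuzz of up to one-eighth of that interval.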
2119 */
2120static int rcu_nocb_toggle(void *arg)
2121{
2122	int cpu;
2123	int maxcpu = -1;
2124	int oldnice = task_nice(current);
2125	long r;
2126	DEFINE_TORTURE_RANDOM(rand);
2127	ktime_t toggle_delay;
2128	unsigned long toggle_fuzz;
2129	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2130
2131	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2132	while (!rcu_inkernel_boot_has_ended())
2133		schedule_timeout_interruptible(HZ / 10);
2134	for_each_possible_cpu(cpu)
2135		maxcpu = cpu;
2136	WARN_ON(maxcpu < 0);
2137	if (toggle_interval > ULONG_MAX)
2138		toggle_fuzz = ULONG_MAX >> 3;
2139	else
2140		toggle_fuzz = toggle_interval >> 3;
2141	if (toggle_fuzz <= 0)
2142		toggle_fuzz = NSEC_PER_USEC;
2143	do {
2144		r = torture_random(&rand);
2145		cpu = (r >> 1) % (maxcpu + 1);
2146		if (r & 0x1) {
2147			rcu_nocb_cpu_offload(cpu);
2148			atomic_long_inc(&n_nocb_offload);
2149		} else {
2150			rcu_nocb_cpu_deoffload(cpu);
2151			atomic_long_inc(&n_nocb_deoffload);
2152		}
2153		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2154		set_current_state(TASK_INTERRUPTIBLE);
2155		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2156		if (stutter_wait("rcu_nocb_toggle"))
2157			sched_set_normal(current, oldnice);
2158	} while (!torture_must_stop());
2159	torture_kthread_stopping("rcu_nocb_toggle");
2160	return 0;
2161}
2162
2163/*
2164 * Print torture statistics.  Caller must ensure that there is only
2165 * one call to this function at a given time!!!  This is normally
2166 * accomplished by relying on the module system to only have one copy
2167 * of the module loaded, and then by giving the rcu_torture_stats
2168 * kthread full control (or the init/cleanup functions when the
2169 * rcu_torture_stats kthread is not running).
2170 */
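    /*
     * Key to the abbreviations printed below, inferred from the variables
     * passed to pr_cont(): rtc = current test structure, ver = writer
     * version, tfle = torture freelist empty, rta/rtaf/rtf = allocations,
     * allocation failures, and frees, rtmbe = memory-barrier errors,
     * rtmbkf = memory-barrier check failures/tries, rtbe = barrier errors,
     * rtbke = boost-kthread errors, rtbf = boost failures, rtb = boosts,
     * nt = timer-handler reads.
     */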
2171static void
2172rcu_torture_stats_print(void)
2173{
2174	int cpu;
2175	int i;
2176	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2177	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2178	struct rcu_torture *rtcp;
2179	static unsigned long rtcv_snap = ULONG_MAX;
2180	static bool splatted;
2181	struct task_struct *wtp;
2182
2183	for_each_possible_cpu(cpu) {
2184		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2185			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2186			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2187		}
2188	}
2189	for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2190		if (pipesummary[i] != 0)
2191			break;
2192	}
2193
2194	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2195	rtcp = rcu_access_pointer(rcu_torture_current);
2196	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2197		rtcp,
2198		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2199		rcu_torture_current_version,
2200		list_empty(&rcu_torture_freelist),
2201		atomic_read(&n_rcu_torture_alloc),
2202		atomic_read(&n_rcu_torture_alloc_fail),
2203		atomic_read(&n_rcu_torture_free));
2204	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2205		atomic_read(&n_rcu_torture_mberror),
2206		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2207		n_rcu_torture_barrier_error,
2208		n_rcu_torture_boost_ktrerror);
2209	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2210		n_rcu_torture_boost_failure,
2211		n_rcu_torture_boosts,
2212		atomic_long_read(&n_rcu_torture_timers));
2213	torture_onoff_stats();
2214	pr_cont("barrier: %ld/%ld:%ld ",
2215		data_race(n_barrier_successes),
2216		data_race(n_barrier_attempts),
2217		data_race(n_rcu_torture_barrier_error));
2218	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2219	pr_cont("nocb-toggles: %ld:%ld\n",
2220		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2221
2222	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2223	if (atomic_read(&n_rcu_torture_mberror) ||
2224	    atomic_read(&n_rcu_torture_mbchk_fail) ||
2225	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2226	    n_rcu_torture_boost_failure || i > 1) {
2227		pr_cont("%s", "!!! ");
2228		atomic_inc(&n_rcu_torture_error);
2229		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2230		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2231		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
2232		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2233		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2234		WARN_ON_ONCE(i > 1); // Too-short grace period
2235	}
2236	pr_cont("Reader Pipe: ");
2237	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2238		pr_cont(" %ld", pipesummary[i]);
2239	pr_cont("\n");
2240
2241	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2242	pr_cont("Reader Batch: ");
2243	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2244		pr_cont(" %ld", batchsummary[i]);
2245	pr_cont("\n");
2246
2247	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2248	pr_cont("Free-Block Circulation: ");
2249	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2250		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2251	}
2252	pr_cont("\n");
2253
2254	if (cur_ops->stats)
2255		cur_ops->stats();
2256	if (rtcv_snap == rcu_torture_current_version &&
2257	    rcu_access_pointer(rcu_torture_current) &&
2258	    !rcu_stall_is_suppressed()) {
2259		int __maybe_unused flags = 0;
2260		unsigned long __maybe_unused gp_seq = 0;
2261
2262		rcutorture_get_gp_data(cur_ops->ttype,
2263				       &flags, &gp_seq);
2264		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
2265					&flags, &gp_seq);
2266		wtp = READ_ONCE(writer_task);
2267		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2268			 rcu_torture_writer_state_getname(),
2269			 rcu_torture_writer_state, gp_seq, flags,
2270			 wtp == NULL ? ~0U : wtp->__state,
2271			 wtp == NULL ? -1 : (int)task_cpu(wtp));
2272		if (!splatted && wtp) {
2273			sched_show_task(wtp);
2274			splatted = true;
2275		}
2276		if (cur_ops->gp_kthread_dbg)
2277			cur_ops->gp_kthread_dbg();
2278		rcu_ftrace_dump(DUMP_ALL);
2279	}
2280	rtcv_snap = rcu_torture_current_version;
2281}
2282
2283/*
2284 * Periodically prints torture statistics, if periodic statistics printing
2285 * was specified via the stat_interval module parameter.
2286 */
2287static int
2288rcu_torture_stats(void *arg)
2289{
2290	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2291	do {
2292		schedule_timeout_interruptible(stat_interval * HZ);
2293		rcu_torture_stats_print();
2294		torture_shutdown_absorb("rcu_torture_stats");
2295	} while (!torture_must_stop());
2296	torture_kthread_stopping("rcu_torture_stats");
2297	return 0;
2298}
2299
2300/* Test mem_dump_obj() and friends.  */
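    /*
     * This exercises mem_dump_obj() on slab-cache, kmalloc(), and vmalloc()
     * memory, on an interior pointer (&rhp->func), on the ZERO_SIZE_PTR and
     * NULL special cases, and on a static variable; the console output is
     * intended for manual inspection.
     */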
2301static void rcu_torture_mem_dump_obj(void)
2302{
2303	struct rcu_head *rhp;
2304	struct kmem_cache *kcp;
2305	static int z;
2306
2307	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2308	if (WARN_ON_ONCE(!kcp))
2309		return;
2310	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2311	if (WARN_ON_ONCE(!rhp)) {
2312		kmem_cache_destroy(kcp);
2313		return;
2314	}
2315	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2316	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2317	mem_dump_obj(ZERO_SIZE_PTR);
2318	pr_alert("mem_dump_obj(NULL):");
2319	mem_dump_obj(NULL);
2320	pr_alert("mem_dump_obj(%px):", &rhp);
2321	mem_dump_obj(&rhp);
2322	pr_alert("mem_dump_obj(%px):", rhp);
2323	mem_dump_obj(rhp);
2324	pr_alert("mem_dump_obj(%px):", &rhp->func);
2325	mem_dump_obj(&rhp->func);
2326	pr_alert("mem_dump_obj(%px):", &z);
2327	mem_dump_obj(&z);
2328	kmem_cache_free(kcp, rhp);
2329	kmem_cache_destroy(kcp);
2330	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2331	if (WARN_ON_ONCE(!rhp))
2332		return;
2333	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2334	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2335	mem_dump_obj(rhp);
2336	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2337	mem_dump_obj(&rhp->func);
2338	kfree(rhp);
2339	rhp = vmalloc(4096);
2340	if (WARN_ON_ONCE(!rhp))
2341		return;
2342	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2343	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2344	mem_dump_obj(rhp);
2345	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2346	mem_dump_obj(&rhp->func);
2347	vfree(rhp);
2348}
2349
2350static void
2351rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2352{
2353	pr_alert("%s" TORTURE_FLAG
2354		 "--- %s: nreaders=%d nfakewriters=%d "
2355		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2356		 "shuffle_interval=%d stutter=%d irqreader=%d "
2357		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2358		 "test_boost=%d/%d test_boost_interval=%d "
2359		 "test_boost_duration=%d shutdown_secs=%d "
2360		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2361		 "stall_cpu_block=%d "
2362		 "n_barrier_cbs=%d "
2363		 "onoff_interval=%d onoff_holdoff=%d "
2364		 "read_exit_delay=%d read_exit_burst=%d "
2365		 "nocbs_nthreads=%d nocbs_toggle=%d "
2366		 "test_nmis=%d\n",
2367		 torture_type, tag, nrealreaders, nfakewriters,
2368		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2369		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2370		 test_boost, cur_ops->can_boost,
2371		 test_boost_interval, test_boost_duration, shutdown_secs,
2372		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2373		 stall_cpu_block,
2374		 n_barrier_cbs,
2375		 onoff_interval, onoff_holdoff,
2376		 read_exit_delay, read_exit_burst,
2377		 nocbs_nthreads, nocbs_toggle,
2378		 test_nmis);
2379}
2380
2381static int rcutorture_booster_cleanup(unsigned int cpu)
2382{
2383	struct task_struct *t;
2384
2385	if (boost_tasks[cpu] == NULL)
2386		return 0;
2387	mutex_lock(&boost_mutex);
2388	t = boost_tasks[cpu];
2389	boost_tasks[cpu] = NULL;
2390	rcu_torture_enable_rt_throttle();
2391	mutex_unlock(&boost_mutex);
2392
2393	/* This must be outside of the mutex, otherwise deadlock! */
2394	torture_stop_kthread(rcu_torture_boost, t);
2395	return 0;
2396}
2397
2398static int rcutorture_booster_init(unsigned int cpu)
2399{
2400	int retval;
2401
2402	if (boost_tasks[cpu] != NULL)
2403		return 0;  /* Already created, nothing more to do. */
2404
2405	// Testing RCU priority boosting requires that rcutorture do
2406	// some serious abuse.  Counter this by running ksoftirqd
2407	// at higher priority.
2408	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2409		struct sched_param sp;
2410		struct task_struct *t;
2411
2412		t = per_cpu(ksoftirqd, cpu);
2413		WARN_ON_ONCE(!t);
2414		sp.sched_priority = 2;
2415		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2416	}
2417
2418	/* Don't allow time recalculation while creating a new task. */
2419	mutex_lock(&boost_mutex);
2420	rcu_torture_disable_rt_throttle();
2421	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2422	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2423					      cpu, "rcu_torture_boost_%u");
2424	if (IS_ERR(boost_tasks[cpu])) {
2425		retval = PTR_ERR(boost_tasks[cpu]);
2426		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2427		n_rcu_torture_boost_ktrerror++;
2428		boost_tasks[cpu] = NULL;
2429		mutex_unlock(&boost_mutex);
2430		return retval;
2431	}
2432	mutex_unlock(&boost_mutex);
2433	return 0;
2434}
2435
2436static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
2437{
2438	pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
2439	return NOTIFY_OK;
2440}
2441
2442static struct notifier_block rcu_torture_stall_block = {
2443	.notifier_call = rcu_torture_stall_nf,
2444};
2445
2446/*
2447 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
2448 * induces a CPU stall for the time specified by stall_cpu.  If a new
2449 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
2450 */
2451static int rcu_torture_stall(void *args)
2452{
2453	int idx;
2454	int ret = 0;
2455	unsigned long stop_at;
2456
2457	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2458	if (rcu_cpu_stall_notifiers) {
2459		ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
2460		if (ret)
2461			pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
2462				__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
2463	}
2464	if (stall_cpu_holdoff > 0) {
2465		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2466		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2467		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2468	}
2469	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2470		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2471		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2472		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2473			if (kthread_should_stop())
2474				break;
2475			schedule_timeout_uninterruptible(HZ);
2476		}
2477	}
2478	if (!kthread_should_stop() && stall_cpu > 0) {
2479		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2480		stop_at = ktime_get_seconds() + stall_cpu;
2481		/* RCU CPU stall is expected behavior in following code. */
2482		idx = cur_ops->readlock();
2483		if (stall_cpu_irqsoff)
2484			local_irq_disable();
2485		else if (!stall_cpu_block)
2486			preempt_disable();
2487		pr_alert("%s start on CPU %d.\n",
2488			  __func__, raw_smp_processor_id());
2489		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2490				    stop_at))
2491			if (stall_cpu_block) {
2492#ifdef CONFIG_PREEMPTION
2493				preempt_schedule();
2494#else
2495				schedule_timeout_uninterruptible(HZ);
2496#endif
2497			} else if (stall_no_softlockup) {
2498				touch_softlockup_watchdog();
2499			}
2500		if (stall_cpu_irqsoff)
2501			local_irq_enable();
2502		else if (!stall_cpu_block)
2503			preempt_enable();
2504		cur_ops->readunlock(idx);
2505	}
2506	pr_alert("%s end.\n", __func__);
2507	if (rcu_cpu_stall_notifiers && !ret) {
2508		ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
2509		if (ret)
2510			pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
2511	}
2512	torture_shutdown_absorb("rcu_torture_stall");
2513	while (!kthread_should_stop())
2514		schedule_timeout_interruptible(10 * HZ);
2515	return 0;
2516}
2517
2518/* Spawn CPU-stall kthread, if stall_cpu specified. */
2519static int __init rcu_torture_stall_init(void)
2520{
2521	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2522		return 0;
2523	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2524}
2525
2526/* State structure for forward-progress self-propagating RCU callback. */
2527struct fwd_cb_state {
2528	struct rcu_head rh;
2529	int stop;
2530};
2531
2532/*
2533 * Forward-progress self-propagating RCU callback function.  Because
2534 * callbacks run from softirq, this function is an implicit RCU read-side
2535 * critical section.
2536 */
2537static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2538{
2539	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2540
2541	if (READ_ONCE(fcsp->stop)) {
2542		WRITE_ONCE(fcsp->stop, 2);
2543		return;
2544	}
2545	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2546}
2547
2548/* State for continuous-flood RCU callbacks. */
2549struct rcu_fwd_cb {
2550	struct rcu_head rh;
2551	struct rcu_fwd_cb *rfc_next;
2552	struct rcu_fwd *rfc_rfp;
2553	int rfc_gps;
2554};
2555
2556#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2557#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2558#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2559#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2560#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
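    /*
     * With the values above, N_LAUNDERS_HIST = 2 * 8 * HZ / (HZ / 10) = 160
     * buckets, that is, 100-millisecond resolution over twice the maximum
     * callback-test duration.
     */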
2561
2562struct rcu_launder_hist {
2563	long n_launders;
2564	unsigned long launder_gp_seq;
2565};
2566
2567struct rcu_fwd {
2568	spinlock_t rcu_fwd_lock;
2569	struct rcu_fwd_cb *rcu_fwd_cb_head;
2570	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2571	long n_launders_cb;
2572	unsigned long rcu_fwd_startat;
2573	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2574	unsigned long rcu_launder_gp_seq_start;
2575	int rcu_fwd_id;
2576};
2577
2578static DEFINE_MUTEX(rcu_fwd_mutex);
2579static struct rcu_fwd *rcu_fwds;
2580static unsigned long rcu_fwd_seq;
2581static atomic_long_t rcu_fwd_max_cbs;
2582static bool rcu_fwd_emergency_stop;
2583
2584static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2585{
2586	unsigned long gps;
2587	unsigned long gps_old;
2588	int i;
2589	int j;
2590
2591	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2592		if (rfp->n_launders_hist[i].n_launders > 0)
2593			break;
2594	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2595		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2596	gps_old = rfp->rcu_launder_gp_seq_start;
2597	for (j = 0; j <= i; j++) {
2598		gps = rfp->n_launders_hist[j].launder_gp_seq;
2599		pr_cont(" %ds/%d: %ld:%ld",
2600			j + 1, FWD_CBS_HIST_DIV,
2601			rfp->n_launders_hist[j].n_launders,
2602			rcutorture_seq_diff(gps, gps_old));
2603		gps_old = gps;
2604	}
2605	pr_cont("\n");
2606}
2607
2608/* Callback function for continuous-flood RCU callbacks. */
2609static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2610{
2611	unsigned long flags;
2612	int i;
2613	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2614	struct rcu_fwd_cb **rfcpp;
2615	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2616
2617	rfcp->rfc_next = NULL;
2618	rfcp->rfc_gps++;
2619	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2620	rfcpp = rfp->rcu_fwd_cb_tail;
2621	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2622	WRITE_ONCE(*rfcpp, rfcp);
2623	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2624	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2625	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2626		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2627	rfp->n_launders_hist[i].n_launders++;
2628	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2629	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2630}
2631
2632// Give the scheduler a chance, even on nohz_full CPUs.
2633static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2634{
2635	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2636		// Real call_rcu() floods hit userspace, so emulate that.
2637		if (need_resched() || (iter & 0xfff))
2638			schedule();
2639		return;
2640	}
2641	// No userspace emulation: CB invocation throttles call_rcu()
2642	cond_resched();
2643}
2644
2645/*
2646 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2647 * test is over or because we hit an OOM event.
2648 */
2649static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2650{
2651	unsigned long flags;
2652	unsigned long freed = 0;
2653	struct rcu_fwd_cb *rfcp;
2654
2655	for (;;) {
2656		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2657		rfcp = rfp->rcu_fwd_cb_head;
2658		if (!rfcp) {
2659			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2660			break;
2661		}
2662		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2663		if (!rfp->rcu_fwd_cb_head)
2664			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2665		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2666		kfree(rfcp);
2667		freed++;
2668		rcu_torture_fwd_prog_cond_resched(freed);
2669		if (tick_nohz_full_enabled()) {
2670			local_irq_save(flags);
2671			rcu_momentary_dyntick_idle();
2672			local_irq_restore(flags);
2673		}
2674	}
2675	return freed;
2676}
2677
2678/* Carry out need_resched()/cond_resched() forward-progress testing. */
2679static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2680				    int *tested, int *tested_tries)
2681{
2682	unsigned long cver;
2683	unsigned long dur;
2684	struct fwd_cb_state fcs;
2685	unsigned long gps;
2686	int idx;
2687	int sd;
2688	int sd4;
2689	bool selfpropcb = false;
2690	unsigned long stopat;
2691	static DEFINE_TORTURE_RANDOM(trs);
2692
2693	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2694	if (!cur_ops->sync)
2695		return; // Cannot do need_resched() forward progress testing without ->sync.
2696	if (cur_ops->call && cur_ops->cb_barrier) {
2697		init_rcu_head_on_stack(&fcs.rh);
2698		selfpropcb = true;
2699	}
2700
2701	/* Tight loop containing cond_resched(). */
2702	atomic_inc(&rcu_fwd_cb_nodelay);
2703	cur_ops->sync(); /* Later readers see above write. */
2704	if  (selfpropcb) {
2705		WRITE_ONCE(fcs.stop, 0);
2706		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2707	}
2708	cver = READ_ONCE(rcu_torture_current_version);
2709	gps = cur_ops->get_gp_seq();
2710	sd = cur_ops->stall_dur() + 1;
2711	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2712	dur = sd4 + torture_random(&trs) % (sd - sd4);
2713	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2714	stopat = rfp->rcu_fwd_startat + dur;
2715	while (time_before(jiffies, stopat) &&
2716	       !shutdown_time_arrived() &&
2717	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2718		idx = cur_ops->readlock();
2719		udelay(10);
2720		cur_ops->readunlock(idx);
2721		if (!fwd_progress_need_resched || need_resched())
2722			cond_resched();
2723	}
2724	(*tested_tries)++;
2725	if (!time_before(jiffies, stopat) &&
2726	    !shutdown_time_arrived() &&
2727	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2728		(*tested)++;
2729		cver = READ_ONCE(rcu_torture_current_version) - cver;
2730		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2731		WARN_ON(!cver && gps < 2);
2732		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2733			 rfp->rcu_fwd_id, dur, cver, gps);
2734	}
2735	if (selfpropcb) {
2736		WRITE_ONCE(fcs.stop, 1);
2737		cur_ops->sync(); /* Wait for running CB to complete. */
2738		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2739		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2740	}
2741
2742	if (selfpropcb) {
2743		WARN_ON(READ_ONCE(fcs.stop) != 2);
2744		destroy_rcu_head_on_stack(&fcs.rh);
2745	}
2746	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2747	atomic_dec(&rcu_fwd_cb_nodelay);
2748}
2749
2750/* Carry out call_rcu() forward-progress testing. */
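    /*
     * A callback is "laundered" each time it is invoked and immediately
     * re-posted.  Roughly speaking, the flood below succeeds once at least
     * MIN_FWD_CBS_LAUNDERED callbacks have each been laundered
     * MIN_FWD_CB_LAUNDERS times, and gives up after MAX_FWD_CB_JIFFIES.
     */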
2751static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2752{
2753	unsigned long cver;
2754	unsigned long flags;
2755	unsigned long gps;
2756	int i;
2757	long n_launders;
2758	long n_launders_cb_snap;
2759	long n_launders_sa;
2760	long n_max_cbs;
2761	long n_max_gps;
2762	struct rcu_fwd_cb *rfcp;
2763	struct rcu_fwd_cb *rfcpn;
2764	unsigned long stopat;
2765	unsigned long stoppedat;
2766
2767	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2768	if (READ_ONCE(rcu_fwd_emergency_stop))
2769		return; /* Get out of the way quickly, no GP wait! */
2770	if (!cur_ops->call)
2771		return; /* Can't do call_rcu() fwd prog without ->call. */
2772
2773	/* Loop continuously posting RCU callbacks. */
2774	atomic_inc(&rcu_fwd_cb_nodelay);
2775	cur_ops->sync(); /* Later readers see above write. */
2776	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2777	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2778	n_launders = 0;
2779	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2780	n_launders_sa = 0;
2781	n_max_cbs = 0;
2782	n_max_gps = 0;
2783	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2784		rfp->n_launders_hist[i].n_launders = 0;
2785	cver = READ_ONCE(rcu_torture_current_version);
2786	gps = cur_ops->get_gp_seq();
2787	rfp->rcu_launder_gp_seq_start = gps;
2788	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2789	while (time_before(jiffies, stopat) &&
2790	       !shutdown_time_arrived() &&
2791	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2792		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2793		rfcpn = NULL;
2794		if (rfcp)
2795			rfcpn = READ_ONCE(rfcp->rfc_next);
2796		if (rfcpn) {
2797			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2798			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2799				break;
2800			rfp->rcu_fwd_cb_head = rfcpn;
2801			n_launders++;
2802			n_launders_sa++;
2803		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2804			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2805			if (WARN_ON_ONCE(!rfcp)) {
2806				schedule_timeout_interruptible(1);
2807				continue;
2808			}
2809			n_max_cbs++;
2810			n_launders_sa = 0;
2811			rfcp->rfc_gps = 0;
2812			rfcp->rfc_rfp = rfp;
2813		} else {
2814			rfcp = NULL;
2815		}
2816		if (rfcp)
2817			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2818		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2819		if (tick_nohz_full_enabled()) {
2820			local_irq_save(flags);
2821			rcu_momentary_dyntick_idle();
2822			local_irq_restore(flags);
2823		}
2824	}
2825	stoppedat = jiffies;
2826	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2827	cver = READ_ONCE(rcu_torture_current_version) - cver;
2828	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2829	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2830	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2831	(void)rcu_torture_fwd_prog_cbfree(rfp);
2832
2833	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2834	    !shutdown_time_arrived()) {
2835		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2836		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2837			 __func__,
2838			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2839			 n_launders + n_max_cbs - n_launders_cb_snap,
2840			 n_launders, n_launders_sa,
2841			 n_max_gps, n_max_cbs, cver, gps);
2842		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2843		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2844		rcu_torture_fwd_cb_hist(rfp);
2845		mutex_unlock(&rcu_fwd_mutex);
2846	}
2847	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2848	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2849	atomic_dec(&rcu_fwd_cb_nodelay);
2850}
2851
2852
2853/*
2854 * OOM notifier: dumps diagnostics for the current forward-progress
2855 * test, then frees its flood callbacks to help resolve the OOM.
2856 */
2857static int rcutorture_oom_notify(struct notifier_block *self,
2858				 unsigned long notused, void *nfreed)
2859{
2860	int i;
2861	long ncbs;
2862	struct rcu_fwd *rfp;
2863
2864	mutex_lock(&rcu_fwd_mutex);
2865	rfp = rcu_fwds;
2866	if (!rfp) {
2867		mutex_unlock(&rcu_fwd_mutex);
2868		return NOTIFY_OK;
2869	}
2870	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2871	     __func__);
2872	for (i = 0; i < fwd_progress; i++) {
2873		rcu_torture_fwd_cb_hist(&rfp[i]);
2874		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2875	}
2876	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2877	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
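    	/*
    	 * Alternate freeing and waiting: each cb_barrier() below forces
    	 * callbacks still in flight to be invoked, which links them back
    	 * onto the flood lists so that the next pass can free them.
    	 */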
2878	ncbs = 0;
2879	for (i = 0; i < fwd_progress; i++)
2880		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2881	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2882	cur_ops->cb_barrier();
2883	ncbs = 0;
2884	for (i = 0; i < fwd_progress; i++)
2885		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2886	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2887	cur_ops->cb_barrier();
2888	ncbs = 0;
2889	for (i = 0; i < fwd_progress; i++)
2890		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2891	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2892	smp_mb(); /* Frees before return to avoid redoing OOM. */
2893	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2894	pr_info("%s returning after OOM processing.\n", __func__);
2895	mutex_unlock(&rcu_fwd_mutex);
2896	return NOTIFY_OK;
2897}
2898
2899static struct notifier_block rcutorture_oom_nb = {
2900	.notifier_call = rcutorture_oom_notify
2901};
2902
2903/* Carry out grace-period forward-progress testing. */
2904static int rcu_torture_fwd_prog(void *args)
2905{
2906	bool firsttime = true;
2907	long max_cbs;
2908	int oldnice = task_nice(current);
2909	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2910	struct rcu_fwd *rfp = args;
2911	int tested = 0;
2912	int tested_tries = 0;
2913
2914	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2915	rcu_bind_current_to_nocb();
2916	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2917		set_user_nice(current, MAX_NICE);
2918	do {
2919		if (!rfp->rcu_fwd_id) {
2920			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2921			WRITE_ONCE(rcu_fwd_emergency_stop, false);
2922			if (!firsttime) {
2923				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2924				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2925			}
2926			firsttime = false;
2927			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2928		} else {
2929			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
2930				schedule_timeout_interruptible(HZ / 20);
2931			oldseq = READ_ONCE(rcu_fwd_seq);
2932		}
2933		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2934		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2935			rcu_torture_fwd_prog_cr(rfp);
2936		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2937		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
2938		     (rcu_inkernel_boot_has_ended() &&
2939		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
2940			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2941
2942		/* Avoid slow periods, better to test when busy. */
2943		if (stutter_wait("rcu_torture_fwd_prog"))
2944			sched_set_normal(current, oldnice);
2945	} while (!torture_must_stop());
2946	/* Short runs might not contain a valid forward-progress attempt. */
2947	if (!rfp->rcu_fwd_id) {
2948		WARN_ON(!tested && tested_tries >= 5);
2949		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2950	}
2951	torture_kthread_stopping("rcu_torture_fwd_prog");
2952	return 0;
2953}
2954
2955/* If forward-progress checking is requested and feasible, spawn the kthreads. */
2956static int __init rcu_torture_fwd_prog_init(void)
2957{
2958	int i;
2959	int ret = 0;
2960	struct rcu_fwd *rfp;
2961
2962	if (!fwd_progress)
2963		return 0; /* Not requested, so don't do it. */
2964	if (fwd_progress >= nr_cpu_ids) {
2965		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
2966		fwd_progress = nr_cpu_ids;
2967	} else if (fwd_progress < 0) {
2968		fwd_progress = nr_cpu_ids;
2969	}
2970	if ((!cur_ops->sync && !cur_ops->call) ||
2971	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2972	    cur_ops == &rcu_busted_ops) {
2973		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2974		fwd_progress = 0;
2975		return 0;
2976	}
2977	if (stall_cpu > 0) {
2978		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2979		fwd_progress = 0;
2980		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2981			return -EINVAL; /* In module, can fail back to user. */
2982		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2983		return 0;
2984	}
2985	if (fwd_progress_holdoff <= 0)
2986		fwd_progress_holdoff = 1;
2987	if (fwd_progress_div <= 0)
2988		fwd_progress_div = 4;
2989	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2990	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2991	if (!rfp || !fwd_prog_tasks) {
2992		kfree(rfp);
2993		kfree(fwd_prog_tasks);
2994		fwd_prog_tasks = NULL;
2995		fwd_progress = 0;
2996		return -ENOMEM;
2997	}
2998	for (i = 0; i < fwd_progress; i++) {
2999		spin_lock_init(&rfp[i].rcu_fwd_lock);
3000		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
3001		rfp[i].rcu_fwd_id = i;
3002	}
3003	mutex_lock(&rcu_fwd_mutex);
3004	rcu_fwds = rfp;
3005	mutex_unlock(&rcu_fwd_mutex);
3006	register_oom_notifier(&rcutorture_oom_nb);
3007	for (i = 0; i < fwd_progress; i++) {
3008		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
3009		if (ret) {
3010			fwd_progress = i;
3011			return ret;
3012		}
3013	}
3014	return 0;
3015}
3016
3017static void rcu_torture_fwd_prog_cleanup(void)
3018{
3019	int i;
3020	struct rcu_fwd *rfp;
3021
3022	if (!rcu_fwds || !fwd_prog_tasks)
3023		return;
3024	for (i = 0; i < fwd_progress; i++)
3025		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
3026	unregister_oom_notifier(&rcutorture_oom_nb);
3027	mutex_lock(&rcu_fwd_mutex);
3028	rfp = rcu_fwds;
3029	rcu_fwds = NULL;
3030	mutex_unlock(&rcu_fwd_mutex);
3031	kfree(rfp);
3032	kfree(fwd_prog_tasks);
3033	fwd_prog_tasks = NULL;
3034}
3035
3036/* Callback function for RCU barrier testing. */
3037static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3038{
3039	atomic_inc(&barrier_cbs_invoked);
3040}
3041
3042/* IPI handler to get callback posted on desired CPU, if online. */
3043static void rcu_torture_barrier1cb(void *rcu_void)
3044{
3045	struct rcu_head *rhp = rcu_void;
3046
3047	cur_ops->call(rhp, rcu_torture_barrier_cbf);
3048}
3049
3050/* kthread function to register callbacks used to test RCU barriers. */
3051static int rcu_torture_barrier_cbs(void *arg)
3052{
3053	long myid = (long)arg;
3054	bool lastphase = false;
3055	bool newphase;
3056	struct rcu_head rcu;
3057
3058	init_rcu_head_on_stack(&rcu);
3059	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3060	set_user_nice(current, MAX_NICE);
3061	do {
3062		wait_event(barrier_cbs_wq[myid],
3063			   (newphase =
3064			    smp_load_acquire(&barrier_phase)) != lastphase ||
3065			   torture_must_stop());
3066		lastphase = newphase;
3067		if (torture_must_stop())
3068			break;
3069		/*
3070		 * The above smp_load_acquire() ensures barrier_phase load
3071		 * is ordered before the following ->call().
3072		 */
3073		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
3074					     &rcu, 1)) {
3075			// IPI failed, so use direct call from current CPU.
3076			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3077		}
3078		if (atomic_dec_and_test(&barrier_cbs_count))
3079			wake_up(&barrier_wq);
3080	} while (!torture_must_stop());
3081	if (cur_ops->cb_barrier != NULL)
3082		cur_ops->cb_barrier();
3083	destroy_rcu_head_on_stack(&rcu);
3084	torture_kthread_stopping("rcu_torture_barrier_cbs");
3085	return 0;
3086}
3087
3088/* kthread function to drive and coordinate RCU barrier testing. */
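    /*
     * Each cycle flips barrier_phase to release the per-CPU callback
     * kthreads, waits until all n_barrier_cbs callbacks have been posted,
     * and only then invokes cb_barrier().  If cb_barrier() returns before
     * every posted callback has been invoked, the barrier primitive under
     * test is broken.
     */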
3089static int rcu_torture_barrier(void *arg)
3090{
3091	int i;
3092
3093	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
3094	do {
3095		atomic_set(&barrier_cbs_invoked, 0);
3096		atomic_set(&barrier_cbs_count, n_barrier_cbs);
3097		/* Ensure barrier_phase ordered after prior assignments. */
3098		smp_store_release(&barrier_phase, !barrier_phase);
3099		for (i = 0; i < n_barrier_cbs; i++)
3100			wake_up(&barrier_cbs_wq[i]);
3101		wait_event(barrier_wq,
3102			   atomic_read(&barrier_cbs_count) == 0 ||
3103			   torture_must_stop());
3104		if (torture_must_stop())
3105			break;
3106		n_barrier_attempts++;
3107		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3108		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
3109			n_rcu_torture_barrier_error++;
3110			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
3111			       atomic_read(&barrier_cbs_invoked),
3112			       n_barrier_cbs);
3113			WARN_ON(1);
3114			// Wait manually for the remaining callbacks
3115			i = 0;
3116			do {
3117				if (WARN_ON(i++ > HZ))
3118					i = INT_MIN;
3119				schedule_timeout_interruptible(1);
3120				cur_ops->cb_barrier();
3121			} while (atomic_read(&barrier_cbs_invoked) !=
3122				 n_barrier_cbs &&
3123				 !torture_must_stop());
3124			smp_mb(); // Can't trust ordering if broken.
3125			if (!torture_must_stop())
3126				pr_err("Recovered: barrier_cbs_invoked = %d\n",
3127				       atomic_read(&barrier_cbs_invoked));
3128		} else {
3129			n_barrier_successes++;
3130		}
3131		schedule_timeout_interruptible(HZ / 10);
3132	} while (!torture_must_stop());
3133	torture_kthread_stopping("rcu_torture_barrier");
3134	return 0;
3135}
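
The driver publishes each round by flipping barrier_phase with smp_store_release(), and each callback kthread observes the flip with smp_load_acquire(), so the counters reset just before the flip are guaranteed visible to the woken kthreads. A rough userspace analogue using C11 atomics (all names hypothetical; a real worker would sleep, as wait_event() does, rather than spin):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool phase;	/* plays the role of barrier_phase */
static int round_data;		/* state the release is meant to order */

static void driver_start_round(int value)
{
	round_data = value;	/* ordered before the flip below ... */
	atomic_store_explicit(&phase,
			      !atomic_load_explicit(&phase, memory_order_relaxed),
			      memory_order_release);
}

static int worker_wait_round(bool lastphase)
{
	while (atomic_load_explicit(&phase, memory_order_acquire) == lastphase)
		;		/* spin until the phase changes */
	return round_data;	/* ... and guaranteed visible here */
}

int main(void)
{
	driver_start_round(42);
	printf("%d\n", worker_wait_round(false));	/* prints 42 */
	return 0;
}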
3136
3137/* Initialize RCU barrier testing. */
3138static int rcu_torture_barrier_init(void)
3139{
3140	int i;
3141	int ret;
3142
3143	if (n_barrier_cbs <= 0)
3144		return 0;
3145	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3146		pr_alert("%s" TORTURE_FLAG
3147			 " Call or barrier ops missing for %s,\n",
3148			 torture_type, cur_ops->name);
3149		pr_alert("%s" TORTURE_FLAG
3150			 " RCU barrier testing omitted from run.\n",
3151			 torture_type);
3152		return 0;
3153	}
3154	atomic_set(&barrier_cbs_count, 0);
3155	atomic_set(&barrier_cbs_invoked, 0);
3156	barrier_cbs_tasks =
3157		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
3158			GFP_KERNEL);
3159	barrier_cbs_wq =
3160		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
3161	if (!barrier_cbs_tasks || !barrier_cbs_wq)
3162		return -ENOMEM;
3163	for (i = 0; i < n_barrier_cbs; i++) {
3164		init_waitqueue_head(&barrier_cbs_wq[i]);
3165		ret = torture_create_kthread(rcu_torture_barrier_cbs,
3166					     (void *)(long)i,
3167					     barrier_cbs_tasks[i]);
3168		if (ret)
3169			return ret;
3170	}
3171	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
3172}
3173
3174/* Clean up after RCU barrier testing. */
3175static void rcu_torture_barrier_cleanup(void)
3176{
3177	int i;
3178
3179	torture_stop_kthread(rcu_torture_barrier, barrier_task);
3180	if (barrier_cbs_tasks != NULL) {
3181		for (i = 0; i < n_barrier_cbs; i++)
3182			torture_stop_kthread(rcu_torture_barrier_cbs,
3183					     barrier_cbs_tasks[i]);
3184		kfree(barrier_cbs_tasks);
3185		barrier_cbs_tasks = NULL;
3186	}
3187	if (barrier_cbs_wq != NULL) {
3188		kfree(barrier_cbs_wq);
3189		barrier_cbs_wq = NULL;
3190	}
3191}
3192
3193static bool rcu_torture_can_boost(void)
3194{
3195	static int boost_warn_once;
3196	int prio;
3197
3198	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3199		return false;
3200	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3201		return false;
3202
3203	prio = rcu_get_gp_kthreads_prio();
3204	if (!prio)
3205		return false;
3206
3207	if (prio < 2) {
3208		if (boost_warn_once == 1)
3209			return false;
3210
3211		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
3212		boost_warn_once = 1;
3213		return false;
3214	}
3215
3216	return true;
3217}
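
rcu_torture_can_boost() refuses to test boosting unless the grace-period kthreads run at priority 2 or higher, and it warns exactly once about the misconfiguration. That warn-once gating is a common idiom; a standalone sketch with illustrative names:

#include <stdio.h>

static int warn_if_prio_too_low(int prio)
{
	static int warned;

	if (prio >= 2)
		return 0;
	if (!warned) {			/* diagnose only on first sighting */
		fprintf(stderr, "priority %d too low to test boosting\n", prio);
		warned = 1;
	}
	return -1;
}

int main(void)
{
	warn_if_prio_too_low(1);		/* warns */
	return warn_if_prio_too_low(1) ? 0 : 1;	/* silent, still fails */
}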
3218
3219static bool read_exit_child_stop;
3220static bool read_exit_child_stopped;
3221static wait_queue_head_t read_exit_wq;
3222
3223// Child kthread which just does an rcutorture reader and exits.
3224static int rcu_torture_read_exit_child(void *trsp_in)
3225{
3226	struct torture_random_state *trsp = trsp_in;
3227
3228	set_user_nice(current, MAX_NICE);
3229	// Minimize time between reading and exiting.
3230	while (!kthread_should_stop())
3231		schedule_timeout_uninterruptible(HZ / 20);
3232	(void)rcu_torture_one_read(trsp, -1);
3233	return 0;
3234}
3235
3236// Parent kthread which creates and destroys read-exit child kthreads.
3237static int rcu_torture_read_exit(void *unused)
3238{

3239	bool errexit = false;
3240	int i;
3241	struct task_struct *tsp;
3242	DEFINE_TORTURE_RANDOM(trs);
3243
3244	// Allocate and initialize.
3245	set_user_nice(current, MAX_NICE);
3246	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
3247
3248	// Each pass through this loop does one read-exit episode.
3249	do {
3250		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
3251		for (i = 0; i < read_exit_burst; i++) {
3252			if (READ_ONCE(read_exit_child_stop))
3253				break;
3254			stutter_wait("rcu_torture_read_exit");
3255			// Spawn child.
3256			tsp = kthread_run(rcu_torture_read_exit_child,
3257					  &trs, "%s", "rcu_torture_read_exit_child");
3258			if (IS_ERR(tsp)) {
3259				TOROUT_ERRSTRING("out of memory");
3260				errexit = true;
3261				break;
3262			}
3263			cond_resched();
3264			kthread_stop(tsp);
3265			n_read_exits++;
3266		}
3267		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
3268		rcu_barrier(); // Wait for task_struct free, avoid OOM.
3269		i = 0;
3270		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
3271			schedule_timeout_uninterruptible(HZ);
3272	} while (!errexit && !READ_ONCE(read_exit_child_stop));
3273
3274	// Clean up and exit.
3275	smp_store_release(&read_exit_child_stopped, true); // After reaping.
3276	smp_mb(); // Store before wakeup.
3277	wake_up(&read_exit_wq);
3278	while (!torture_must_stop())
3279		schedule_timeout_uninterruptible(HZ / 20);
3280	torture_kthread_stopping("rcu_torture_read_exit");
3281	return 0;
3282}
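
Each episode spawns a child kthread, lets it perform a single read, and reaps it, with rcu_barrier() afterwards so freed task_structs do not pile up. A hedged pthread analogue of the spawn/one-read/reap loop (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static void *read_exit_child(void *unused)
{
	(void)unused;
	/* stand-in for rcu_torture_one_read(): one read-side pass, then exit */
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int i;

	for (i = 0; i < 16; i++) {			/* like read_exit_burst */
		if (pthread_create(&tid, NULL, read_exit_child, NULL)) {
			fprintf(stderr, "out of resources\n");
			break;				/* like the errexit path */
		}
		pthread_join(tid, NULL);		/* like kthread_stop() reaping */
	}
	return 0;
}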
3283
3284static int rcu_torture_read_exit_init(void)
3285{
3286	if (read_exit_burst <= 0)
3287		return 0;
3288	init_waitqueue_head(&read_exit_wq);
3289	read_exit_child_stop = false;
3290	read_exit_child_stopped = false;
3291	return torture_create_kthread(rcu_torture_read_exit, NULL,
3292				      read_exit_task);
3293}
3294
3295static void rcu_torture_read_exit_cleanup(void)
3296{
3297	if (!read_exit_task)
3298		return;
3299	WRITE_ONCE(read_exit_child_stop, true);
3300	smp_mb(); // Above write before wait.
3301	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3302	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
3303}
3304
3305static void rcutorture_test_nmis(int n)
3306{
3307#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3308	int cpu;
3309	int dumpcpu;
3310	int i;
3311
3312	for (i = 0; i < n; i++) {
3313		preempt_disable();
3314		cpu = smp_processor_id();
3315		dumpcpu = cpu + 1;
3316		if (dumpcpu >= nr_cpu_ids)
3317			dumpcpu = 0;
3318		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
3319		dump_cpu_task(dumpcpu);
3320		preempt_enable();
3321		schedule_timeout_uninterruptible(15 * HZ);
3322	}
3323#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3324	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3325#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3326}
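
The dump target is simply the next CPU with wraparound; the increment-and-clamp above is equivalent to a modulo. A one-line restatement for clarity (illustrative):

#include <stdio.h>

static int next_dump_cpu(int cpu, int nr_cpu_ids)
{
	return (cpu + 1) % nr_cpu_ids;	/* same wraparound as above */
}

int main(void)
{
	printf("%d\n", next_dump_cpu(3, 4));	/* prints 0 */
	return 0;
}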
3327
3328static enum cpuhp_state rcutor_hp;
3329
3330static void
3331rcu_torture_cleanup(void)
3332{
3333	int firsttime;
3334	int flags = 0;
3335	unsigned long gp_seq = 0;
3336	int i;
3337
3338	if (torture_cleanup_begin()) {
3339		if (cur_ops->cb_barrier != NULL) {
3340			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3341			cur_ops->cb_barrier();
3342		}
3343		rcu_gp_slow_unregister(NULL);
3344		return;
3345	}
3346	if (!cur_ops) {
3347		torture_cleanup_end();
3348		rcu_gp_slow_unregister(NULL);
3349		return;
3350	}
3351
3352	rcutorture_test_nmis(test_nmis);
3353
3354	if (cur_ops->gp_kthread_dbg)
3355		cur_ops->gp_kthread_dbg();
3356	rcu_torture_read_exit_cleanup();
3357	rcu_torture_barrier_cleanup();
3358	rcu_torture_fwd_prog_cleanup();
3359	torture_stop_kthread(rcu_torture_stall, stall_task);
3360	torture_stop_kthread(rcu_torture_writer, writer_task);
3361
3362	if (nocb_tasks) {
3363		for (i = 0; i < nrealnocbers; i++)
3364			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3365		kfree(nocb_tasks);
3366		nocb_tasks = NULL;
3367	}
3368
3369	if (reader_tasks) {
3370		for (i = 0; i < nrealreaders; i++)
3371			torture_stop_kthread(rcu_torture_reader,
3372					     reader_tasks[i]);
3373		kfree(reader_tasks);
3374		reader_tasks = NULL;
3375	}
3376	kfree(rcu_torture_reader_mbchk);
3377	rcu_torture_reader_mbchk = NULL;
3378
3379	if (fakewriter_tasks) {
3380		for (i = 0; i < nfakewriters; i++)
3381			torture_stop_kthread(rcu_torture_fakewriter,
3382					     fakewriter_tasks[i]);
3383		kfree(fakewriter_tasks);
3384		fakewriter_tasks = NULL;
3385	}
3386
3387	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3388	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3389	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3390		 cur_ops->name, (long)gp_seq, flags,
3391		 rcutorture_seq_diff(gp_seq, start_gp_seq));
3392	torture_stop_kthread(rcu_torture_stats, stats_task);
3393	torture_stop_kthread(rcu_torture_fqs, fqs_task);
3394	if (rcu_torture_can_boost() && rcutor_hp >= 0)
3395		cpuhp_remove_state(rcutor_hp);
3396
3397	/*
3398	 * Wait for all RCU callbacks to fire, then do torture-type-specific
3399	 * cleanup operations.
3400	 */
3401	if (cur_ops->cb_barrier != NULL) {
3402		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3403		cur_ops->cb_barrier();
3404	}
3405	if (cur_ops->cleanup != NULL)
3406		cur_ops->cleanup();
3407
3408	rcu_torture_mem_dump_obj();
3409
3410	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
3411
3412	if (err_segs_recorded) {
3413		pr_alert("Failure/close-call rcutorture reader segments:\n");
3414		if (rt_read_nsegs == 0)
3415			pr_alert("\t: No segments recorded!!!\n");
3416		firsttime = 1;
3417		for (i = 0; i < rt_read_nsegs; i++) {
3418			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
3419			if (err_segs[i].rt_delay_jiffies != 0) {
3420				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
3421					err_segs[i].rt_delay_jiffies);
3422				firsttime = 0;
3423			}
3424			if (err_segs[i].rt_delay_ms != 0) {
3425				pr_cont("%s%ldms", firsttime ? "" : "+",
3426					err_segs[i].rt_delay_ms);
3427				firsttime = 0;
3428			}
3429			if (err_segs[i].rt_delay_us != 0) {
3430				pr_cont("%s%ldus", firsttime ? "" : "+",
3431					err_segs[i].rt_delay_us);
3432				firsttime = 0;
3433			}
3434			pr_cont("%s\n",
3435				err_segs[i].rt_preempted ? "preempted" : "");
3436
3437		}
3438	}
3439	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
3440		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
3441	else if (torture_onoff_failures())
3442		rcu_torture_print_module_parms(cur_ops,
3443					       "End of test: RCU_HOTPLUG");
3444	else
3445		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
3446	torture_cleanup_end();
3447	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
3448}
3449
3450#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3451static void rcu_torture_leak_cb(struct rcu_head *rhp)
3452{
3453}
3454
3455static void rcu_torture_err_cb(struct rcu_head *rhp)
3456{
3457	/*
3458	 * This -might- happen due to race conditions, but is unlikely.
3459	 * The scenario that leads to this happening is that the
3460	 * first of the pair of duplicate callbacks is queued,
3461	 * someone else starts a grace period that includes that
3462	 * callback, then the second of the pair must wait for the
3463	 * next grace period.  Unlikely, but can happen.  If it
3464	 * does happen, the debug-objects subsystem won't have splatted.
3465	 */
3466	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
3467}
3468#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3469
3470/*
3471 * Verify that double-free causes debug-objects to complain, but only
3472 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
3473 * cannot be carried out.
3474 */
3475static void rcu_test_debug_objects(void)
3476{
3477#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3478	struct rcu_head rh1;
3479	struct rcu_head rh2;
3480	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
3481
3482	init_rcu_head_on_stack(&rh1);
3483	init_rcu_head_on_stack(&rh2);
3484	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
3485
3486	/* Try to queue the rh2 pair of callbacks for the same grace period. */
3487	preempt_disable(); /* Prevent preemption from interrupting test. */
3488	rcu_read_lock(); /* Make it impossible to finish a grace period. */
3489	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
3490	local_irq_disable(); /* Make it harder to start a new grace period. */
3491	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
3492	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
3493	if (rhp) {
3494		call_rcu_hurry(rhp, rcu_torture_leak_cb);
3495		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
3496	}
3497	local_irq_enable();
3498	rcu_read_unlock();
3499	preempt_enable();
3500
3501	/* Wait for them all to get done so we can safely return. */
3502	rcu_barrier();
3503	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
3504	destroy_rcu_head_on_stack(&rh1);
3505	destroy_rcu_head_on_stack(&rh2);
3506	kfree(rhp);
3507#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3508	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
3509#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3510}
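
The test relies on CONFIG_DEBUG_OBJECTS_RCU_HEAD tracking whether an rcu_head is already queued, so a second call_rcu() on an active head is reported instead of silently corrupting the callback list. A toy userspace sketch of that active-object check (purely illustrative, not the debug-objects implementation):

#include <stdbool.h>
#include <stdio.h>

struct dbg_head {
	bool active;			/* "already queued" marker */
};

static int queue_cb(struct dbg_head *h)
{
	if (h->active) {
		fprintf(stderr, "duplicate add of active object\n");
		return -1;		/* like a debug-objects splat */
	}
	h->active = true;
	return 0;
}

int main(void)
{
	struct dbg_head rh = { .active = false };

	queue_cb(&rh);			/* first add: fine */
	return queue_cb(&rh) ? 0 : 1;	/* second add: flagged */
}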
3511
3512static void rcutorture_sync(void)
3513{
3514	static unsigned long n;
3515
3516	if (cur_ops->sync && !(++n & 0xfff))
3517		cur_ops->sync();
3518}
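
Because 0xfff masks the low 12 bits, rcutorture_sync() invokes ->sync() only once per 4096 calls, cheaply rate-limiting synchronization during CPU-hotplug testing. A quick demonstration of the masking idiom:

#include <stdio.h>

int main(void)
{
	static unsigned long n;
	unsigned long hits = 0, i;

	for (i = 0; i < 3 * 4096; i++)
		if (!(++n & 0xfff))	/* true when n is a multiple of 4096 */
			hits++;
	printf("%lu syncs in %lu calls\n", hits, i);	/* 3 in 12288 */
	return 0;
}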
3519
3520static DEFINE_MUTEX(mut0);
3521static DEFINE_MUTEX(mut1);
3522static DEFINE_MUTEX(mut2);
3523static DEFINE_MUTEX(mut3);
3524static DEFINE_MUTEX(mut4);
3525static DEFINE_MUTEX(mut5);
3526static DEFINE_MUTEX(mut6);
3527static DEFINE_MUTEX(mut7);
3528static DEFINE_MUTEX(mut8);
3529static DEFINE_MUTEX(mut9);
3530
3531static DECLARE_RWSEM(rwsem0);
3532static DECLARE_RWSEM(rwsem1);
3533static DECLARE_RWSEM(rwsem2);
3534static DECLARE_RWSEM(rwsem3);
3535static DECLARE_RWSEM(rwsem4);
3536static DECLARE_RWSEM(rwsem5);
3537static DECLARE_RWSEM(rwsem6);
3538static DECLARE_RWSEM(rwsem7);
3539static DECLARE_RWSEM(rwsem8);
3540static DECLARE_RWSEM(rwsem9);
3541
3542DEFINE_STATIC_SRCU(srcu0);
3543DEFINE_STATIC_SRCU(srcu1);
3544DEFINE_STATIC_SRCU(srcu2);
3545DEFINE_STATIC_SRCU(srcu3);
3546DEFINE_STATIC_SRCU(srcu4);
3547DEFINE_STATIC_SRCU(srcu5);
3548DEFINE_STATIC_SRCU(srcu6);
3549DEFINE_STATIC_SRCU(srcu7);
3550DEFINE_STATIC_SRCU(srcu8);
3551DEFINE_STATIC_SRCU(srcu9);
3552
3553static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
3554			     int cyclelen, int deadlock)
3555{
3556	int j = i + 1;
3557
3558	if (j >= cyclelen)
3559		j = deadlock ? 0 : -1;
3560	if (j >= 0)
3561		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
3562	else
3563		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
3564	return j;
3565}
3566
3567// Test lockdep on SRCU-based deadlock scenarios.
3568static void rcu_torture_init_srcu_lockdep(void)
3569{
3570	int cyclelen;
3571	int deadlock;
3572	bool err = false;
3573	int i;
3574	int j;
3575	int idx;
3576	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
3577				 &mut5, &mut6, &mut7, &mut8, &mut9 };
3578	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
3579					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
3580	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
3581					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
3582	int testtype;
3583
3584	if (!test_srcu_lockdep)
3585		return;
3586
3587	deadlock = test_srcu_lockdep / 1000;
3588	testtype = (test_srcu_lockdep / 10) % 100;
3589	cyclelen = test_srcu_lockdep % 10;
3590	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
3591	if (WARN_ONCE(deadlock != !!deadlock,
3592		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
3593		      __func__, test_srcu_lockdep, deadlock))
3594		err = true;
3595	if (WARN_ONCE(cyclelen <= 0,
3596		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
3597		      __func__, test_srcu_lockdep, cyclelen))
3598		err = true;
3599	if (err)
3600		goto err_out;
3601
3602	if (testtype == 0) {
3603		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
3604			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3605		if (deadlock && cyclelen == 1)
3606			pr_info("%s: Expect hang.\n", __func__);
3607		for (i = 0; i < cyclelen; i++) {
3608			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
3609					      "srcu_read_unlock", i, cyclelen, deadlock);
3610			idx = srcu_read_lock(srcus[i]);
3611			if (j >= 0)
3612				synchronize_srcu(srcus[j]);
3613			srcu_read_unlock(srcus[i], idx);
3614		}
3615		return;
3616	}
3617
3618	if (testtype == 1) {
3619		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
3620			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3621		for (i = 0; i < cyclelen; i++) {
3622			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
3623				__func__, i, i, i, i);
3624			idx = srcu_read_lock(srcus[i]);
3625			mutex_lock(muts[i]);
3626			mutex_unlock(muts[i]);
3627			srcu_read_unlock(srcus[i], idx);
3628
3629			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
3630					      "mutex_unlock", i, cyclelen, deadlock);
3631			mutex_lock(muts[i]);
3632			if (j >= 0)
3633				synchronize_srcu(srcus[j]);
3634			mutex_unlock(muts[i]);
3635		}
3636		return;
3637	}
3638
3639	if (testtype == 2) {
3640		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
3641			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3642		for (i = 0; i < cyclelen; i++) {
3643			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
3644				__func__, i, i, i, i);
3645			idx = srcu_read_lock(srcus[i]);
3646			down_read(rwsems[i]);
3647			up_read(rwsems[i]);
3648			srcu_read_unlock(srcus[i], idx);
3649
3650			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
3651					      "up_write", i, cyclelen, deadlock);
3652			down_write(rwsems[i]);
3653			if (j >= 0)
3654				synchronize_srcu(srcus[j]);
3655			up_write(rwsems[i]);
3656		}
3657		return;
3658	}
3659
3660#ifdef CONFIG_TASKS_TRACE_RCU
3661	if (testtype == 3) {
3662		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
3663			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3664		if (deadlock && cyclelen == 1)
3665			pr_info("%s: Expect hang.\n", __func__);
3666		for (i = 0; i < cyclelen; i++) {
3667			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
3668			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
3669						     : "synchronize_srcu";
3670			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";
3671
3672			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
3673			if (i == 0)
3674				rcu_read_lock_trace();
3675			else
3676				idx = srcu_read_lock(srcus[i]);
3677			if (j >= 0) {
3678				if (i == cyclelen - 1)
3679					synchronize_rcu_tasks_trace();
3680				else
3681					synchronize_srcu(srcus[j]);
3682			}
3683			if (i == 0)
3684				rcu_read_unlock_trace();
3685			else
3686				srcu_read_unlock(srcus[i], idx);
3687		}
3688		return;
3689	}
3690#endif // #ifdef CONFIG_TASKS_TRACE_RCU
3691
3692err_out:
3693	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
3694	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
3695	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
3696	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
3697	pr_info("%s: L: Cycle length.\n", __func__);
3698	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
3699		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
3700}
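
As the error text spells out, test_srcu_lockdep encodes its scenario in decimal digits DNNL. A small sketch decoding a hypothetical value, matching the divisions above:

#include <stdio.h>

int main(void)
{
	int v = 1013;			/* hypothetical test_srcu_lockdep */
	int deadlock = v / 1000;	/* D: 1 = expect deadlock */
	int testtype = (v / 10) % 100;	/* NN: 01 = SRCU/mutex */
	int cyclelen = v % 10;		/* L: cycle length 3 */

	printf("deadlock=%d testtype=%d cyclelen=%d\n",
	       deadlock, testtype, cyclelen);
	return 0;
}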
3701
3702static int __init
3703rcu_torture_init(void)
3704{
3705	long i;
3706	int cpu;
3707	int firsterr = 0;
3708	int flags = 0;
3709	unsigned long gp_seq = 0;
3710	static struct rcu_torture_ops *torture_ops[] = {
3711		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
3712		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
3713		&trivial_ops,
3714	};
3715
3716	if (!torture_init_begin(torture_type, verbose))
3717		return -EBUSY;
3718
3719	/* Process args and tell the world that the torturer is on the job. */
3720	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
3721		cur_ops = torture_ops[i];
3722		if (strcmp(torture_type, cur_ops->name) == 0)
3723			break;
3724	}
3725	if (i == ARRAY_SIZE(torture_ops)) {
3726		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3727			 torture_type);
3728		pr_alert("rcu-torture types:");
3729		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
3730			pr_cont(" %s", torture_ops[i]->name);
3731		pr_cont("\n");
3732		firsterr = -EINVAL;
3733		cur_ops = NULL;
3734		goto unwind;
3735	}
3736	if (cur_ops->fqs == NULL && fqs_duration != 0) {
3737		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3738		fqs_duration = 0;
3739	}
3740	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
3741				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
3742		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
3743			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
3744		nocbs_nthreads = 0;
3745	}
3746	if (cur_ops->init)
3747		cur_ops->init();
3748
3749	rcu_torture_init_srcu_lockdep();
3750
3751	if (nreaders >= 0) {
3752		nrealreaders = nreaders;
3753	} else {
3754		nrealreaders = num_online_cpus() - 2 - nreaders;
3755		if (nrealreaders <= 0)
3756			nrealreaders = 1;
3757	}
3758	rcu_torture_print_module_parms(cur_ops, "Start of test");
3759	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3760	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3761	start_gp_seq = gp_seq;
3762	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
3763		 cur_ops->name, (long)gp_seq, flags);
3764
3765	/* Set up the freelist. */
3766
3767	INIT_LIST_HEAD(&rcu_torture_freelist);
3768	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3769		rcu_tortures[i].rtort_mbtest = 0;
3770		list_add_tail(&rcu_tortures[i].rtort_free,
3771			      &rcu_torture_freelist);
3772	}
3773
3774	/* Initialize the statistics so that each run gets its own numbers. */
3775
3776	rcu_torture_current = NULL;
3777	rcu_torture_current_version = 0;
3778	atomic_set(&n_rcu_torture_alloc, 0);
3779	atomic_set(&n_rcu_torture_alloc_fail, 0);
3780	atomic_set(&n_rcu_torture_free, 0);
3781	atomic_set(&n_rcu_torture_mberror, 0);
3782	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3783	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3784	atomic_set(&n_rcu_torture_error, 0);
3785	n_rcu_torture_barrier_error = 0;
3786	n_rcu_torture_boost_ktrerror = 0;
3787	n_rcu_torture_boost_failure = 0;
3788	n_rcu_torture_boosts = 0;
3789	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3790		atomic_set(&rcu_torture_wcount[i], 0);
3791	for_each_possible_cpu(cpu) {
3792		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3793			per_cpu(rcu_torture_count, cpu)[i] = 0;
3794			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3795		}
3796	}
3797	err_segs_recorded = 0;
3798	rt_read_nsegs = 0;
3799
3800	/* Start up the kthreads. */
3801
3802	rcu_torture_write_types();
3803	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3804					  writer_task);
3805	if (torture_init_error(firsterr))
3806		goto unwind;
3807	if (nfakewriters > 0) {
3808		fakewriter_tasks = kcalloc(nfakewriters,
3809					   sizeof(fakewriter_tasks[0]),
3810					   GFP_KERNEL);
3811		if (fakewriter_tasks == NULL) {
3812			TOROUT_ERRSTRING("out of memory");
3813			firsterr = -ENOMEM;
3814			goto unwind;
3815		}
3816	}
3817	for (i = 0; i < nfakewriters; i++) {
3818		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3819						  NULL, fakewriter_tasks[i]);
3820		if (torture_init_error(firsterr))
3821			goto unwind;
3822	}
3823	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3824			       GFP_KERNEL);
3825	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3826					   GFP_KERNEL);
3827	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3828		TOROUT_ERRSTRING("out of memory");
3829		firsterr = -ENOMEM;
3830		goto unwind;
3831	}
3832	for (i = 0; i < nrealreaders; i++) {
3833		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3834		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3835						  reader_tasks[i]);
3836		if (torture_init_error(firsterr))
3837			goto unwind;
3838	}
3839	nrealnocbers = nocbs_nthreads;
3840	if (WARN_ON(nrealnocbers < 0))
3841		nrealnocbers = 1;
3842	if (WARN_ON(nocbs_toggle < 0))
3843		nocbs_toggle = HZ;
3844	if (nrealnocbers > 0) {
3845		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3846		if (nocb_tasks == NULL) {
3847			TOROUT_ERRSTRING("out of memory");
3848			firsterr = -ENOMEM;
3849			goto unwind;
3850		}
3851	} else {
3852		nocb_tasks = NULL;
3853	}
3854	for (i = 0; i < nrealnocbers; i++) {
3855		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3856		if (torture_init_error(firsterr))
3857			goto unwind;
3858	}
3859	if (stat_interval > 0) {
3860		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3861						  stats_task);
3862		if (torture_init_error(firsterr))
3863			goto unwind;
3864	}
3865	if (test_no_idle_hz && shuffle_interval > 0) {
3866		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3867		if (torture_init_error(firsterr))
3868			goto unwind;
3869	}
3870	if (stutter < 0)
3871		stutter = 0;
3872	if (stutter) {
3873		int t;
3874
3875		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3876		firsterr = torture_stutter_init(stutter * HZ, t);
3877		if (torture_init_error(firsterr))
3878			goto unwind;
3879	}
3880	if (fqs_duration < 0)
3881		fqs_duration = 0;
3882	if (fqs_holdoff < 0)
3883		fqs_holdoff = 0;
3884	if (fqs_duration && fqs_holdoff) {
3885		/* Create the fqs thread */
3886		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3887						  fqs_task);
3888		if (torture_init_error(firsterr))
3889			goto unwind;
3890	}
3891	if (test_boost_interval < 1)
3892		test_boost_interval = 1;
3893	if (test_boost_duration < 2)
3894		test_boost_duration = 2;
3895	if (rcu_torture_can_boost()) {
3896
3897		boost_starttime = jiffies + test_boost_interval * HZ;
3898
3899		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3900					     rcutorture_booster_init,
3901					     rcutorture_booster_cleanup);
3902		rcutor_hp = firsterr;
3903		if (torture_init_error(firsterr))
3904			goto unwind;
3905	}
3906	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3907	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3908	if (torture_init_error(firsterr))
3909		goto unwind;
3910	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3911				      rcutorture_sync);
3912	if (torture_init_error(firsterr))
3913		goto unwind;
3914	firsterr = rcu_torture_stall_init();
3915	if (torture_init_error(firsterr))
3916		goto unwind;
3917	firsterr = rcu_torture_fwd_prog_init();
3918	if (torture_init_error(firsterr))
3919		goto unwind;
3920	firsterr = rcu_torture_barrier_init();
3921	if (torture_init_error(firsterr))
3922		goto unwind;
3923	firsterr = rcu_torture_read_exit_init();
3924	if (torture_init_error(firsterr))
3925		goto unwind;
3926	if (object_debug)
3927		rcu_test_debug_objects();
3928	torture_init_end();
3929	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
3930	return 0;
3931
3932unwind:
3933	torture_init_end();
3934	rcu_torture_cleanup();
3935	if (shutdown_secs) {
3936		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3937		kernel_power_off();
3938	}
3939	return firsterr;
3940}
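
rcu_torture_init() remembers only the first failure in firsterr and funnels every error path through a single unwind label that runs the common teardown. The same structure in a self-contained sketch (illustrative names):

#include <errno.h>
#include <stdlib.h>

static int subsystem_init(void)
{
	int firsterr = 0;
	void *a = NULL, *b = NULL;

	a = malloc(16);
	if (!a) {
		firsterr = -ENOMEM;
		goto unwind;
	}
	b = malloc(16);
	if (!b) {
		firsterr = -ENOMEM;
		goto unwind;
	}
	return 0;	/* success: resources stay live, like the kthreads */

unwind:
	free(b);	/* teardown tolerates partially-built state */
	free(a);
	return firsterr;
}

int main(void)
{
	return subsystem_init() ? 1 : 0;
}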
3941
3942module_init(rcu_torture_init);
3943module_exit(rcu_torture_cleanup);
v5.9
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Read-Copy Update module-based torture test facility
   4 *
   5 * Copyright (C) IBM Corporation, 2005, 2006
   6 *
   7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8 *	  Josh Triplett <josh@joshtriplett.org>
   9 *
  10 * See also:  Documentation/RCU/torture.rst
  11 */
  12
  13#define pr_fmt(fmt) fmt
  14
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/init.h>
  18#include <linux/module.h>
  19#include <linux/kthread.h>
  20#include <linux/err.h>
  21#include <linux/spinlock.h>
  22#include <linux/smp.h>
  23#include <linux/rcupdate_wait.h>
 
  24#include <linux/interrupt.h>
  25#include <linux/sched/signal.h>
  26#include <uapi/linux/sched/types.h>
  27#include <linux/atomic.h>
  28#include <linux/bitops.h>
  29#include <linux/completion.h>
  30#include <linux/moduleparam.h>
  31#include <linux/percpu.h>
  32#include <linux/notifier.h>
  33#include <linux/reboot.h>
  34#include <linux/freezer.h>
  35#include <linux/cpu.h>
  36#include <linux/delay.h>
  37#include <linux/stat.h>
  38#include <linux/srcu.h>
  39#include <linux/slab.h>
  40#include <linux/trace_clock.h>
  41#include <asm/byteorder.h>
  42#include <linux/torture.h>
  43#include <linux/vmalloc.h>
  44#include <linux/sched/debug.h>
  45#include <linux/sched/sysctl.h>
  46#include <linux/oom.h>
  47#include <linux/tick.h>
  48#include <linux/rcupdate_trace.h>
 
  49
  50#include "rcu.h"
  51
  52MODULE_LICENSE("GPL");
  53MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
  54
  55#ifndef data_race
  56#define data_race(expr)							\
  57	({								\
  58		expr;							\
  59	})
  60#endif
  61#ifndef ASSERT_EXCLUSIVE_WRITER
  62#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
  63#endif
  64#ifndef ASSERT_EXCLUSIVE_ACCESS
  65#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
  66#endif
  67
  68/* Bits for ->extendables field, extendables param, and related definitions. */
  69#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
  70#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
 
 
  71#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
  72#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
  73#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
  74#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
  75#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
  76#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
  77#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
 
  78#define RCUTORTURE_MAX_EXTEND	 \
  79	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
  80	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
  81#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
  82					/* Must be power of two minus one. */
  83#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
  84
  85torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
  86	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
  87torture_param(int, fqs_duration, 0,
  88	      "Duration of fqs bursts (us), 0 to disable");
  89torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
  90torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
  91torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
  92torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
  93torture_param(int, fwd_progress_holdoff, 60,
  94	      "Time between forward-progress tests (s)");
  95torture_param(bool, fwd_progress_need_resched, 1,
  96	      "Hide cond_resched() behind need_resched()");
  97torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
 
 
 
 
  98torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
  99torture_param(bool, gp_normal, false,
 100	     "Use normal (non-expedited) GP wait primitives");
 
 
 
 101torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
 102torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
 103torture_param(int, n_barrier_cbs, 0,
 104	     "# of callbacks/kthreads for barrier testing");
 105torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
 106torture_param(int, nreaders, -1, "Number of RCU reader threads");
 107torture_param(int, object_debug, 0,
 108	     "Enable debug-object double call_rcu() testing");
 109torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 110torture_param(int, onoff_interval, 0,
 111	     "Time between CPU hotplugs (jiffies), 0=disable");
 112torture_param(int, read_exit_delay, 13,
 113	      "Delay between read-then-exit episodes (s)");
 114torture_param(int, read_exit_burst, 16,
 115	      "# of read-then-exit bursts per episode, zero to disable");
 116torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 117torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 118torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
 119torture_param(int, stall_cpu_holdoff, 10,
 120	     "Time to wait before starting stall (s).");
 121torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
 122torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
 123torture_param(int, stall_gp_kthread, 0,
 124	      "Grace-period kthread stall duration (s).");
 125torture_param(int, stat_interval, 60,
 126	     "Number of seconds between stats printk()s");
 127torture_param(int, stutter, 5, "Number of seconds to run/halt test");
 128torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
 129torture_param(int, test_boost_duration, 4,
 130	     "Duration of each boost test, seconds.");
 131torture_param(int, test_boost_interval, 7,
 132	     "Interval between boost tests, seconds.");
 133torture_param(bool, test_no_idle_hz, true,
 134	     "Test support for tickless idle CPUs");
 135torture_param(int, verbose, 1,
 136	     "Enable verbose debugging printk()s");
 137
 138static char *torture_type = "rcu";
 139module_param(torture_type, charp, 0444);
 140MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
 141
 
 142static int nrealreaders;
 143static struct task_struct *writer_task;
 144static struct task_struct **fakewriter_tasks;
 145static struct task_struct **reader_tasks;
 
 146static struct task_struct *stats_task;
 147static struct task_struct *fqs_task;
 148static struct task_struct *boost_tasks[NR_CPUS];
 149static struct task_struct *stall_task;
 150static struct task_struct *fwd_prog_task;
 151static struct task_struct **barrier_cbs_tasks;
 152static struct task_struct *barrier_task;
 153static struct task_struct *read_exit_task;
 154
 155#define RCU_TORTURE_PIPE_LEN 10
 156
 
 
 
 
 
 
 
 
 
 
 157struct rcu_torture {
 158	struct rcu_head rtort_rcu;
 159	int rtort_pipe_count;
 160	struct list_head rtort_free;
 161	int rtort_mbtest;
 
 162};
 163
 164static LIST_HEAD(rcu_torture_freelist);
 165static struct rcu_torture __rcu *rcu_torture_current;
 166static unsigned long rcu_torture_current_version;
 167static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 168static DEFINE_SPINLOCK(rcu_torture_lock);
 169static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
 170static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
 171static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
 
 172static atomic_t n_rcu_torture_alloc;
 173static atomic_t n_rcu_torture_alloc_fail;
 174static atomic_t n_rcu_torture_free;
 175static atomic_t n_rcu_torture_mberror;
 
 
 176static atomic_t n_rcu_torture_error;
 177static long n_rcu_torture_barrier_error;
 178static long n_rcu_torture_boost_ktrerror;
 179static long n_rcu_torture_boost_rterror;
 180static long n_rcu_torture_boost_failure;
 181static long n_rcu_torture_boosts;
 182static atomic_long_t n_rcu_torture_timers;
 183static long n_barrier_attempts;
 184static long n_barrier_successes; /* did rcu_barrier test succeed? */
 185static unsigned long n_read_exits;
 186static struct list_head rcu_torture_removed;
 187static unsigned long shutdown_jiffies;
 
 
 
 188
 189static int rcu_torture_writer_state;
 190#define RTWS_FIXED_DELAY	0
 191#define RTWS_DELAY		1
 192#define RTWS_REPLACE		2
 193#define RTWS_DEF_FREE		3
 194#define RTWS_EXP_SYNC		4
 195#define RTWS_COND_GET		5
 196#define RTWS_COND_SYNC		6
 197#define RTWS_SYNC		7
 198#define RTWS_STUTTER		8
 199#define RTWS_STOPPING		9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 200static const char * const rcu_torture_writer_state_names[] = {
 201	"RTWS_FIXED_DELAY",
 202	"RTWS_DELAY",
 203	"RTWS_REPLACE",
 204	"RTWS_DEF_FREE",
 205	"RTWS_EXP_SYNC",
 206	"RTWS_COND_GET",
 
 
 
 207	"RTWS_COND_SYNC",
 
 
 
 
 
 
 
 
 
 
 
 208	"RTWS_SYNC",
 209	"RTWS_STUTTER",
 210	"RTWS_STOPPING",
 211};
 212
 213/* Record reader segment types and duration for first failing read. */
 214struct rt_read_seg {
 215	int rt_readstate;
 216	unsigned long rt_delay_jiffies;
 217	unsigned long rt_delay_ms;
 218	unsigned long rt_delay_us;
 219	bool rt_preempted;
 220};
 221static int err_segs_recorded;
 222static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
 223static int rt_read_nsegs;
 224
 225static const char *rcu_torture_writer_state_getname(void)
 226{
 227	unsigned int i = READ_ONCE(rcu_torture_writer_state);
 228
 229	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
 230		return "???";
 231	return rcu_torture_writer_state_names[i];
 232}
 233
 234#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
 235#define rcu_can_boost() 1
 236#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
 237#define rcu_can_boost() 0
 238#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
 239
 240#ifdef CONFIG_RCU_TRACE
 241static u64 notrace rcu_trace_clock_local(void)
 242{
 243	u64 ts = trace_clock_local();
 244
 245	(void)do_div(ts, NSEC_PER_USEC);
 246	return ts;
 247}
 248#else /* #ifdef CONFIG_RCU_TRACE */
 249static u64 notrace rcu_trace_clock_local(void)
 250{
 251	return 0ULL;
 252}
 253#endif /* #else #ifdef CONFIG_RCU_TRACE */
 254
 255/*
 256 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 257 * to avoid interfering with test shutdown.
 258 */
 259static bool shutdown_time_arrived(void)
 260{
 261	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
 262}
 263
 264static unsigned long boost_starttime;	/* jiffies of next boost test start. */
 265static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
 266					/*  and boost task create/destroy. */
 267static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
 268static bool barrier_phase;		/* Test phase. */
 269static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
 270static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 271static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
 272
 273static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
 274
 275/*
 276 * Allocate an element from the rcu_tortures pool.
 277 */
 278static struct rcu_torture *
 279rcu_torture_alloc(void)
 280{
 281	struct list_head *p;
 282
 283	spin_lock_bh(&rcu_torture_lock);
 284	if (list_empty(&rcu_torture_freelist)) {
 285		atomic_inc(&n_rcu_torture_alloc_fail);
 286		spin_unlock_bh(&rcu_torture_lock);
 287		return NULL;
 288	}
 289	atomic_inc(&n_rcu_torture_alloc);
 290	p = rcu_torture_freelist.next;
 291	list_del_init(p);
 292	spin_unlock_bh(&rcu_torture_lock);
 293	return container_of(p, struct rcu_torture, rtort_free);
 294}
 295
 296/*
 297 * Free an element to the rcu_tortures pool.
 298 */
 299static void
 300rcu_torture_free(struct rcu_torture *p)
 301{
 302	atomic_inc(&n_rcu_torture_free);
 303	spin_lock_bh(&rcu_torture_lock);
 304	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
 305	spin_unlock_bh(&rcu_torture_lock);
 306}
 307
 308/*
 309 * Operations vector for selecting different types of tests.
 310 */
 311
 312struct rcu_torture_ops {
 313	int ttype;
 314	void (*init)(void);
 315	void (*cleanup)(void);
 316	int (*readlock)(void);
 317	void (*read_delay)(struct torture_random_state *rrsp,
 318			   struct rt_read_seg *rtrsp);
 319	void (*readunlock)(int idx);
 
 320	unsigned long (*get_gp_seq)(void);
 321	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 322	void (*deferred_free)(struct rcu_torture *p);
 323	void (*sync)(void);
 324	void (*exp_sync)(void);
 325	unsigned long (*get_state)(void);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 326	void (*cond_sync)(unsigned long oldstate);
 
 327	call_rcu_func_t call;
 328	void (*cb_barrier)(void);
 329	void (*fqs)(void);
 330	void (*stats)(void);
 
 
 331	int (*stall_dur)(void);
 
 332	int irq_capable;
 333	int can_boost;
 334	int extendables;
 335	int slow_gps;
 
 336	const char *name;
 337};
 338
 339static struct rcu_torture_ops *cur_ops;
 340
 341/*
 342 * Definitions for rcu torture testing.
 343 */
 344
 345static int rcu_torture_read_lock(void) __acquires(RCU)
 
 
 
 
 
 346{
 347	rcu_read_lock();
 348	return 0;
 349}
 350
 351static void
 352rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 353{
 354	unsigned long started;
 355	unsigned long completed;
 356	const unsigned long shortdelay_us = 200;
 357	unsigned long longdelay_ms = 300;
 358	unsigned long long ts;
 359
 360	/* We want a short delay sometimes to make a reader delay the grace
 361	 * period, and we want a long delay occasionally to trigger
 362	 * force_quiescent_state. */
 363
 364	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
 365	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
 366		started = cur_ops->get_gp_seq();
 367		ts = rcu_trace_clock_local();
 368		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
 369			longdelay_ms = 5; /* Avoid triggering BH limits. */
 370		mdelay(longdelay_ms);
 371		rtrsp->rt_delay_ms = longdelay_ms;
 372		completed = cur_ops->get_gp_seq();
 373		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
 374					  started, completed);
 375	}
 376	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
 377		udelay(shortdelay_us);
 378		rtrsp->rt_delay_us = shortdelay_us;
 379	}
 380	if (!preempt_count() &&
 381	    !(torture_random(rrsp) % (nrealreaders * 500))) {
 382		torture_preempt_schedule();  /* QS only if preemptible. */
 383		rtrsp->rt_preempted = true;
 384	}
 385}
 386
 387static void rcu_torture_read_unlock(int idx) __releases(RCU)
 388{
 389	rcu_read_unlock();
 390}
 391
 392/*
 393 * Update callback in the pipe.  This should be invoked after a grace period.
 394 */
 395static bool
 396rcu_torture_pipe_update_one(struct rcu_torture *rp)
 397{
 398	int i;
 
 399
 
 
 
 
 400	i = READ_ONCE(rp->rtort_pipe_count);
 401	if (i > RCU_TORTURE_PIPE_LEN)
 402		i = RCU_TORTURE_PIPE_LEN;
 403	atomic_inc(&rcu_torture_wcount[i]);
 404	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
 405	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
 406		rp->rtort_mbtest = 0;
 407		return true;
 408	}
 409	return false;
 410}
 411
 412/*
 413 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 414 * primitives.
 415 */
 416static void
 417rcu_torture_pipe_update(struct rcu_torture *old_rp)
 418{
 419	struct rcu_torture *rp;
 420	struct rcu_torture *rp1;
 421
 422	if (old_rp)
 423		list_add(&old_rp->rtort_free, &rcu_torture_removed);
 424	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
 425		if (rcu_torture_pipe_update_one(rp)) {
 426			list_del(&rp->rtort_free);
 427			rcu_torture_free(rp);
 428		}
 429	}
 430}
 431
 432static void
 433rcu_torture_cb(struct rcu_head *p)
 434{
 435	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 436
 437	if (torture_must_stop_irq()) {
 438		/* Test is ending, just drop callbacks on the floor. */
 439		/* The next initialization will pick up the pieces. */
 440		return;
 441	}
 442	if (rcu_torture_pipe_update_one(rp))
 443		rcu_torture_free(rp);
 444	else
 445		cur_ops->deferred_free(rp);
 446}
 447
 448static unsigned long rcu_no_completed(void)
 449{
 450	return 0;
 451}
 452
 453static void rcu_torture_deferred_free(struct rcu_torture *p)
 454{
 455	call_rcu(&p->rtort_rcu, rcu_torture_cb);
 456}
 457
 458static void rcu_sync_torture_init(void)
 459{
 460	INIT_LIST_HEAD(&rcu_torture_removed);
 461}
 462
 
 
 
 
 
 463static struct rcu_torture_ops rcu_ops = {
 464	.ttype		= RCU_FLAVOR,
 465	.init		= rcu_sync_torture_init,
 466	.readlock	= rcu_torture_read_lock,
 467	.read_delay	= rcu_read_delay,
 468	.readunlock	= rcu_torture_read_unlock,
 469	.get_gp_seq	= rcu_get_gp_seq,
 470	.gp_diff	= rcu_seq_diff,
 471	.deferred_free	= rcu_torture_deferred_free,
 472	.sync		= synchronize_rcu,
 473	.exp_sync	= synchronize_rcu_expedited,
 474	.get_state	= get_state_synchronize_rcu,
 475	.cond_sync	= cond_synchronize_rcu,
 476	.call		= call_rcu,
 477	.cb_barrier	= rcu_barrier,
 478	.fqs		= rcu_force_quiescent_state,
 479	.stats		= NULL,
 480	.stall_dur	= rcu_jiffies_till_stall_check,
 481	.irq_capable	= 1,
 482	.can_boost	= rcu_can_boost(),
 483	.extendables	= RCUTORTURE_MAX_EXTEND,
 484	.name		= "rcu"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 485};
 486
 487/*
 488 * Don't even think about trying any of these in real life!!!
 489 * The names includes "busted", and they really means it!
 490 * The only purpose of these functions is to provide a buggy RCU
 491 * implementation to make sure that rcutorture correctly emits
 492 * buggy-RCU error messages.
 493 */
 494static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
 495{
 496	/* This is a deliberate bug for testing purposes only! */
 497	rcu_torture_cb(&p->rtort_rcu);
 498}
 499
 500static void synchronize_rcu_busted(void)
 501{
 502	/* This is a deliberate bug for testing purposes only! */
 503}
 504
 505static void
 506call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
 507{
 508	/* This is a deliberate bug for testing purposes only! */
 509	func(head);
 510}
 511
 512static struct rcu_torture_ops rcu_busted_ops = {
 513	.ttype		= INVALID_RCU_FLAVOR,
 514	.init		= rcu_sync_torture_init,
 515	.readlock	= rcu_torture_read_lock,
 516	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 517	.readunlock	= rcu_torture_read_unlock,
 
 518	.get_gp_seq	= rcu_no_completed,
 519	.deferred_free	= rcu_busted_torture_deferred_free,
 520	.sync		= synchronize_rcu_busted,
 521	.exp_sync	= synchronize_rcu_busted,
 522	.call		= call_rcu_busted,
 523	.cb_barrier	= NULL,
 524	.fqs		= NULL,
 525	.stats		= NULL,
 526	.irq_capable	= 1,
 527	.name		= "busted"
 528};
 529
 530/*
 531 * Definitions for srcu torture testing.
 532 */
 533
 534DEFINE_STATIC_SRCU(srcu_ctl);
 535static struct srcu_struct srcu_ctld;
 536static struct srcu_struct *srcu_ctlp = &srcu_ctl;
 
 537
 538static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
 539{
 540	return srcu_read_lock(srcu_ctlp);
 
 
 
 541}
 542
 543static void
 544srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 545{
 546	long delay;
 547	const long uspertick = 1000000 / HZ;
 548	const long longdelay = 10;
 549
 550	/* We want there to be long-running readers, but not all the time. */
 551
 552	delay = torture_random(rrsp) %
 553		(nrealreaders * 2 * longdelay * uspertick);
 554	if (!delay && in_task()) {
 555		schedule_timeout_interruptible(longdelay);
 556		rtrsp->rt_delay_jiffies = longdelay;
 557	} else {
 558		rcu_read_delay(rrsp, rtrsp);
 559	}
 560}
 561
 562static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
 563{
 564	srcu_read_unlock(srcu_ctlp, idx);
 
 
 
 
 
 
 
 
 565}
 566
 567static unsigned long srcu_torture_completed(void)
 568{
 569	return srcu_batches_completed(srcu_ctlp);
 570}
 571
 572static void srcu_torture_deferred_free(struct rcu_torture *rp)
 573{
 574	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
 575}
 576
 577static void srcu_torture_synchronize(void)
 578{
 579	synchronize_srcu(srcu_ctlp);
 580}
 581
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 582static void srcu_torture_call(struct rcu_head *head,
 583			      rcu_callback_t func)
 584{
 585	call_srcu(srcu_ctlp, head, func);
 586}
 587
 588static void srcu_torture_barrier(void)
 589{
 590	srcu_barrier(srcu_ctlp);
 591}
 592
 593static void srcu_torture_stats(void)
 594{
 595	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
 596}
 597
 598static void srcu_torture_synchronize_expedited(void)
 599{
 600	synchronize_srcu_expedited(srcu_ctlp);
 601}
 602
 603static struct rcu_torture_ops srcu_ops = {
 604	.ttype		= SRCU_FLAVOR,
 605	.init		= rcu_sync_torture_init,
 606	.readlock	= srcu_torture_read_lock,
 607	.read_delay	= srcu_read_delay,
 608	.readunlock	= srcu_torture_read_unlock,
 
 609	.get_gp_seq	= srcu_torture_completed,
 610	.deferred_free	= srcu_torture_deferred_free,
 611	.sync		= srcu_torture_synchronize,
 612	.exp_sync	= srcu_torture_synchronize_expedited,
 
 
 
 613	.call		= srcu_torture_call,
 614	.cb_barrier	= srcu_torture_barrier,
 615	.stats		= srcu_torture_stats,
 
 616	.irq_capable	= 1,
 
 617	.name		= "srcu"
 618};
 619
 620static void srcu_torture_init(void)
 621{
 622	rcu_sync_torture_init();
 623	WARN_ON(init_srcu_struct(&srcu_ctld));
 624	srcu_ctlp = &srcu_ctld;
 625}
 626
 627static void srcu_torture_cleanup(void)
 628{
 629	cleanup_srcu_struct(&srcu_ctld);
 630	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
 631}
 632
 633/* As above, but dynamically allocated. */
 634static struct rcu_torture_ops srcud_ops = {
 635	.ttype		= SRCU_FLAVOR,
 636	.init		= srcu_torture_init,
 637	.cleanup	= srcu_torture_cleanup,
 638	.readlock	= srcu_torture_read_lock,
 639	.read_delay	= srcu_read_delay,
 640	.readunlock	= srcu_torture_read_unlock,
 
 641	.get_gp_seq	= srcu_torture_completed,
 642	.deferred_free	= srcu_torture_deferred_free,
 643	.sync		= srcu_torture_synchronize,
 644	.exp_sync	= srcu_torture_synchronize_expedited,
 
 
 
 645	.call		= srcu_torture_call,
 646	.cb_barrier	= srcu_torture_barrier,
 647	.stats		= srcu_torture_stats,
 
 648	.irq_capable	= 1,
 
 649	.name		= "srcud"
 650};
 651
 652/* As above, but broken due to inappropriate reader extension. */
 653static struct rcu_torture_ops busted_srcud_ops = {
 654	.ttype		= SRCU_FLAVOR,
 655	.init		= srcu_torture_init,
 656	.cleanup	= srcu_torture_cleanup,
 657	.readlock	= srcu_torture_read_lock,
 658	.read_delay	= rcu_read_delay,
 659	.readunlock	= srcu_torture_read_unlock,
 
 660	.get_gp_seq	= srcu_torture_completed,
 661	.deferred_free	= srcu_torture_deferred_free,
 662	.sync		= srcu_torture_synchronize,
 663	.exp_sync	= srcu_torture_synchronize_expedited,
 664	.call		= srcu_torture_call,
 665	.cb_barrier	= srcu_torture_barrier,
 666	.stats		= srcu_torture_stats,
 667	.irq_capable	= 1,
 
 668	.extendables	= RCUTORTURE_MAX_EXTEND,
 669	.name		= "busted_srcud"
 670};
 671
 672/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 673 * Definitions for RCU-tasks torture testing.
 674 */
 675
 676static int tasks_torture_read_lock(void)
 677{
 678	return 0;
 679}
 680
 681static void tasks_torture_read_unlock(int idx)
 682{
 683}
 684
 685static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
 686{
 687	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
 688}
 689
 690static void synchronize_rcu_mult_test(void)
 691{
 692	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
 693}
 694
 695static struct rcu_torture_ops tasks_ops = {
 696	.ttype		= RCU_TASKS_FLAVOR,
 697	.init		= rcu_sync_torture_init,
 698	.readlock	= tasks_torture_read_lock,
 699	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 700	.readunlock	= tasks_torture_read_unlock,
 701	.get_gp_seq	= rcu_no_completed,
 702	.deferred_free	= rcu_tasks_torture_deferred_free,
 703	.sync		= synchronize_rcu_tasks,
 704	.exp_sync	= synchronize_rcu_mult_test,
 705	.call		= call_rcu_tasks,
 706	.cb_barrier	= rcu_barrier_tasks,
 
 707	.fqs		= NULL,
 708	.stats		= NULL,
 709	.irq_capable	= 1,
 710	.slow_gps	= 1,
 711	.name		= "tasks"
 712};
 713
 714/*
 715 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 716 * This implementation does not necessarily work well with CPU hotplug.
 717 */
 718
 719static void synchronize_rcu_trivial(void)
 720{
 721	int cpu;
 722
 723	for_each_online_cpu(cpu) {
 724		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
 725		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
 726	}
 727}
 728
 729static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
 730{
 731	preempt_disable();
 732	return 0;
 733}
 734
 735static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
 736{
 737	preempt_enable();
 738}
 739
 740static struct rcu_torture_ops trivial_ops = {
 741	.ttype		= RCU_TRIVIAL_FLAVOR,
 742	.init		= rcu_sync_torture_init,
 743	.readlock	= rcu_torture_read_lock_trivial,
 744	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 745	.readunlock	= rcu_torture_read_unlock_trivial,
 746	.get_gp_seq	= rcu_no_completed,
 747	.sync		= synchronize_rcu_trivial,
 748	.exp_sync	= synchronize_rcu_trivial,
 749	.fqs		= NULL,
 750	.stats		= NULL,
 751	.irq_capable	= 1,
 752	.name		= "trivial"
 753};
 754
 755/*
 756 * Definitions for rude RCU-tasks torture testing.
 757 */
 758
 759static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
 760{
 761	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
 762}
 763
 764static struct rcu_torture_ops tasks_rude_ops = {
 765	.ttype		= RCU_TASKS_RUDE_FLAVOR,
 766	.init		= rcu_sync_torture_init,
 767	.readlock	= rcu_torture_read_lock_trivial,
 768	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 769	.readunlock	= rcu_torture_read_unlock_trivial,
 770	.get_gp_seq	= rcu_no_completed,
 771	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
 772	.sync		= synchronize_rcu_tasks_rude,
 773	.exp_sync	= synchronize_rcu_tasks_rude,
 774	.call		= call_rcu_tasks_rude,
 775	.cb_barrier	= rcu_barrier_tasks_rude,
 
 
 776	.fqs		= NULL,
 777	.stats		= NULL,
 778	.irq_capable	= 1,
 779	.name		= "tasks-rude"
 780};
 781
 
 
 
 
 
 
 
 
 
 
 
 782/*
 783 * Definitions for tracing RCU-tasks torture testing.
 784 */
 785
 786static int tasks_tracing_torture_read_lock(void)
 787{
 788	rcu_read_lock_trace();
 789	return 0;
 790}
 791
 792static void tasks_tracing_torture_read_unlock(int idx)
 793{
 794	rcu_read_unlock_trace();
 795}
 796
 797static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
 798{
 799	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
 800}
 801
 802static struct rcu_torture_ops tasks_tracing_ops = {
 803	.ttype		= RCU_TASKS_TRACING_FLAVOR,
 804	.init		= rcu_sync_torture_init,
 805	.readlock	= tasks_tracing_torture_read_lock,
 806	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
 807	.readunlock	= tasks_tracing_torture_read_unlock,
 
 808	.get_gp_seq	= rcu_no_completed,
 809	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
 810	.sync		= synchronize_rcu_tasks_trace,
 811	.exp_sync	= synchronize_rcu_tasks_trace,
 812	.call		= call_rcu_tasks_trace,
 813	.cb_barrier	= rcu_barrier_tasks_trace,
 
 
 814	.fqs		= NULL,
 815	.stats		= NULL,
 816	.irq_capable	= 1,
 817	.slow_gps	= 1,
 818	.name		= "tasks-tracing"
 819};
 820
 
 
 
 
 
 
 
 
 
 821static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
 822{
 823	if (!cur_ops->gp_diff)
 824		return new - old;
 825	return cur_ops->gp_diff(new, old);
 826}
 827
 828static bool __maybe_unused torturing_tasks(void)
 829{
 830	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
 831}
 832
 833/*
 834 * RCU torture priority-boost testing.  Runs one real-time thread per
 835 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 836 * spinning waiting for them to be invoked.  If a given callback takes
 837 * too long to be invoked, we assume that priority inversion has occurred.
 838 */
 839
 840struct rcu_boost_inflight {
 841	struct rcu_head rcu;
 842	int inflight;
 843};
 844
 845static void rcu_torture_boost_cb(struct rcu_head *head)
 846{
 847	struct rcu_boost_inflight *rbip =
 848		container_of(head, struct rcu_boost_inflight, rcu);
 849
 850	/* Ensure RCU-core accesses precede clearing ->inflight */
 851	smp_store_release(&rbip->inflight, 0);
 852}
 853
 854static int old_rt_runtime = -1;
 855
 856static void rcu_torture_disable_rt_throttle(void)
 857{
 858	/*
 859	 * Disable RT throttling so that rcutorture's boost threads don't get
 860	 * throttled.  This is only possible when rcutorture is built-in;
 861	 * otherwise, the user must do this manually by setting the
 862	 * sched_rt_period_us and sched_rt_runtime_us sysctls.
 863	 */
 864	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
 865		return;
 866
 867	old_rt_runtime = sysctl_sched_rt_runtime;
 868	sysctl_sched_rt_runtime = -1;
 869}
 870
 871static void rcu_torture_enable_rt_throttle(void)
 872{
 873	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
 874		return;
 875
 876	sysctl_sched_rt_runtime = old_rt_runtime;
 877	old_rt_runtime = -1;
 878}
 879
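/*
 * Illustrative note: when rcutorture is built as a module, the equivalent
 * manual step is to write -1 to the RT-throttling sysctl before the run
 * (as root):
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * and to restore the saved value (950000 by default) afterwards.
 */
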
 880static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
 881{
 882	if (end - start > test_boost_duration * HZ - HZ / 2) {
 883		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
 884		n_rcu_torture_boost_failure++;
 885
 886		return true; /* failed */
 887	}
 888
 889	return false; /* passed */
 890}
 891
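/*
 * Worked example for the check above: with HZ=1000 and
 * test_boost_duration=4, failure is declared when callback invocation
 * takes longer than 4 * 1000 - 1000 / 2 = 3500 jiffies (3.5 seconds),
 * that is, half a second of slack under the nominal 4-second interval.
 */
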
 892static int rcu_torture_boost(void *arg)
 893{
 894	unsigned long call_rcu_time;
 895	unsigned long endtime;
 896	unsigned long oldstarttime;
 897	struct rcu_boost_inflight rbi = { .inflight = 0 };
 898
 899	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
 900
 901	/* Set real-time priority. */
 902	sched_set_fifo_low(current);
 903
 904	init_rcu_head_on_stack(&rbi.rcu);
 905	/* Each pass through the following loop does one boost-test cycle. */
 906	do {
 907		/* Track whether the test has already failed in this interval. */
 908		bool failed = false;
 909
 910		/* Increment n_rcu_torture_boosts once per boost-test */
 911		while (!kthread_should_stop()) {
 912			if (mutex_trylock(&boost_mutex)) {
 913				n_rcu_torture_boosts++;
 914				mutex_unlock(&boost_mutex);
 915				break;
 916			}
 917			schedule_timeout_uninterruptible(1);
 918		}
 919		if (kthread_should_stop())
 920			goto checkwait;
 921
 922		/* Wait for the next test interval. */
 923		oldstarttime = boost_starttime;
 924		while (time_before(jiffies, oldstarttime)) {
 925			schedule_timeout_interruptible(oldstarttime - jiffies);
 926			stutter_wait("rcu_torture_boost");
 927			if (torture_must_stop())
 928				goto checkwait;
 929		}
 930
 931		/* Do one boost-test interval. */
 932		endtime = oldstarttime + test_boost_duration * HZ;
 933		call_rcu_time = jiffies;
 934		while (time_before(jiffies, endtime)) {
 935			/* If we don't have a callback in flight, post one. */
 936			if (!smp_load_acquire(&rbi.inflight)) {
 937				/* RCU core before ->inflight = 1. */
 938				smp_store_release(&rbi.inflight, 1);
 939				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
 940				/* Check if the boost test failed */
 941				failed = failed ||
 942					 rcu_torture_boost_failed(call_rcu_time,
 943								 jiffies);
 944				call_rcu_time = jiffies;
 945			}
 946			stutter_wait("rcu_torture_boost");
 947			if (torture_must_stop())
 948				goto checkwait;
 949		}
 950
 951		/*
 952		 * If boosting never happened, ->inflight will still be 1, in
 953		 * which case the failure check in the above loop never ran,
 954		 * so do one final check here.
 955		 */
 956		if (!failed && smp_load_acquire(&rbi.inflight))
 957			rcu_torture_boost_failed(call_rcu_time, jiffies);
 958
 959		/*
 960		 * Set the start time of the next test interval.
 961		 * Yes, this is vulnerable to long delays, but such
 962		 * delays simply cause a false negative for the next
 963		 * interval.  Besides, we are running at RT priority,
 964		 * so delays should be relatively rare.
 965		 */
 966		while (oldstarttime == boost_starttime &&
 967		       !kthread_should_stop()) {
 968			if (mutex_trylock(&boost_mutex)) {
 969				boost_starttime = jiffies +
 970						  test_boost_interval * HZ;
 971				mutex_unlock(&boost_mutex);
 972				break;
 973			}
 974			schedule_timeout_uninterruptible(1);
 975		}
 976
 977		/* Go do the stutter. */
 978checkwait:	stutter_wait("rcu_torture_boost");
 979	} while (!torture_must_stop());
 980
 981	/* Clean up and exit. */
 982	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
 983		torture_shutdown_absorb("rcu_torture_boost");
 984		schedule_timeout_uninterruptible(1);
 985	}
 986	destroy_rcu_head_on_stack(&rbi.rcu);
 987	torture_kthread_stopping("rcu_torture_boost");
 988	return 0;
 989}
 990
 991/*
 992 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 993 * bursts of calls to force_quiescent_state(), increasing the probability
 994 * of occurrence of some important types of race conditions.
 995 */
 996static int
 997rcu_torture_fqs(void *arg)
 998{
 999	unsigned long fqs_resume_time;
1000	int fqs_burst_remaining;
1001
1002	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1003	do {
1004		fqs_resume_time = jiffies + fqs_stutter * HZ;
1005		while (time_before(jiffies, fqs_resume_time) &&
1006		       !kthread_should_stop()) {
1007			schedule_timeout_interruptible(1);
1008		}
1009		fqs_burst_remaining = fqs_duration;
1010		while (fqs_burst_remaining > 0 &&
1011		       !kthread_should_stop()) {
1012			cur_ops->fqs();
1013			udelay(fqs_holdoff);
1014			fqs_burst_remaining -= fqs_holdoff;
1015		}
1016		stutter_wait("rcu_torture_fqs");
1017	} while (!torture_must_stop());
1018	torture_kthread_stopping("rcu_torture_fqs");
1019	return 0;
1020}
1021
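/*
 * Worked example for the burst loop in rcu_torture_fqs() above:
 * fqs_burst_remaining counts down in microseconds, so with
 * fqs_duration=100 and fqs_holdoff=10 each burst issues roughly
 * 100 / 10 = 10 ->fqs() calls spaced 10 microseconds apart, with an
 * fqs_stutter-second wait between bursts.
 */
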
1022/*
1023 * RCU torture writer kthread.  Repeatedly substitutes a new structure
1024 * for that pointed to by rcu_torture_current, freeing the old structure
1025 * after a series of grace periods (the "pipeline").
1026 */
1027static int
1028rcu_torture_writer(void *arg)
1029{
1030	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1031	int expediting = 0;
1032	unsigned long gp_snap;
1033	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
1034	bool gp_sync1 = gp_sync;
1035	int i;
1036	struct rcu_torture *rp;
1037	struct rcu_torture *old_rp;
1038	static DEFINE_TORTURE_RANDOM(rand);
1039	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
1040			   RTWS_COND_GET, RTWS_SYNC };
1041	int nsynctypes = 0;
1042
1043	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1044	if (!can_expedite)
1045		pr_alert("%s" TORTURE_FLAG
1046			 " GP expediting controlled from boot/sysfs for %s.\n",
1047			 torture_type, cur_ops->name);
1048
1049	/* Initialize synctype[] array.  If none set, take default. */
1050	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
1051		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
1052	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
1053		synctype[nsynctypes++] = RTWS_COND_GET;
1054		pr_info("%s: Testing conditional GPs.\n", __func__);
1055	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
1056		pr_alert("%s: gp_cond without primitives.\n", __func__);
1057	}
1058	if (gp_exp1 && cur_ops->exp_sync) {
1059		synctype[nsynctypes++] = RTWS_EXP_SYNC;
1060		pr_info("%s: Testing expedited GPs.\n", __func__);
1061	} else if (gp_exp && !cur_ops->exp_sync) {
1062		pr_alert("%s: gp_exp without primitives.\n", __func__);
1063	}
1064	if (gp_normal1 && cur_ops->deferred_free) {
1065		synctype[nsynctypes++] = RTWS_DEF_FREE;
1066		pr_info("%s: Testing asynchronous GPs.\n", __func__);
1067	} else if (gp_normal && !cur_ops->deferred_free) {
1068		pr_alert("%s: gp_normal without primitives.\n", __func__);
1069	}
1070	if (gp_sync1 && cur_ops->sync) {
1071		synctype[nsynctypes++] = RTWS_SYNC;
1072		pr_info("%s: Testing normal GPs.\n", __func__);
1073	} else if (gp_sync && !cur_ops->sync) {
1074		pr_alert("%s: gp_sync without primitives.\n", __func__);
1075	}
1076	if (WARN_ONCE(nsynctypes == 0,
1077		      "rcu_torture_writer: No update-side primitives.\n")) {
1078		/*
1079		 * No update-side primitives, so don't try updating.
1080		 * The resulting test won't be testing much, hence the
1081		 * above WARN_ONCE().
1082		 */
1083		rcu_torture_writer_state = RTWS_STOPPING;
1084		torture_kthread_stopping("rcu_torture_writer");
1085	}
1086
1087	do {
1088		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1089		schedule_timeout_uninterruptible(1);
1090		rp = rcu_torture_alloc();
1091		if (rp == NULL)
1092			continue;
1093		rp->rtort_pipe_count = 0;
1094		rcu_torture_writer_state = RTWS_DELAY;
1095		udelay(torture_random(&rand) & 0x3ff);
1096		rcu_torture_writer_state = RTWS_REPLACE;
1097		old_rp = rcu_dereference_check(rcu_torture_current,
1098					       current == writer_task);
1099		rp->rtort_mbtest = 1;
1100		rcu_assign_pointer(rcu_torture_current, rp);
1101		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1102		if (old_rp) {
1103			i = old_rp->rtort_pipe_count;
1104			if (i > RCU_TORTURE_PIPE_LEN)
1105				i = RCU_TORTURE_PIPE_LEN;
1106			atomic_inc(&rcu_torture_wcount[i]);
1107			WRITE_ONCE(old_rp->rtort_pipe_count,
1108				   old_rp->rtort_pipe_count + 1);
1109			switch (synctype[torture_random(&rand) % nsynctypes]) {
1110			case RTWS_DEF_FREE:
1111				rcu_torture_writer_state = RTWS_DEF_FREE;
1112				cur_ops->deferred_free(old_rp);
1113				break;
1114			case RTWS_EXP_SYNC:
1115				rcu_torture_writer_state = RTWS_EXP_SYNC;
1116				cur_ops->exp_sync();
1117				rcu_torture_pipe_update(old_rp);
1118				break;
1119			case RTWS_COND_GET:
1120				rcu_torture_writer_state = RTWS_COND_GET;
1121				gp_snap = cur_ops->get_state();
1122				i = torture_random(&rand) % 16;
1123				if (i != 0)
1124					schedule_timeout_interruptible(i);
1125				udelay(torture_random(&rand) % 1000);
1126				rcu_torture_writer_state = RTWS_COND_SYNC;
1127				cur_ops->cond_sync(gp_snap);
1128				rcu_torture_pipe_update(old_rp);
1129				break;
1130			case RTWS_SYNC:
1131				rcu_torture_writer_state = RTWS_SYNC;
1132				cur_ops->sync();
1133				rcu_torture_pipe_update(old_rp);
1134				break;
1135			default:
1136				WARN_ON_ONCE(1);
1137				break;
1138			}
1139		}
1140		WRITE_ONCE(rcu_torture_current_version,
1141			   rcu_torture_current_version + 1);
1142		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1143		if (can_expedite &&
1144		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1145			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1146			if (expediting >= 0)
1147				rcu_expedite_gp();
1148			else
1149				rcu_unexpedite_gp();
1150			if (++expediting > 3)
1151				expediting = -expediting;
1152		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1153			can_expedite = !rcu_gp_is_expedited() &&
1154				       !rcu_gp_is_normal();
1155		}
1156		rcu_torture_writer_state = RTWS_STUTTER;
1157		if (stutter_wait("rcu_torture_writer") &&
1158		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
1159		    !cur_ops->slow_gps &&
1160		    !torture_must_stop() &&
1161		    rcu_inkernel_boot_has_ended())
1162			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1163				if (list_empty(&rcu_tortures[i].rtort_free) &&
1164				    rcu_access_pointer(rcu_torture_current) !=
1165				    &rcu_tortures[i]) {
1166					rcu_ftrace_dump(DUMP_ALL);
1167					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1168				}
1169	} while (!torture_must_stop());
1170	rcu_torture_current = NULL;  // Let stats task know that we are done.
1171	/* Reset expediting back to unexpedited. */
1172	if (expediting > 0)
1173		expediting = -expediting;
1174	while (can_expedite && expediting++ < 0)
1175		rcu_unexpedite_gp();
1176	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1177	if (!can_expedite)
1178		pr_alert("%s" TORTURE_FLAG
1179			 " Dynamic grace-period expediting was disabled.\n",
1180			 torture_type);
1181	rcu_torture_writer_state = RTWS_STOPPING;
1182	torture_kthread_stopping("rcu_torture_writer");
1183	return 0;
1184}
1185
1186/*
1187 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1188 * delay between calls.
1189 */
1190static int
1191rcu_torture_fakewriter(void *arg)
1192{
1193	DEFINE_TORTURE_RANDOM(rand);
1194
1195	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1196	set_user_nice(current, MAX_NICE);
1197
1198	do {
1199		schedule_timeout_uninterruptible(1 + torture_random(&rand) % 10);
1200		udelay(torture_random(&rand) & 0x3ff);
1201		if (cur_ops->cb_barrier != NULL &&
1202		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1203			cur_ops->cb_barrier();
1204		} else if (gp_normal == gp_exp) {
1205			if (cur_ops->sync && torture_random(&rand) & 0x80)
1206				cur_ops->sync();
1207			else if (cur_ops->exp_sync)
1208				cur_ops->exp_sync();
1209		} else if (gp_normal && cur_ops->sync) {
1210			cur_ops->sync();
1211		} else if (cur_ops->exp_sync) {
1212			cur_ops->exp_sync();
1213		}
1214		stutter_wait("rcu_torture_fakewriter");
1215	} while (!torture_must_stop());
1216
1217	torture_kthread_stopping("rcu_torture_fakewriter");
1218	return 0;
1219}
1220
1221static void rcu_torture_timer_cb(struct rcu_head *rhp)
1222{
1223	kfree(rhp);
1224}
1225
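/*
 * Illustrative sketch, not part of rcutorture.c: rcu_torture_timer_cb()
 * above frees a bare rcu_head, but the same call_rcu() pattern more often
 * frees an enclosing structure via container_of().  The type and function
 * names here are hypothetical.
 */
struct example_obj {
	int data;
	struct rcu_head rh;
};

static void example_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static void example_retire(struct example_obj *p)
{
	call_rcu(&p->rh, example_free_cb); /* Freed after a grace period. */
}
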
1226/*
1227 * Do one extension of an RCU read-side critical section using the
1228 * current reader state in readstate (set to zero for initial entry
1229 * to extended critical section), set the new state as specified by
1230 * newstate (set to zero for final exit from extended critical section),
1231 * and random-number-generator state in trsp.  If this is neither the
1232 * beginning nor the end of the critical section and if there was actually a
1233 * change, do a ->read_delay().
1234 */
1235static void rcutorture_one_extend(int *readstate, int newstate,
1236				  struct torture_random_state *trsp,
1237				  struct rt_read_seg *rtrsp)
1238{
1239	unsigned long flags;
1240	int idxnew = -1;
1241	int idxold = *readstate;
1242	int statesnew = ~*readstate & newstate;
1243	int statesold = *readstate & ~newstate;
1244
1245	WARN_ON_ONCE(idxold < 0);
1246	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1247	rtrsp->rt_readstate = newstate;
1248
1249	/* First, put new protection in place to avoid critical-section gap. */
1250	if (statesnew & RCUTORTURE_RDR_BH)
1251		local_bh_disable();
1252	if (statesnew & RCUTORTURE_RDR_IRQ)
1253		local_irq_disable();
1254	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1255		preempt_disable();
1256	if (statesnew & RCUTORTURE_RDR_RBH)
1257		rcu_read_lock_bh();
1258	if (statesnew & RCUTORTURE_RDR_SCHED)
1259		rcu_read_lock_sched();
1260	if (statesnew & RCUTORTURE_RDR_RCU)
1261		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1262
1263	/* Next, remove old protection, irq first due to bh conflict. */
1264	if (statesold & RCUTORTURE_RDR_IRQ)
1265		local_irq_enable();
1266	if (statesold & RCUTORTURE_RDR_BH)
1267		local_bh_enable();
1268	if (statesold & RCUTORTURE_RDR_PREEMPT)
1269		preempt_enable();
1270	if (statesold & RCUTORTURE_RDR_RBH)
1271		rcu_read_unlock_bh();
1272	if (statesold & RCUTORTURE_RDR_SCHED)
1273		rcu_read_unlock_sched();
1274	if (statesold & RCUTORTURE_RDR_RCU) {
1275		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
1276
1277		if (lockit)
1278			raw_spin_lock_irqsave(&current->pi_lock, flags);
1279		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1280		if (lockit)
1281			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1282	}
1283
1284	/* Delay if neither beginning nor end and there was a change. */
1285	if ((statesnew || statesold) && *readstate && newstate)
1286		cur_ops->read_delay(trsp, rtrsp);
1287
1288	/* Update the reader state. */
1289	if (idxnew == -1)
1290		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1291	WARN_ON_ONCE(idxnew < 0);
1292	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1293	*readstate = idxnew | newstate;
1294	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1295	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1296}
1297
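/*
 * Worked example for the statesnew/statesold computation above: going
 * from *readstate = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU to
 * newstate = RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_RCU yields
 * statesnew = RCUTORTURE_RDR_IRQ and statesold = RCUTORTURE_RDR_BH, so
 * interrupts are disabled before bh is re-enabled and the RCU reader
 * itself remains in force across the transition.
 */
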
1298/* Return the biggest extendables mask given current RCU and boot parameters. */
1299static int rcutorture_extend_mask_max(void)
1300{
1301	int mask;
1302
1303	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1304	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1305	mask = mask | RCUTORTURE_RDR_RCU;
1306	return mask;
1307}
1308
1309/* Return a random protection state mask, but with at least one bit set. */
1310static int
1311rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1312{
1313	int mask = rcutorture_extend_mask_max();
1314	unsigned long randmask1 = torture_random(trsp) >> 8;
1315	unsigned long randmask2 = randmask1 >> 3;
1316
1317	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
1318	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1319	if (!(randmask1 & 0x7))
1320		mask = mask & randmask2;
1321	else
1322		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1323	/* Can't enable bh w/irq disabled. */
1324	if ((mask & RCUTORTURE_RDR_IRQ) &&
1325	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1326	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1327		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1328	return mask ?: RCUTORTURE_RDR_RCU;
1329}
1330
1331/*
1332 * Do a randomly selected number of extensions of an existing RCU read-side
1333 * critical section.
1334 */
1335static struct rt_read_seg *
1336rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1337		       struct rt_read_seg *rtrsp)
1338{
1339	int i;
1340	int j;
1341	int mask = rcutorture_extend_mask_max();
1342
1343	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1344	if (!((mask - 1) & mask))
1345		return rtrsp;  /* Current RCU reader not extendable. */
1346	/* Bias towards larger numbers of loops. */
1347	i = (torture_random(trsp) >> 3);
1348	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1349	for (j = 0; j < i; j++) {
1350		mask = rcutorture_extend_mask(*readstate, trsp);
1351		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1352	}
1353	return &rtrsp[j];
1354}
1355
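/*
 * Worked example for the bias computation above: with
 * RCUTORTURE_RDR_MAX_LOOPS = 0x7, OR-ing i with i >> 3 before masking
 * makes each of the three low-order bits 1 with probability 3/4 rather
 * than 1/2, so after the "+ 1" the loop count lands in [1, 8] skewed
 * toward 8, favoring longer chains of reader extensions.
 */
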
1356/*
1357 * Do one read-side critical section, returning false if there was
1358 * no data to read.  Can be invoked both from process context and
1359 * from a timer handler.
1360 */
1361static bool rcu_torture_one_read(struct torture_random_state *trsp)
1362{
1363	int i;
1364	unsigned long started;
1365	unsigned long completed;
1366	int newstate;
1367	struct rcu_torture *p;
1368	int pipe_count;
1369	int readstate = 0;
1370	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1371	struct rt_read_seg *rtrsp = &rtseg[0];
1372	struct rt_read_seg *rtrsp1;
1373	unsigned long long ts;
1374
1375	WARN_ON_ONCE(!rcu_is_watching());
1376	newstate = rcutorture_extend_mask(readstate, trsp);
1377	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1378	started = cur_ops->get_gp_seq();
1379	ts = rcu_trace_clock_local();
1380	p = rcu_dereference_check(rcu_torture_current,
1381				  rcu_read_lock_bh_held() ||
1382				  rcu_read_lock_sched_held() ||
1383				  srcu_read_lock_held(srcu_ctlp) ||
1384				  rcu_read_lock_trace_held() ||
1385				  torturing_tasks());
1386	if (p == NULL) {
1387		/* Wait for rcu_torture_writer to get underway */
1388		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1389		return false;
1390	}
1391	if (p->rtort_mbtest == 0)
1392		atomic_inc(&n_rcu_torture_mberror);
1393	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1394	preempt_disable();
1395	pipe_count = READ_ONCE(p->rtort_pipe_count);
1396	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1397		/* Should not happen, but... */
1398		pipe_count = RCU_TORTURE_PIPE_LEN;
1399	}
1400	completed = cur_ops->get_gp_seq();
1401	if (pipe_count > 1) {
1402		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1403					  ts, started, completed);
1404		rcu_ftrace_dump(DUMP_ALL);
1405	}
1406	__this_cpu_inc(rcu_torture_count[pipe_count]);
1407	completed = rcutorture_seq_diff(completed, started);
1408	if (completed > RCU_TORTURE_PIPE_LEN) {
1409		/* Should not happen, but... */
1410		completed = RCU_TORTURE_PIPE_LEN;
1411	}
1412	__this_cpu_inc(rcu_torture_batch[completed]);
1413	preempt_enable();
1414	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1415	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1416
1417	/* If error or close call, record the sequence of reader protections. */
1418	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1419		i = 0;
1420		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1421			err_segs[i++] = *rtrsp1;
1422		rt_read_nsegs = i;
1423	}
1424
1425	return true;
1426}
1427
1428static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1429
1430/*
1431 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1432 * incrementing the corresponding element of the pipeline array.  The
1433 * counter in the element should never be greater than 1; otherwise, the
1434 * RCU implementation is broken.
1435 */
1436static void rcu_torture_timer(struct timer_list *unused)
1437{
1438	atomic_long_inc(&n_rcu_torture_timers);
1439	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
1440
1441	/* Test call_rcu() invocation from interrupt handler. */
1442	if (cur_ops->call) {
1443		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1444
1445		if (rhp)
1446			cur_ops->call(rhp, rcu_torture_timer_cb);
1447	}
1448}
1449
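/*
 * Illustrative note: the timer handler above runs in softirq context,
 * where sleeping is forbidden, hence the GFP_NOWAIT allocation and the
 * silent tolerance of allocation failure.
 */
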
1450/*
1451 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1452 * incrementing the corresponding element of the pipeline array.  The
1453 * counter in the element should never be greater than 1; otherwise, the
1454 * RCU implementation is broken.
1455 */
1456static int
1457rcu_torture_reader(void *arg)
1458{
1459	unsigned long lastsleep = jiffies;
1460	long myid = (long)arg;
1461	int mynumonline = myid;
1462	DEFINE_TORTURE_RANDOM(rand);
1463	struct timer_list t;
1464
1465	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1466	set_user_nice(current, MAX_NICE);
1467	if (irqreader && cur_ops->irq_capable)
1468		timer_setup_on_stack(&t, rcu_torture_timer, 0);
1469	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1470	do {
1471		if (irqreader && cur_ops->irq_capable) {
1472			if (!timer_pending(&t))
1473				mod_timer(&t, jiffies + 1);
1474		}
1475		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
1476			schedule_timeout_interruptible(HZ);
1477		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1478			schedule_timeout_interruptible(1);
1479			lastsleep = jiffies + 10;
1480		}
1481		while (num_online_cpus() < mynumonline && !torture_must_stop())
1482			schedule_timeout_interruptible(HZ / 5);
1483		stutter_wait("rcu_torture_reader");
1484	} while (!torture_must_stop());
1485	if (irqreader && cur_ops->irq_capable) {
1486		del_timer_sync(&t);
1487		destroy_timer_on_stack(&t);
1488	}
1489	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1490	torture_kthread_stopping("rcu_torture_reader");
1491	return 0;
1492}
1493
1494/*
1495 * Print torture statistics.  Caller must ensure that there is only
1496 * one call to this function at a given time!!!  This is normally
1497 * accomplished by relying on the module system to only have one copy
1498 * of the module loaded, and then by giving the rcu_torture_stats
1499 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1500 * thread is not running).
1501 */
1502static void
1503rcu_torture_stats_print(void)
1504{
1505	int cpu;
1506	int i;
1507	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1508	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1509	struct rcu_torture *rtcp;
1510	static unsigned long rtcv_snap = ULONG_MAX;
1511	static bool splatted;
1512	struct task_struct *wtp;
1513
1514	for_each_possible_cpu(cpu) {
1515		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1516			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1517			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1518		}
1519	}
1520	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1521		if (pipesummary[i] != 0)
1522			break;
1523	}
1524
1525	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1526	rtcp = rcu_access_pointer(rcu_torture_current);
1527	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1528		rtcp,
1529		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1530		rcu_torture_current_version,
1531		list_empty(&rcu_torture_freelist),
1532		atomic_read(&n_rcu_torture_alloc),
1533		atomic_read(&n_rcu_torture_alloc_fail),
1534		atomic_read(&n_rcu_torture_free));
1535	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
1536		atomic_read(&n_rcu_torture_mberror),
1537		n_rcu_torture_barrier_error,
1538		n_rcu_torture_boost_ktrerror,
1539		n_rcu_torture_boost_rterror);
1540	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1541		n_rcu_torture_boost_failure,
1542		n_rcu_torture_boosts,
1543		atomic_long_read(&n_rcu_torture_timers));
1544	torture_onoff_stats();
1545	pr_cont("barrier: %ld/%ld:%ld ",
1546		data_race(n_barrier_successes),
1547		data_race(n_barrier_attempts),
1548		data_race(n_rcu_torture_barrier_error));
1549	pr_cont("read-exits: %ld\n", data_race(n_read_exits));
1550
1551	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1552	if (atomic_read(&n_rcu_torture_mberror) ||
1553	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1554	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1555	    i > 1) {
1556		pr_cont("%s", "!!! ");
1557		atomic_inc(&n_rcu_torture_error);
1558		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1559		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
1560		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1561		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1562		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
1563		WARN_ON_ONCE(i > 1); // Too-short grace period
1564	}
1565	pr_cont("Reader Pipe: ");
1566	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1567		pr_cont(" %ld", pipesummary[i]);
1568	pr_cont("\n");
1569
1570	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1571	pr_cont("Reader Batch: ");
1572	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1573		pr_cont(" %ld", batchsummary[i]);
1574	pr_cont("\n");
1575
1576	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1577	pr_cont("Free-Block Circulation: ");
1578	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1579		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1580	}
1581	pr_cont("\n");
1582
1583	if (cur_ops->stats)
1584		cur_ops->stats();
1585	if (rtcv_snap == rcu_torture_current_version &&
1586	    rcu_access_pointer(rcu_torture_current) &&
1587	    !rcu_stall_is_suppressed()) {
1588		int __maybe_unused flags = 0;
1589		unsigned long __maybe_unused gp_seq = 0;
1590
1591		rcutorture_get_gp_data(cur_ops->ttype,
1592				       &flags, &gp_seq);
1593		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1594					&flags, &gp_seq);
1595		wtp = READ_ONCE(writer_task);
1596		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1597			 rcu_torture_writer_state_getname(),
1598			 rcu_torture_writer_state, gp_seq, flags,
1599			 wtp == NULL ? ~0UL : wtp->state,
1600			 wtp == NULL ? -1 : (int)task_cpu(wtp));
1601		if (!splatted && wtp) {
1602			sched_show_task(wtp);
1603			splatted = true;
1604		}
1605		show_rcu_gp_kthreads();
1606		rcu_ftrace_dump(DUMP_ALL);
1607	}
1608	rtcv_snap = rcu_torture_current_version;
1609}
1610
1611/*
1612 * Periodically prints torture statistics, if periodic statistics printing
1613 * was specified via the stat_interval module parameter.
1614 */
1615static int
1616rcu_torture_stats(void *arg)
1617{
1618	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1619	do {
1620		schedule_timeout_interruptible(stat_interval * HZ);
1621		rcu_torture_stats_print();
1622		torture_shutdown_absorb("rcu_torture_stats");
1623	} while (!torture_must_stop());
1624	torture_kthread_stopping("rcu_torture_stats");
1625	return 0;
1626}
1627
1628static void
1629rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1630{
1631	pr_alert("%s" TORTURE_FLAG
1632		 "--- %s: nreaders=%d nfakewriters=%d "
1633		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1634		 "shuffle_interval=%d stutter=%d irqreader=%d "
1635		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1636		 "test_boost=%d/%d test_boost_interval=%d "
1637		 "test_boost_duration=%d shutdown_secs=%d "
1638		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1639		 "stall_cpu_block=%d "
1640		 "n_barrier_cbs=%d "
1641		 "onoff_interval=%d onoff_holdoff=%d "
1642		 "read_exit_delay=%d read_exit_burst=%d\n",
1643		 torture_type, tag, nrealreaders, nfakewriters,
1644		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1645		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1646		 test_boost, cur_ops->can_boost,
1647		 test_boost_interval, test_boost_duration, shutdown_secs,
1648		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1649		 stall_cpu_block,
1650		 n_barrier_cbs,
1651		 onoff_interval, onoff_holdoff,
1652		 read_exit_delay, read_exit_burst);
1653}
1654
1655static int rcutorture_booster_cleanup(unsigned int cpu)
1656{
1657	struct task_struct *t;
1658
1659	if (boost_tasks[cpu] == NULL)
1660		return 0;
1661	mutex_lock(&boost_mutex);
1662	t = boost_tasks[cpu];
1663	boost_tasks[cpu] = NULL;
1664	rcu_torture_enable_rt_throttle();
1665	mutex_unlock(&boost_mutex);
1666
1667	/* This must be outside of the mutex, otherwise deadlock! */
1668	torture_stop_kthread(rcu_torture_boost, t);
1669	return 0;
1670}
1671
1672static int rcutorture_booster_init(unsigned int cpu)
1673{
1674	int retval;
1675
1676	if (boost_tasks[cpu] != NULL)
1677		return 0;  /* Already created, nothing more to do. */
1678
1679	/* Don't allow time recalculation while creating a new task. */
1680	mutex_lock(&boost_mutex);
1681	rcu_torture_disable_rt_throttle();
1682	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1683	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1684						  cpu_to_node(cpu),
1685						  "rcu_torture_boost");
1686	if (IS_ERR(boost_tasks[cpu])) {
1687		retval = PTR_ERR(boost_tasks[cpu]);
1688		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1689		n_rcu_torture_boost_ktrerror++;
1690		boost_tasks[cpu] = NULL;
1691		mutex_unlock(&boost_mutex);
1692		return retval;
1693	}
1694	kthread_bind(boost_tasks[cpu], cpu);
1695	wake_up_process(boost_tasks[cpu]);
1696	mutex_unlock(&boost_mutex);
1697	return 0;
1698}
1699
1700/*
1701 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1702 * induces GP-kthread and/or CPU stalls per stall_gp_kthread and stall_cpu.
1703 */
1704static int rcu_torture_stall(void *args)
1705{
1706	int idx;
1707	unsigned long stop_at;
1708
1709	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1710	if (stall_cpu_holdoff > 0) {
1711		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1712		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1713		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1714	}
1715	if (!kthread_should_stop() && stall_gp_kthread > 0) {
1716		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
1717		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
1718		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
1719			if (kthread_should_stop())
1720				break;
1721			schedule_timeout_uninterruptible(HZ);
1722		}
1723	}
1724	if (!kthread_should_stop() && stall_cpu > 0) {
1725		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
1726		stop_at = ktime_get_seconds() + stall_cpu;
1727		/* RCU CPU stall is expected behavior in following code. */
1728		idx = cur_ops->readlock();
1729		if (stall_cpu_irqsoff)
1730			local_irq_disable();
1731		else if (!stall_cpu_block)
1732			preempt_disable();
1733		pr_alert("rcu_torture_stall start on CPU %d.\n",
1734			 raw_smp_processor_id());
1735		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1736				    stop_at))
1737			if (stall_cpu_block)
1738				schedule_timeout_uninterruptible(HZ);
1739		if (stall_cpu_irqsoff)
1740			local_irq_enable();
1741		else if (!stall_cpu_block)
1742			preempt_enable();
1743		cur_ops->readunlock(idx);
1744	}
1745	pr_alert("rcu_torture_stall end.\n");
1746	torture_shutdown_absorb("rcu_torture_stall");
1747	while (!kthread_should_stop())
1748		schedule_timeout_interruptible(10 * HZ);
1749	return 0;
1750}
1751
1752/* Spawn CPU-stall kthread, if stall_cpu specified. */
1753static int __init rcu_torture_stall_init(void)
1754{
1755	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
1756		return 0;
1757	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
1758}
1759
1760/* State structure for forward-progress self-propagating RCU callback. */
1761struct fwd_cb_state {
1762	struct rcu_head rh;
1763	int stop;
1764};
1765
1766/*
1767 * Forward-progress self-propagating RCU callback function.  Because
1768 * callbacks run from softirq, this function is an implicit RCU read-side
1769 * critical section.
1770 */
1771static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
1772{
1773	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
1774
1775	if (READ_ONCE(fcsp->stop)) {
1776		WRITE_ONCE(fcsp->stop, 2);
1777		return;
1778	}
1779	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
1780}
1781
1782/* State for continuous-flood RCU callbacks. */
1783struct rcu_fwd_cb {
1784	struct rcu_head rh;
1785	struct rcu_fwd_cb *rfc_next;
1786	struct rcu_fwd *rfc_rfp;
1787	int rfc_gps;
1788};
1789
1790#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
1791#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
1792#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
1793#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
1794#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
1795
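/*
 * Worked example: with HZ=1000, N_LAUNDERS_HIST evaluates to
 * 2 * (8 * 1000) / (1000 / 10) = 160 buckets, each covering a
 * 1/FWD_CBS_HIST_DIV = 0.1-second slice of the up-to-8-second
 * (doubled for safety) callback-flood test.
 */
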
1796struct rcu_launder_hist {
1797	long n_launders;
1798	unsigned long launder_gp_seq;
1799};
1800
1801struct rcu_fwd {
1802	spinlock_t rcu_fwd_lock;
1803	struct rcu_fwd_cb *rcu_fwd_cb_head;
1804	struct rcu_fwd_cb **rcu_fwd_cb_tail;
1805	long n_launders_cb;
1806	unsigned long rcu_fwd_startat;
1807	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
1808	unsigned long rcu_launder_gp_seq_start;
1809};
1810
1811static struct rcu_fwd *rcu_fwds;
1812static bool rcu_fwd_emergency_stop;
1813
1814static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
1815{
1816	unsigned long gps;
1817	unsigned long gps_old;
1818	int i;
1819	int j;
1820
1821	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
1822		if (rfp->n_launders_hist[i].n_launders > 0)
1823			break;
1824	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
1825		 __func__, jiffies - rfp->rcu_fwd_startat);
1826	gps_old = rfp->rcu_launder_gp_seq_start;
1827	for (j = 0; j <= i; j++) {
1828		gps = rfp->n_launders_hist[j].launder_gp_seq;
1829		pr_cont(" %ds/%d: %ld:%ld",
1830			j + 1, FWD_CBS_HIST_DIV,
1831			rfp->n_launders_hist[j].n_launders,
1832			rcutorture_seq_diff(gps, gps_old));
1833		gps_old = gps;
1834	}
1835	pr_cont("\n");
1836}
1837
1838/* Callback function for continuous-flood RCU callbacks. */
1839static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1840{
1841	unsigned long flags;
1842	int i;
1843	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
1844	struct rcu_fwd_cb **rfcpp;
1845	struct rcu_fwd *rfp = rfcp->rfc_rfp;
1846
1847	rfcp->rfc_next = NULL;
1848	rfcp->rfc_gps++;
1849	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1850	rfcpp = rfp->rcu_fwd_cb_tail;
1851	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
1852	WRITE_ONCE(*rfcpp, rfcp);
1853	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
1854	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
1855	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
1856		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
1857	rfp->n_launders_hist[i].n_launders++;
1858	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
1859	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
1860}
1861
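/*
 * Worked example for the histogram bucketing above: with HZ=1000 and
 * FWD_CBS_HIST_DIV=10, a callback invoked 1234 jiffies after
 * ->rcu_fwd_startat lands in bucket 1234 / (1000 / 10) = 12, that is,
 * the 1.2s-1.3s slice of the run.
 */
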
1862// Give the scheduler a chance, even on nohz_full CPUs.
1863static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
1864{
1865	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
1866		// Real call_rcu() floods hit userspace, so emulate that.
1867		if (need_resched() || (iter & 0xfff))
1868			schedule();
1869		return;
1870	}
1871	// No userspace emulation: CB invocation throttles call_rcu()
1872	cond_resched();
1873}
1874
1875/*
1876 * Free all callbacks on the rcu_fwd_cb_head list, either because the
1877 * test is over or because we hit an OOM event.
1878 */
1879static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
1880{
1881	unsigned long flags;
1882	unsigned long freed = 0;
1883	struct rcu_fwd_cb *rfcp;
1884
1885	for (;;) {
1886		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1887		rfcp = rfp->rcu_fwd_cb_head;
1888		if (!rfcp) {
1889			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
1890			break;
1891		}
1892		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
1893		if (!rfp->rcu_fwd_cb_head)
1894			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
1895		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
1896		kfree(rfcp);
1897		freed++;
1898		rcu_torture_fwd_prog_cond_resched(freed);
1899		if (tick_nohz_full_enabled()) {
1900			local_irq_save(flags);
1901			rcu_momentary_dyntick_idle();
1902			local_irq_restore(flags);
1903		}
1904	}
1905	return freed;
1906}
1907
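/*
 * Illustrative sketch, not part of rcutorture.c: ->rcu_fwd_cb_tail always
 * points at the NULL pointer terminating the list (initially at
 * &->rcu_fwd_cb_head), which is why the pop loop above must reset it when
 * the list drains.  A hypothetical enqueue helper, with ->rcu_fwd_lock
 * assumed held:
 */
static void example_enqueue(struct rcu_fwd *rfp, struct rcu_fwd_cb *rfcp)
{
	rfcp->rfc_next = NULL;
	*rfp->rcu_fwd_cb_tail = rfcp;		/* Link after current tail. */
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;	/* Tail trails the new node. */
}
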
1908/* Carry out need_resched()/cond_resched() forward-progress testing. */
1909static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
1910				    int *tested, int *tested_tries)
1911{
1912	unsigned long cver;
1913	unsigned long dur;
1914	struct fwd_cb_state fcs;
1915	unsigned long gps;
1916	int idx;
1917	int sd;
1918	int sd4;
1919	bool selfpropcb = false;
1920	unsigned long stopat;
1921	static DEFINE_TORTURE_RANDOM(trs);
1922
1923	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
1924		init_rcu_head_on_stack(&fcs.rh);
1925		selfpropcb = true;
1926	}
1927
1928	/* Tight loop containing cond_resched(). */
1929	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1930	cur_ops->sync(); /* Later readers see above write. */
1931	if (selfpropcb) {
1932		WRITE_ONCE(fcs.stop, 0);
1933		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
1934	}
1935	cver = READ_ONCE(rcu_torture_current_version);
1936	gps = cur_ops->get_gp_seq();
1937	sd = cur_ops->stall_dur() + 1;
1938	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
1939	dur = sd4 + torture_random(&trs) % (sd - sd4);
1940	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
1941	stopat = rfp->rcu_fwd_startat + dur;
1942	while (time_before(jiffies, stopat) &&
1943	       !shutdown_time_arrived() &&
1944	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
1945		idx = cur_ops->readlock();
1946		udelay(10);
1947		cur_ops->readunlock(idx);
1948		if (!fwd_progress_need_resched || need_resched())
1949			cond_resched();
1950	}
1951	(*tested_tries)++;
1952	if (!time_before(jiffies, stopat) &&
1953	    !shutdown_time_arrived() &&
1954	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
1955		(*tested)++;
1956		cver = READ_ONCE(rcu_torture_current_version) - cver;
1957		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1958		WARN_ON(!cver && gps < 2);
1959		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
1960	}
1961	if (selfpropcb) {
1962		WRITE_ONCE(fcs.stop, 1);
1963		cur_ops->sync(); /* Wait for running CB to complete. */
1964		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
1965	}
1966
1967	if (selfpropcb) {
1968		WARN_ON(READ_ONCE(fcs.stop) != 2);
1969		destroy_rcu_head_on_stack(&fcs.rh);
1970	}
1971	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
1972	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
1973}
1974
1975/* Carry out call_rcu() forward-progress testing. */
1976static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
1977{
1978	unsigned long cver;
1979	unsigned long flags;
1980	unsigned long gps;
1981	int i;
1982	long n_launders;
1983	long n_launders_cb_snap;
1984	long n_launders_sa;
1985	long n_max_cbs;
1986	long n_max_gps;
1987	struct rcu_fwd_cb *rfcp;
1988	struct rcu_fwd_cb *rfcpn;
1989	unsigned long stopat;
1990	unsigned long stoppedat;
1991
1992	if (READ_ONCE(rcu_fwd_emergency_stop))
1993		return; /* Get out of the way quickly, no GP wait! */
1994	if (!cur_ops->call)
1995		return; /* Can't do call_rcu() fwd prog without ->call. */
1996
1997	/* Loop continuously posting RCU callbacks. */
1998	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1999	cur_ops->sync(); /* Later readers see above write. */
2000	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2001	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2002	n_launders = 0;
2003	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2004	n_launders_sa = 0;
2005	n_max_cbs = 0;
2006	n_max_gps = 0;
2007	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2008		rfp->n_launders_hist[i].n_launders = 0;
2009	cver = READ_ONCE(rcu_torture_current_version);
2010	gps = cur_ops->get_gp_seq();
2011	rfp->rcu_launder_gp_seq_start = gps;
2012	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2013	while (time_before(jiffies, stopat) &&
2014	       !shutdown_time_arrived() &&
2015	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2016		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2017		rfcpn = NULL;
2018		if (rfcp)
2019			rfcpn = READ_ONCE(rfcp->rfc_next);
2020		if (rfcpn) {
2021			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2022			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2023				break;
2024			rfp->rcu_fwd_cb_head = rfcpn;
2025			n_launders++;
2026			n_launders_sa++;
2027		} else {
2028			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2029			if (WARN_ON_ONCE(!rfcp)) {
2030				schedule_timeout_interruptible(1);
2031				continue;
2032			}
2033			n_max_cbs++;
2034			n_launders_sa = 0;
2035			rfcp->rfc_gps = 0;
2036			rfcp->rfc_rfp = rfp;
2037		}
2038		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2039		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2040		if (tick_nohz_full_enabled()) {
2041			local_irq_save(flags);
2042			rcu_momentary_dyntick_idle();
2043			local_irq_restore(flags);
2044		}
2045	}
2046	stoppedat = jiffies;
2047	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2048	cver = READ_ONCE(rcu_torture_current_version) - cver;
2049	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2050	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2051	(void)rcu_torture_fwd_prog_cbfree(rfp);
2052
2053	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2054	    !shutdown_time_arrived()) {
2055		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2056		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2057			 __func__,
2058			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2059			 n_launders + n_max_cbs - n_launders_cb_snap,
2060			 n_launders, n_launders_sa,
2061			 n_max_gps, n_max_cbs, cver, gps);
2062		rcu_torture_fwd_cb_hist(rfp);
2063	}
2064	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2065	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2066	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2067}
2068
2069
2070/*
2071 * OOM notifier, but this only prints diagnostic information for the
2072 * current forward-progress test.
2073 */
2074static int rcutorture_oom_notify(struct notifier_block *self,
2075				 unsigned long notused, void *nfreed)
2076{
2077	struct rcu_fwd *rfp = rcu_fwds;
2078
2079	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2080	     __func__);
2081	rcu_torture_fwd_cb_hist(rfp);
2082	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
2083	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2084	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2085	pr_info("%s: Freed %lu RCU callbacks.\n",
2086		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2087	rcu_barrier();
2088	pr_info("%s: Freed %lu RCU callbacks.\n",
2089		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2090	rcu_barrier();
2091	pr_info("%s: Freed %lu RCU callbacks.\n",
2092		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2093	smp_mb(); /* Frees before return to avoid redoing OOM. */
2094	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2095	pr_info("%s returning after OOM processing.\n", __func__);
2096	return NOTIFY_OK;
2097}
2098
2099static struct notifier_block rcutorture_oom_nb = {
2100	.notifier_call = rcutorture_oom_notify
2101};
2102
2103/* Carry out grace-period forward-progress testing. */
2104static int rcu_torture_fwd_prog(void *args)
2105{
2106	struct rcu_fwd *rfp = args;
2107	int tested = 0;
2108	int tested_tries = 0;
2109
2110	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2111	rcu_bind_current_to_nocb();
2112	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2113		set_user_nice(current, MAX_NICE);
2114	do {
2115		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2116		WRITE_ONCE(rcu_fwd_emergency_stop, false);
2117		register_oom_notifier(&rcutorture_oom_nb);
2118		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2119		    rcu_inkernel_boot_has_ended())
2120			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2121		if (rcu_inkernel_boot_has_ended())
2122			rcu_torture_fwd_prog_cr(rfp);
2123		unregister_oom_notifier(&rcutorture_oom_nb);
2124
2125		/* Avoid slow periods, better to test when busy. */
2126		stutter_wait("rcu_torture_fwd_prog");
2127	} while (!torture_must_stop());
2128	/* Short runs might not contain a valid forward-progress attempt. */
2129	WARN_ON(!tested && tested_tries >= 5);
2130	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2131	torture_kthread_stopping("rcu_torture_fwd_prog");
2132	return 0;
2133}
2134
2135/* If forward-progress checking is requested and feasible, spawn the thread. */
2136static int __init rcu_torture_fwd_prog_init(void)
2137{
2138	struct rcu_fwd *rfp;
2139
2140	if (!fwd_progress)
2141		return 0; /* Not requested, so don't do it. */
2142	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
2143	    cur_ops == &rcu_busted_ops) {
2144		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2145		return 0;
2146	}
2147	if (stall_cpu > 0) {
2148		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2149		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2150			return -EINVAL; /* In module, can fail back to user. */
2151		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2152		return 0;
2153	}
2154	if (fwd_progress_holdoff <= 0)
2155		fwd_progress_holdoff = 1;
2156	if (fwd_progress_div <= 0)
2157		fwd_progress_div = 4;
2158	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2159	if (!rfp)
2160		return -ENOMEM;
2161	spin_lock_init(&rfp->rcu_fwd_lock);
2162	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2163	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
2164}
2165
2166/* Callback function for RCU barrier testing. */
2167static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2168{
2169	atomic_inc(&barrier_cbs_invoked);
2170}
2171
2172/* IPI handler to get callback posted on desired CPU, if online. */
2173static void rcu_torture_barrier1cb(void *rcu_void)
2174{
2175	struct rcu_head *rhp = rcu_void;
2176
2177	cur_ops->call(rhp, rcu_torture_barrier_cbf);
2178}
2179
2180/* kthread function to register callbacks used to test RCU barriers. */
2181static int rcu_torture_barrier_cbs(void *arg)
2182{
2183	long myid = (long)arg;
2184	bool lastphase = false;
2185	bool newphase;
2186	struct rcu_head rcu;
2187
2188	init_rcu_head_on_stack(&rcu);
2189	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2190	set_user_nice(current, MAX_NICE);
2191	do {
2192		wait_event(barrier_cbs_wq[myid],
2193			   (newphase =
2194			    smp_load_acquire(&barrier_phase)) != lastphase ||
2195			   torture_must_stop());
2196		lastphase = newphase;
2197		if (torture_must_stop())
2198			break;
2199		/*
2200		 * The above smp_load_acquire() ensures barrier_phase load
2201		 * is ordered before the following ->call().
2202		 */
2203		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2204					     &rcu, 1)) {
2205			// IPI failed, so use direct call from current CPU.
2206			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2207		}
2208		if (atomic_dec_and_test(&barrier_cbs_count))
2209			wake_up(&barrier_wq);
2210	} while (!torture_must_stop());
2211	if (cur_ops->cb_barrier != NULL)
2212		cur_ops->cb_barrier();
2213	destroy_rcu_head_on_stack(&rcu);
2214	torture_kthread_stopping("rcu_torture_barrier_cbs");
2215	return 0;
2216}
2217
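/*
 * Illustrative note: the smp_load_acquire(&barrier_phase) in the wait
 * condition above pairs with the smp_store_release() that flips
 * barrier_phase in rcu_torture_barrier() below, guaranteeing that each
 * CB kthread sees barrier_cbs_count and barrier_cbs_invoked already
 * reinitialized before posting a callback for the new phase.
 */
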
2218/* kthread function to drive and coordinate RCU barrier testing. */
2219static int rcu_torture_barrier(void *arg)
2220{
2221	int i;
2222
2223	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2224	do {
2225		atomic_set(&barrier_cbs_invoked, 0);
2226		atomic_set(&barrier_cbs_count, n_barrier_cbs);
2227		/* Ensure barrier_phase ordered after prior assignments. */
2228		smp_store_release(&barrier_phase, !barrier_phase);
2229		for (i = 0; i < n_barrier_cbs; i++)
2230			wake_up(&barrier_cbs_wq[i]);
2231		wait_event(barrier_wq,
2232			   atomic_read(&barrier_cbs_count) == 0 ||
2233			   torture_must_stop());
2234		if (torture_must_stop())
2235			break;
2236		n_barrier_attempts++;
2237		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2238		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2239			n_rcu_torture_barrier_error++;
2240			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2241			       atomic_read(&barrier_cbs_invoked),
2242			       n_barrier_cbs);
2243			WARN_ON(1);
2244			// Wait manually for the remaining callbacks
2245			i = 0;
2246			do {
2247				if (WARN_ON(i++ > HZ))
2248					i = INT_MIN;
2249				schedule_timeout_interruptible(1);
2250				cur_ops->cb_barrier();
2251			} while (atomic_read(&barrier_cbs_invoked) !=
2252				 n_barrier_cbs &&
2253				 !torture_must_stop());
2254			smp_mb(); // Can't trust ordering if broken.
2255			if (!torture_must_stop())
2256				pr_err("Recovered: barrier_cbs_invoked = %d\n",
2257				       atomic_read(&barrier_cbs_invoked));
2258		} else {
2259			n_barrier_successes++;
2260		}
2261		schedule_timeout_interruptible(HZ / 10);
2262	} while (!torture_must_stop());
2263	torture_kthread_stopping("rcu_torture_barrier");
2264	return 0;
2265}
2266
2267/* Initialize RCU barrier testing. */
2268static int rcu_torture_barrier_init(void)
2269{
2270	int i;
2271	int ret;
2272
2273	if (n_barrier_cbs <= 0)
2274		return 0;
2275	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2276		pr_alert("%s" TORTURE_FLAG
2277			 " Call or barrier ops missing for %s,\n",
2278			 torture_type, cur_ops->name);
2279		pr_alert("%s" TORTURE_FLAG
2280			 " RCU barrier testing omitted from run.\n",
2281			 torture_type);
2282		return 0;
2283	}
2284	atomic_set(&barrier_cbs_count, 0);
2285	atomic_set(&barrier_cbs_invoked, 0);
2286	barrier_cbs_tasks =
2287		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2288			GFP_KERNEL);
2289	barrier_cbs_wq =
2290		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2291	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2292		return -ENOMEM;
2293	for (i = 0; i < n_barrier_cbs; i++) {
2294		init_waitqueue_head(&barrier_cbs_wq[i]);
2295		ret = torture_create_kthread(rcu_torture_barrier_cbs,
2296					     (void *)(long)i,
2297					     barrier_cbs_tasks[i]);
2298		if (ret)
2299			return ret;
2300	}
2301	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2302}
2303
2304/* Clean up after RCU barrier testing. */
2305static void rcu_torture_barrier_cleanup(void)
2306{
2307	int i;
2308
2309	torture_stop_kthread(rcu_torture_barrier, barrier_task);
2310	if (barrier_cbs_tasks != NULL) {
2311		for (i = 0; i < n_barrier_cbs; i++)
2312			torture_stop_kthread(rcu_torture_barrier_cbs,
2313					     barrier_cbs_tasks[i]);
2314		kfree(barrier_cbs_tasks);
2315		barrier_cbs_tasks = NULL;
2316	}
2317	if (barrier_cbs_wq != NULL) {
2318		kfree(barrier_cbs_wq);
2319		barrier_cbs_wq = NULL;
2320	}
2321}
2322
2323static bool rcu_torture_can_boost(void)
2324{
2325	static int boost_warn_once;
2326	int prio;
2327
2328	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2329		return false;
2330
2331	prio = rcu_get_gp_kthreads_prio();
2332	if (!prio)
2333		return false;
2334
2335	if (prio < 2) {
2336		if (boost_warn_once == 1)
2337			return false;
2338
2339		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2340		boost_warn_once = 1;
2341		return false;
2342	}
2343
2344	return true;
2345}
2346
2347static bool read_exit_child_stop;
2348static bool read_exit_child_stopped;
2349static wait_queue_head_t read_exit_wq;
2350
2351// Child kthread which just does an rcutorture reader and exits.
2352static int rcu_torture_read_exit_child(void *trsp_in)
2353{
2354	struct torture_random_state *trsp = trsp_in;
2355
2356	set_user_nice(current, MAX_NICE);
2357	// Minimize time between reading and exiting.
2358	while (!kthread_should_stop())
2359		schedule_timeout_uninterruptible(1);
2360	(void)rcu_torture_one_read(trsp);
2361	return 0;
2362}
2363
2364// Parent kthread which creates and destroys read-exit child kthreads.
2365static int rcu_torture_read_exit(void *unused)
2366{
2367	int count = 0;
2368	bool errexit = false;
2369	int i;
2370	struct task_struct *tsp;
2371	DEFINE_TORTURE_RANDOM(trs);
2372
2373	// Allocate and initialize.
2374	set_user_nice(current, MAX_NICE);
2375	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2376
2377	// Each pass through this loop does one read-exit episode.
2378	do {
2379		if (++count > read_exit_burst) {
2380			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2381			rcu_barrier(); // Wait for task_struct free, avoid OOM.
2382			for (i = 0; i < read_exit_delay; i++) {
2383				schedule_timeout_uninterruptible(HZ);
2384				if (READ_ONCE(read_exit_child_stop))
2385					break;
2386			}
2387			if (!READ_ONCE(read_exit_child_stop))
2388				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2389			count = 0;
2390		}
2391		if (READ_ONCE(read_exit_child_stop))
2392			break;
2393		// Spawn child.
2394		tsp = kthread_run(rcu_torture_read_exit_child,
2395				     &trs, "%s",
2396				     "rcu_torture_read_exit_child");
2397		if (IS_ERR(tsp)) {
2398			VERBOSE_TOROUT_ERRSTRING("out of memory");
2399			errexit = true;
2400			tsp = NULL;
2401			break;
2402		}
2403		cond_resched();
2404		kthread_stop(tsp);
2405		n_read_exits++;
2406		stutter_wait("rcu_torture_read_exit");
2407	} while (!errexit && !READ_ONCE(read_exit_child_stop));
2408
2409	// Clean up and exit.
2410	smp_store_release(&read_exit_child_stopped, true); // After reaping.
2411	smp_mb(); // Store before wakeup.
2412	wake_up(&read_exit_wq);
2413	while (!torture_must_stop())
2414		schedule_timeout_uninterruptible(1);
2415	torture_kthread_stopping("rcu_torture_read_exit");
2416	return 0;
2417}
2418
2419static int rcu_torture_read_exit_init(void)
2420{
2421	if (read_exit_burst <= 0)
2422		return -EINVAL;
2423	init_waitqueue_head(&read_exit_wq);
2424	read_exit_child_stop = false;
2425	read_exit_child_stopped = false;
2426	return torture_create_kthread(rcu_torture_read_exit, NULL,
2427				      read_exit_task);
2428}
2429
2430static void rcu_torture_read_exit_cleanup(void)
2431{
2432	if (!read_exit_task)
2433		return;
2434	WRITE_ONCE(read_exit_child_stop, true);
2435	smp_mb(); // Above write before wait.
2436	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2437	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
2438}
2439
2440static enum cpuhp_state rcutor_hp;
2441
2442static void
2443rcu_torture_cleanup(void)
2444{
2445	int firsttime;
2446	int flags = 0;
2447	unsigned long gp_seq = 0;
2448	int i;
2449
2450	if (torture_cleanup_begin()) {
2451		if (cur_ops->cb_barrier != NULL)
2452			cur_ops->cb_barrier();
2453		return;
2454	}
2455	if (!cur_ops) {
2456		torture_cleanup_end();
2457		return;
2458	}
2459
2460	show_rcu_gp_kthreads();
2461	rcu_torture_read_exit_cleanup();
2462	rcu_torture_barrier_cleanup();
2463	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
2464	torture_stop_kthread(rcu_torture_stall, stall_task);
2465	torture_stop_kthread(rcu_torture_writer, writer_task);
2466
2467	if (reader_tasks) {
2468		for (i = 0; i < nrealreaders; i++)
2469			torture_stop_kthread(rcu_torture_reader,
2470					     reader_tasks[i]);
2471		kfree(reader_tasks);
2472	}
2473
2474	if (fakewriter_tasks) {
2475		for (i = 0; i < nfakewriters; i++) {
2476			torture_stop_kthread(rcu_torture_fakewriter,
2477					     fakewriter_tasks[i]);
2478		}
2479		kfree(fakewriter_tasks);
2480		fakewriter_tasks = NULL;
2481	}
2482
2483	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2484	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2485	pr_alert("%s:  End-test grace-period state: g%lu f%#x\n",
2486		 cur_ops->name, gp_seq, flags);
2487	torture_stop_kthread(rcu_torture_stats, stats_task);
2488	torture_stop_kthread(rcu_torture_fqs, fqs_task);
2489	if (rcu_torture_can_boost())
2490		cpuhp_remove_state(rcutor_hp);
2491
2492	/*
2493	 * Wait for all RCU callbacks to fire, then do torture-type-specific
2494	 * cleanup operations.
2495	 */
2496	if (cur_ops->cb_barrier != NULL)
2497		cur_ops->cb_barrier();
2498	if (cur_ops->cleanup != NULL)
2499		cur_ops->cleanup();
2500
2501	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
2502
2503	if (err_segs_recorded) {
2504		pr_alert("Failure/close-call rcutorture reader segments:\n");
2505		if (rt_read_nsegs == 0)
2506			pr_alert("\t: No segments recorded!!!\n");
2507		firsttime = 1;
2508		for (i = 0; i < rt_read_nsegs; i++) {
2509			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2510			if (err_segs[i].rt_delay_jiffies != 0) {
2511				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2512					err_segs[i].rt_delay_jiffies);
2513				firsttime = 0;
2514			}
2515			if (err_segs[i].rt_delay_ms != 0) {
2516				pr_cont("%s%ldms", firsttime ? "" : "+",
2517					err_segs[i].rt_delay_ms);
2518				firsttime = 0;
2519			}
2520			if (err_segs[i].rt_delay_us != 0) {
2521				pr_cont("%s%ldus", firsttime ? "" : "+",
2522					err_segs[i].rt_delay_us);
2523				firsttime = 0;
2524			}
2525			pr_cont("%s\n",
2526				err_segs[i].rt_preempted ? "preempted" : "");
2527
2528		}
2529	}
2530	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
2531		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
2532	else if (torture_onoff_failures())
2533		rcu_torture_print_module_parms(cur_ops,
2534					       "End of test: RCU_HOTPLUG");
2535	else
2536		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
2537	torture_cleanup_end();
2538}
2539
2540#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2541static void rcu_torture_leak_cb(struct rcu_head *rhp)
2542{
2543}
2544
2545static void rcu_torture_err_cb(struct rcu_head *rhp)
2546{
2547	/*
2548	 * This -might- happen due to race conditions, but is unlikely.
2549	 * The scenario that leads to this happening is that the
2550	 * first of the pair of duplicate callbacks is queued,
2551	 * someone else starts a grace period that includes that
2552	 * callback, then the second of the pair must wait for the
2553	 * next grace period.  Unlikely, but can happen.  If it
2554	 * does happen, the debug-objects subsystem won't have splatted.
2555	 */
2556	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
2557}
2558#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2559
2560/*
2561 * Verify that double-free causes debug-objects to complain, but only
2562 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
2563 * cannot be carried out.
2564 */
2565static void rcu_test_debug_objects(void)
2566{
2567#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2568	struct rcu_head rh1;
2569	struct rcu_head rh2;
2570
2571	init_rcu_head_on_stack(&rh1);
2572	init_rcu_head_on_stack(&rh2);
2573	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
2574
2575	/* Try to queue the rh2 pair of callbacks for the same grace period. */
2576	preempt_disable(); /* Prevent preemption from interrupting test. */
2577	rcu_read_lock(); /* Make it impossible to finish a grace period. */
2578	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2579	local_irq_disable(); /* Make it harder to start a new grace period. */
2580	call_rcu(&rh2, rcu_torture_leak_cb);
2581	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2582	local_irq_enable();
2583	rcu_read_unlock();
2584	preempt_enable();
2585
2586	/* Wait for them all to get done so we can safely return. */
2587	rcu_barrier();
2588	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
2589	destroy_rcu_head_on_stack(&rh1);
2590	destroy_rcu_head_on_stack(&rh2);
2591#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2592	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
2593#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2594}
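/*
 * Added commentary: this test runs only when the object_debug module
 * parameter is set; see the call near the end of rcu_torture_init().
 */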
2595
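/*
 * Added commentary: rcutorture_sync() is handed to torture_onoff_init()
 * below as the CPU-hotplug synchronization hook.  The "& 0xfff" test
 * throttles it to one synchronous grace period per 4096 invocations:
 * the 4096th call makes n == 0x1000, whose low twelve bits are zero.
 */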
2596static void rcutorture_sync(void)
2597{
2598	static unsigned long n;
2599
2600	if (cur_ops->sync && !(++n & 0xfff))
2601		cur_ops->sync();
2602}
2603
2604static int __init
2605rcu_torture_init(void)
2606{
2607	long i;
2608	int cpu;
2609	int firsterr = 0;
2610	static struct rcu_torture_ops *torture_ops[] = {
2611		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2612		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
2613		&tasks_tracing_ops, &trivial_ops,
2614	};
2615
2616	if (!torture_init_begin(torture_type, verbose))
2617		return -EBUSY;
2618
2619	/* Process args and tell the world that the torturer is on the job. */
2620	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
2621		cur_ops = torture_ops[i];
2622		if (strcmp(torture_type, cur_ops->name) == 0)
2623			break;
2624	}
2625	if (i == ARRAY_SIZE(torture_ops)) {
2626		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2627			 torture_type);
2628		pr_alert("rcu-torture types:");
2629		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
2630			pr_cont(" %s", torture_ops[i]->name);
2631		pr_cont("\n");
2632		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
2633		firsterr = -EINVAL;
2634		cur_ops = NULL;
2635		goto unwind;
2636	}
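	/*
	 * Hypothetical invocation (added commentary) selecting one of the
	 * torture types listed above:
	 *
	 *	modprobe rcutorture torture_type=srcu
	 */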
2637	if (cur_ops->fqs == NULL && fqs_duration != 0) {
2638		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
2639		fqs_duration = 0;
2640	}
2641	if (cur_ops->init)
2642		cur_ops->init();
2643
2644	if (nreaders >= 0) {
2645		nrealreaders = nreaders;
2646	} else {
2647		nrealreaders = num_online_cpus() - 2 - nreaders;
2648		if (nrealreaders <= 0)
2649			nrealreaders = 1;
2650	}
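	/*
	 * Worked example (added commentary): assuming the usual nreaders
	 * default of -1, an 8-CPU system gets nrealreaders = 8 - 2 - (-1)
	 * = 7, leaving roughly one CPU free for the writer and other
	 * kthreads.
	 */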
2651	rcu_torture_print_module_parms(cur_ops, "Start of test");
2652
2653	/* Set up the freelist. */
2654
2655	INIT_LIST_HEAD(&rcu_torture_freelist);
2656	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
2657		rcu_tortures[i].rtort_mbtest = 0;
2658		list_add_tail(&rcu_tortures[i].rtort_free,
2659			      &rcu_torture_freelist);
2660	}
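	/*
	 * Added commentary: writers allocate elements from this freelist
	 * and callbacks return them to it, via the alloc/free helpers
	 * defined earlier in this file.
	 */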
2661
2662	/* Initialize the statistics so that each run gets its own numbers. */
2663
2664	rcu_torture_current = NULL;
2665	rcu_torture_current_version = 0;
2666	atomic_set(&n_rcu_torture_alloc, 0);
2667	atomic_set(&n_rcu_torture_alloc_fail, 0);
2668	atomic_set(&n_rcu_torture_free, 0);
2669	atomic_set(&n_rcu_torture_mberror, 0);
2670	atomic_set(&n_rcu_torture_error, 0);
2671	n_rcu_torture_barrier_error = 0;
2672	n_rcu_torture_boost_ktrerror = 0;
2673	n_rcu_torture_boost_rterror = 0;
2674	n_rcu_torture_boost_failure = 0;
2675	n_rcu_torture_boosts = 0;
2676	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2677		atomic_set(&rcu_torture_wcount[i], 0);
2678	for_each_possible_cpu(cpu) {
2679		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2680			per_cpu(rcu_torture_count, cpu)[i] = 0;
2681			per_cpu(rcu_torture_batch, cpu)[i] = 0;
2682		}
2683	}
2684	err_segs_recorded = 0;
2685	rt_read_nsegs = 0;
2686
2687	/* Start up the kthreads. */
2688
2689	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2690					  writer_task);
2691	if (firsterr)
2692		goto unwind;
2693	if (nfakewriters > 0) {
2694		fakewriter_tasks = kcalloc(nfakewriters,
2695					   sizeof(fakewriter_tasks[0]),
2696					   GFP_KERNEL);
2697		if (fakewriter_tasks == NULL) {
2698			VERBOSE_TOROUT_ERRSTRING("out of memory");
2699			firsterr = -ENOMEM;
2700			goto unwind;
2701		}
2702	}
2703	for (i = 0; i < nfakewriters; i++) {
2704		firsterr = torture_create_kthread(rcu_torture_fakewriter,
2705						  NULL, fakewriter_tasks[i]);
2706		if (firsterr)
2707			goto unwind;
2708	}
2709	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
2710			       GFP_KERNEL);
2711	if (reader_tasks == NULL) {
2712		VERBOSE_TOROUT_ERRSTRING("out of memory");
2713		firsterr = -ENOMEM;
2714		goto unwind;
2715	}
2716	for (i = 0; i < nrealreaders; i++) {
2717		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
2718						  reader_tasks[i]);
2719		if (firsterr)
2720			goto unwind;
2721	}
2722	if (stat_interval > 0) {
2723		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2724						  stats_task);
2725		if (firsterr)
2726			goto unwind;
2727	}
2728	if (test_no_idle_hz && shuffle_interval > 0) {
2729		firsterr = torture_shuffle_init(shuffle_interval * HZ);
2730		if (firsterr)
2731			goto unwind;
2732	}
2733	if (stutter < 0)
2734		stutter = 0;
2735	if (stutter) {
2736		int t;
2737
2738		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
2739		firsterr = torture_stutter_init(stutter * HZ, t);
2740		if (firsterr)
2741			goto unwind;
2742	}
2743	if (fqs_duration < 0)
2744		fqs_duration = 0;
2745	if (fqs_duration) {
2746		/* Create the fqs thread */
2747		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2748						  fqs_task);
2749		if (firsterr)
2750			goto unwind;
2751	}
2752	if (test_boost_interval < 1)
2753		test_boost_interval = 1;
2754	if (test_boost_duration < 2)
2755		test_boost_duration = 2;
2756	if (rcu_torture_can_boost()) {
2757
2758		boost_starttime = jiffies + test_boost_interval * HZ;
2759
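		/*
		 * Added commentary: with CPUHP_AP_ONLINE_DYN,
		 * cpuhp_setup_state() returns the dynamically allocated
		 * hotplug state number (>= 0) on success, which is saved
		 * below for cpuhp_remove_state() in rcu_torture_cleanup().
		 */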
2760		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2761					     rcutorture_booster_init,
2762					     rcutorture_booster_cleanup);
2763		if (firsterr < 0)
2764			goto unwind;
2765		rcutor_hp = firsterr;
2766	}
2767	shutdown_jiffies = jiffies + shutdown_secs * HZ;
2768	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2769	if (firsterr)
2770		goto unwind;
2771	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
2772				      rcutorture_sync);
2773	if (firsterr)
2774		goto unwind;
2775	firsterr = rcu_torture_stall_init();
2776	if (firsterr)
2777		goto unwind;
2778	firsterr = rcu_torture_fwd_prog_init();
2779	if (firsterr)
2780		goto unwind;
2781	firsterr = rcu_torture_barrier_init();
2782	if (firsterr)
2783		goto unwind;
2784	firsterr = rcu_torture_read_exit_init();
2785	if (firsterr)
2786		goto unwind;
2787	if (object_debug)
2788		rcu_test_debug_objects();
2789	torture_init_end();
2790	return 0;
2791
2792unwind:
2793	torture_init_end();
2794	rcu_torture_cleanup();
2795	return firsterr;
2796}
2797
2798module_init(rcu_torture_init);
2799module_exit(rcu_torture_cleanup);