   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Read-Copy Update module-based torture test facility
   4 *
   5 * Copyright (C) IBM Corporation, 2005, 2006
   6 *
   7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8 *	  Josh Triplett <josh@joshtriplett.org>
   9 *
  10 * See also:  Documentation/RCU/torture.rst
  11 */
  12
  13#define pr_fmt(fmt) fmt
  14
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/init.h>
  18#include <linux/module.h>
  19#include <linux/kthread.h>
  20#include <linux/err.h>
  21#include <linux/spinlock.h>
  22#include <linux/smp.h>
  23#include <linux/rcupdate_wait.h>
  24#include <linux/interrupt.h>
  25#include <linux/sched/signal.h>
  26#include <uapi/linux/sched/types.h>
  27#include <linux/atomic.h>
  28#include <linux/bitops.h>
  29#include <linux/completion.h>
  30#include <linux/moduleparam.h>
  31#include <linux/percpu.h>
  32#include <linux/notifier.h>
  33#include <linux/reboot.h>
  34#include <linux/freezer.h>
  35#include <linux/cpu.h>
  36#include <linux/delay.h>
  37#include <linux/stat.h>
  38#include <linux/srcu.h>
  39#include <linux/slab.h>
  40#include <linux/trace_clock.h>
  41#include <asm/byteorder.h>
  42#include <linux/torture.h>
  43#include <linux/vmalloc.h>
  44#include <linux/sched/debug.h>
  45#include <linux/sched/sysctl.h>
  46#include <linux/oom.h>
  47#include <linux/tick.h>
  48#include <linux/rcupdate_trace.h>
  49#include <linux/nmi.h>
  50
  51#include "rcu.h"
  52
  53MODULE_LICENSE("GPL");
  54MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
  55
  56/* Bits for ->extendables field, extendables param, and related definitions. */
  57#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
  58#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
  59#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
  60#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
  61#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
  62#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
  63#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
  64#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
  65#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
  66#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
  67#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
  68#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
  69#define RCUTORTURE_MAX_EXTEND	 \
  70	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
  71	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
  72#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
  73					/* Must be power of two minus one. */
  74#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
  75
  76torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
  77	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
  78torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
  79torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
  80torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
  81torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
  82torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
  83torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
  84torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
  85torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
  86torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
  87torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
  88torture_param(bool, gp_cond_exp_full, false,
  89		    "Use conditional/async full-state expedited GP wait primitives");
  90torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
  91torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
  92torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
  93torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
  94torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
  95torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
  96torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
  97torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
  98torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
  99torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
 100torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
 101torture_param(int, nreaders, -1, "Number of RCU reader threads");
 102torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
 103torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 104torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
 105torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
 106torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
 107torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
 108torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
 109torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 110torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 111torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
 112torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
 113torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
 114torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
 115torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
 116torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
 117torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
 118torture_param(int, stutter, 5, "Number of seconds to run/halt test");
 119torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
 120torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
 121torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
 122torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
 123torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 124
 125static char *torture_type = "rcu";
 126module_param(torture_type, charp, 0444);
 127MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
 128
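/*
 * A minimal usage sketch, assuming rcutorture is built as a module
 * (CONFIG_RCU_TORTURE_TEST=m); the parameter names are those defined
 * above:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stutter=5
 *	sleep 60; rmmod rcutorture; dmesg | grep -i torture
 *
 * When built in, the same parameters instead take the "rcutorture."
 * prefix on the kernel command line, e.g. rcutorture.torture_type=srcu.
 */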
 129static int nrealnocbers;
 130static int nrealreaders;
 131static struct task_struct *writer_task;
 132static struct task_struct **fakewriter_tasks;
 133static struct task_struct **reader_tasks;
 134static struct task_struct **nocb_tasks;
 135static struct task_struct *stats_task;
 136static struct task_struct *fqs_task;
 137static struct task_struct *boost_tasks[NR_CPUS];
 138static struct task_struct *stall_task;
 139static struct task_struct **fwd_prog_tasks;
 140static struct task_struct **barrier_cbs_tasks;
 141static struct task_struct *barrier_task;
 142static struct task_struct *read_exit_task;
 143
 144#define RCU_TORTURE_PIPE_LEN 10
 145
 146// Mailbox-like structure to check RCU global memory ordering.
 147struct rcu_torture_reader_check {
 148	unsigned long rtc_myloops;
 149	int rtc_chkrdr;
 150	unsigned long rtc_chkloops;
 151	int rtc_ready;
 152	struct rcu_torture_reader_check *rtc_assigner;
 153} ____cacheline_internodealigned_in_smp;
 154
 155// Update-side data structure used to check RCU readers.
 156struct rcu_torture {
 157	struct rcu_head rtort_rcu;
 158	int rtort_pipe_count;
 159	struct list_head rtort_free;
 160	int rtort_mbtest;
 161	struct rcu_torture_reader_check *rtort_chkp;
 162};
 163
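/*
 * Lifecycle sketch: the writer repeatedly replaces rcu_torture_current
 * with a freshly allocated element, after which the old element advances
 * one pipeline slot per grace period until rtort_pipe_count reaches
 * RCU_TORTURE_PIPE_LEN and the element returns to the freelist.  A
 * reader that observes an element deep in the pipeline thus saw it long
 * after replacement, indicating a grace period that ended too soon.
 */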
 164static LIST_HEAD(rcu_torture_freelist);
 165static struct rcu_torture __rcu *rcu_torture_current;
 166static unsigned long rcu_torture_current_version;
 167static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 168static DEFINE_SPINLOCK(rcu_torture_lock);
 169static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
 170static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
 171static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
 172static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
 173static atomic_t n_rcu_torture_alloc;
 174static atomic_t n_rcu_torture_alloc_fail;
 175static atomic_t n_rcu_torture_free;
 176static atomic_t n_rcu_torture_mberror;
 177static atomic_t n_rcu_torture_mbchk_fail;
 178static atomic_t n_rcu_torture_mbchk_tries;
 179static atomic_t n_rcu_torture_error;
 180static long n_rcu_torture_barrier_error;
 181static long n_rcu_torture_boost_ktrerror;
 182static long n_rcu_torture_boost_rterror;
 183static long n_rcu_torture_boost_failure;
 184static long n_rcu_torture_boosts;
 185static atomic_long_t n_rcu_torture_timers;
 186static long n_barrier_attempts;
 187static long n_barrier_successes; /* did rcu_barrier test succeed? */
 188static unsigned long n_read_exits;
 189static struct list_head rcu_torture_removed;
 190static unsigned long shutdown_jiffies;
 191static unsigned long start_gp_seq;
 192static atomic_long_t n_nocb_offload;
 193static atomic_long_t n_nocb_deoffload;
 194
 195static int rcu_torture_writer_state;
 196#define RTWS_FIXED_DELAY	0
 197#define RTWS_DELAY		1
 198#define RTWS_REPLACE		2
 199#define RTWS_DEF_FREE		3
 200#define RTWS_EXP_SYNC		4
 201#define RTWS_COND_GET		5
 202#define RTWS_COND_GET_FULL	6
 203#define RTWS_COND_GET_EXP	7
 204#define RTWS_COND_GET_EXP_FULL	8
 205#define RTWS_COND_SYNC		9
 206#define RTWS_COND_SYNC_FULL	10
 207#define RTWS_COND_SYNC_EXP	11
 208#define RTWS_COND_SYNC_EXP_FULL	12
 209#define RTWS_POLL_GET		13
 210#define RTWS_POLL_GET_FULL	14
 211#define RTWS_POLL_GET_EXP	15
 212#define RTWS_POLL_GET_EXP_FULL	16
 213#define RTWS_POLL_WAIT		17
 214#define RTWS_POLL_WAIT_FULL	18
 215#define RTWS_POLL_WAIT_EXP	19
 216#define RTWS_POLL_WAIT_EXP_FULL	20
 217#define RTWS_SYNC		21
 218#define RTWS_STUTTER		22
 219#define RTWS_STOPPING		23
 220static const char * const rcu_torture_writer_state_names[] = {
 221	"RTWS_FIXED_DELAY",
 222	"RTWS_DELAY",
 223	"RTWS_REPLACE",
 224	"RTWS_DEF_FREE",
 225	"RTWS_EXP_SYNC",
 226	"RTWS_COND_GET",
 227	"RTWS_COND_GET_FULL",
 228	"RTWS_COND_GET_EXP",
 229	"RTWS_COND_GET_EXP_FULL",
 230	"RTWS_COND_SYNC",
 231	"RTWS_COND_SYNC_FULL",
 232	"RTWS_COND_SYNC_EXP",
 233	"RTWS_COND_SYNC_EXP_FULL",
 234	"RTWS_POLL_GET",
 235	"RTWS_POLL_GET_FULL",
 236	"RTWS_POLL_GET_EXP",
 237	"RTWS_POLL_GET_EXP_FULL",
 238	"RTWS_POLL_WAIT",
 239	"RTWS_POLL_WAIT_FULL",
 240	"RTWS_POLL_WAIT_EXP",
 241	"RTWS_POLL_WAIT_EXP_FULL",
 242	"RTWS_SYNC",
 243	"RTWS_STUTTER",
 244	"RTWS_STOPPING",
 245};
 246
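/*
 * The names above must remain index-aligned with the RTWS_ constants,
 * because rcu_torture_writer_state_getname() below indexes this array
 * directly by the writer-state value.
 */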
 247/* Record reader segment types and duration for first failing read. */
 248struct rt_read_seg {
 249	int rt_readstate;
 250	unsigned long rt_delay_jiffies;
 251	unsigned long rt_delay_ms;
 252	unsigned long rt_delay_us;
 253	bool rt_preempted;
 254};
 255static int err_segs_recorded;
 256static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
 257static int rt_read_nsegs;
 258
 259static const char *rcu_torture_writer_state_getname(void)
 260{
 261	unsigned int i = READ_ONCE(rcu_torture_writer_state);
 262
 263	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
 264		return "???";
 265	return rcu_torture_writer_state_names[i];
 266}
 267
 268#ifdef CONFIG_RCU_TRACE
 269static u64 notrace rcu_trace_clock_local(void)
 270{
 271	u64 ts = trace_clock_local();
 272
 273	(void)do_div(ts, NSEC_PER_USEC);
 274	return ts;
 275}
 276#else /* #ifdef CONFIG_RCU_TRACE */
 277static u64 notrace rcu_trace_clock_local(void)
 278{
 279	return 0ULL;
 280}
 281#endif /* #else #ifdef CONFIG_RCU_TRACE */
 282
 283/*
 284 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 285 * to avoid interfering with test shutdown.
 286 */
 287static bool shutdown_time_arrived(void)
 288{
 289	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
 290}
 291
 292static unsigned long boost_starttime;	/* jiffies of next boost test start. */
 293static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
 294					/*  and boost task create/destroy. */
 295static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
 296static bool barrier_phase;		/* Test phase. */
 297static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
 298static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 299static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
 300
 301static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */
 302
 303/*
 304 * Allocate an element from the rcu_tortures pool.
 305 */
 306static struct rcu_torture *
 307rcu_torture_alloc(void)
 308{
 309	struct list_head *p;
 310
 311	spin_lock_bh(&rcu_torture_lock);
 312	if (list_empty(&rcu_torture_freelist)) {
 313		atomic_inc(&n_rcu_torture_alloc_fail);
 314		spin_unlock_bh(&rcu_torture_lock);
 315		return NULL;
 316	}
 317	atomic_inc(&n_rcu_torture_alloc);
 318	p = rcu_torture_freelist.next;
 319	list_del_init(p);
 320	spin_unlock_bh(&rcu_torture_lock);
 321	return container_of(p, struct rcu_torture, rtort_free);
 322}
 323
 324/*
 325 * Free an element to the rcu_tortures pool.
 326 */
 327static void
 328rcu_torture_free(struct rcu_torture *p)
 329{
 330	atomic_inc(&n_rcu_torture_free);
 331	spin_lock_bh(&rcu_torture_lock);
 332	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
 333	spin_unlock_bh(&rcu_torture_lock);
 334}
 335
 336/*
 337 * Operations vector for selecting different types of tests.
 338 */
 339
 340struct rcu_torture_ops {
 341	int ttype;
 342	void (*init)(void);
 343	void (*cleanup)(void);
 344	int (*readlock)(void);
 345	void (*read_delay)(struct torture_random_state *rrsp,
 346			   struct rt_read_seg *rtrsp);
 347	void (*readunlock)(int idx);
 348	int (*readlock_held)(void);
 349	unsigned long (*get_gp_seq)(void);
 350	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 351	void (*deferred_free)(struct rcu_torture *p);
 352	void (*sync)(void);
 353	void (*exp_sync)(void);
 354	unsigned long (*get_gp_state_exp)(void);
 355	unsigned long (*start_gp_poll_exp)(void);
 356	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
 357	bool (*poll_gp_state_exp)(unsigned long oldstate);
 358	void (*cond_sync_exp)(unsigned long oldstate);
 359	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
 360	unsigned long (*get_comp_state)(void);
 361	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
 362	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
 363	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
 364	unsigned long (*get_gp_state)(void);
 365	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 366	unsigned long (*get_gp_completed)(void);
 367	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
 368	unsigned long (*start_gp_poll)(void);
 369	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
 370	bool (*poll_gp_state)(unsigned long oldstate);
 371	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 372	bool (*poll_need_2gp)(bool poll, bool poll_full);
 373	void (*cond_sync)(unsigned long oldstate);
 374	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
 375	call_rcu_func_t call;
 376	void (*cb_barrier)(void);
 377	void (*fqs)(void);
 378	void (*stats)(void);
 379	void (*gp_kthread_dbg)(void);
 380	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
 381	int (*stall_dur)(void);
 382	long cbflood_max;
 383	int irq_capable;
 384	int can_boost;
 385	int extendables;
 386	int slow_gps;
 387	int no_pi_lock;
 388	const char *name;
 389};
 390
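/*
 * Most hooks are optional and are checked against NULL before use, so a
 * stripped-down flavor needs little more than reader entry/exit and one
 * grace-period primitive.  A hypothetical minimal instance, shown for
 * illustration only (not a registered flavor):
 *
 *	static struct rcu_torture_ops mini_ops = {
 *		.ttype      = INVALID_RCU_FLAVOR,
 *		.init       = rcu_sync_torture_init,
 *		.readlock   = rcu_torture_read_lock,
 *		.read_delay = rcu_read_delay,
 *		.readunlock = rcu_torture_read_unlock,
 *		.sync       = synchronize_rcu,
 *		.name       = "mini"
 *	};
 */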
 391static struct rcu_torture_ops *cur_ops;
 392
 393/*
 394 * Definitions for rcu torture testing.
 395 */
 396
 397static int torture_readlock_not_held(void)
 398{
 399	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
 400}
 401
 402static int rcu_torture_read_lock(void) __acquires(RCU)
 403{
 404	rcu_read_lock();
 405	return 0;
 406}
 407
 408static void
 409rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 410{
 411	unsigned long started;
 412	unsigned long completed;
 413	const unsigned long shortdelay_us = 200;
 414	unsigned long longdelay_ms = 300;
 415	unsigned long long ts;
 416
 417	/* We want a short delay sometimes to make a reader delay the grace
 418	 * period, and we want a long delay occasionally to trigger
 419	 * force_quiescent_state. */
 420
 421	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
 422	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
 423		started = cur_ops->get_gp_seq();
 424		ts = rcu_trace_clock_local();
 425		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
 426			longdelay_ms = 5; /* Avoid triggering BH limits. */
 427		mdelay(longdelay_ms);
 428		rtrsp->rt_delay_ms = longdelay_ms;
 429		completed = cur_ops->get_gp_seq();
 430		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
 431					  started, completed);
 432	}
 433	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
 434		udelay(shortdelay_us);
 435		rtrsp->rt_delay_us = shortdelay_us;
 436	}
 437	if (!preempt_count() &&
 438	    !(torture_random(rrsp) % (nrealreaders * 500))) {
 439		torture_preempt_schedule();  /* QS only if preemptible. */
 440		rtrsp->rt_preempted = true;
 441	}
 442}
 443
 444static void rcu_torture_read_unlock(int idx) __releases(RCU)
 445{
 446	rcu_read_unlock();
 447}
 448
 449/*
 450 * Update callback in the pipe.  This should be invoked after a grace period.
 451 */
 452static bool
 453rcu_torture_pipe_update_one(struct rcu_torture *rp)
 454{
 455	int i;
 456	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
 457
 458	if (rtrcp) {
 459		WRITE_ONCE(rp->rtort_chkp, NULL);
 460		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
 461	}
 462	i = READ_ONCE(rp->rtort_pipe_count);
 463	if (i > RCU_TORTURE_PIPE_LEN)
 464		i = RCU_TORTURE_PIPE_LEN;
 465	atomic_inc(&rcu_torture_wcount[i]);
 466	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
 467	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
 468		rp->rtort_mbtest = 0;
 469		return true;
 470	}
 471	return false;
 472}
 473
 474/*
 475 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 476 * primitives.
 477 */
 478static void
 479rcu_torture_pipe_update(struct rcu_torture *old_rp)
 480{
 481	struct rcu_torture *rp;
 482	struct rcu_torture *rp1;
 483
 484	if (old_rp)
 485		list_add(&old_rp->rtort_free, &rcu_torture_removed);
 486	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
 487		if (rcu_torture_pipe_update_one(rp)) {
 488			list_del(&rp->rtort_free);
 489			rcu_torture_free(rp);
 490		}
 491	}
 492}
 493
 494static void
 495rcu_torture_cb(struct rcu_head *p)
 496{
 497	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 498
 499	if (torture_must_stop_irq()) {
 500		/* Test is ending, just drop callbacks on the floor. */
 501		/* The next initialization will pick up the pieces. */
 502		return;
 503	}
 504	if (rcu_torture_pipe_update_one(rp))
 505		rcu_torture_free(rp);
 506	else
 507		cur_ops->deferred_free(rp);
 508}
 509
 510static unsigned long rcu_no_completed(void)
 511{
 512	return 0;
 513}
 514
 515static void rcu_torture_deferred_free(struct rcu_torture *p)
 516{
 517	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
 518}
 519
 520static void rcu_sync_torture_init(void)
 521{
 522	INIT_LIST_HEAD(&rcu_torture_removed);
 523}
 524
 525static bool rcu_poll_need_2gp(bool poll, bool poll_full)
 526{
 527	return poll;
 528}
 529
 530static struct rcu_torture_ops rcu_ops = {
 531	.ttype			= RCU_FLAVOR,
 532	.init			= rcu_sync_torture_init,
 533	.readlock		= rcu_torture_read_lock,
 534	.read_delay		= rcu_read_delay,
 535	.readunlock		= rcu_torture_read_unlock,
 536	.readlock_held		= torture_readlock_not_held,
 537	.get_gp_seq		= rcu_get_gp_seq,
 538	.gp_diff		= rcu_seq_diff,
 539	.deferred_free		= rcu_torture_deferred_free,
 540	.sync			= synchronize_rcu,
 541	.exp_sync		= synchronize_rcu_expedited,
 542	.same_gp_state		= same_state_synchronize_rcu,
 543	.same_gp_state_full	= same_state_synchronize_rcu_full,
 544	.get_comp_state		= get_completed_synchronize_rcu,
 545	.get_comp_state_full	= get_completed_synchronize_rcu_full,
 546	.get_gp_state		= get_state_synchronize_rcu,
 547	.get_gp_state_full	= get_state_synchronize_rcu_full,
 548	.get_gp_completed	= get_completed_synchronize_rcu,
 549	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
 550	.start_gp_poll		= start_poll_synchronize_rcu,
 551	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
 552	.poll_gp_state		= poll_state_synchronize_rcu,
 553	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
 554	.poll_need_2gp		= rcu_poll_need_2gp,
 555	.cond_sync		= cond_synchronize_rcu,
 556	.cond_sync_full		= cond_synchronize_rcu_full,
 557	.get_gp_state_exp	= get_state_synchronize_rcu,
 558	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
 559	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
 560	.poll_gp_state_exp	= poll_state_synchronize_rcu,
 561	.cond_sync_exp		= cond_synchronize_rcu_expedited,
 562	.call			= call_rcu_hurry,
 563	.cb_barrier		= rcu_barrier,
 564	.fqs			= rcu_force_quiescent_state,
 565	.stats			= NULL,
 566	.gp_kthread_dbg		= show_rcu_gp_kthreads,
 567	.check_boost_failed	= rcu_check_boost_fail,
 568	.stall_dur		= rcu_jiffies_till_stall_check,
 569	.irq_capable		= 1,
 570	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
 571	.extendables		= RCUTORTURE_MAX_EXTEND,
 572	.name			= "rcu"
 573};
 574
 575/*
 576 * Don't even think about trying any of these in real life!!!
 577 * The names include "busted", and they really mean it!
 578 * The only purpose of these functions is to provide a buggy RCU
 579 * implementation to make sure that rcutorture correctly emits
 580 * buggy-RCU error messages.
 581 */
 582static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
 583{
 584	/* This is a deliberate bug for testing purposes only! */
 585	rcu_torture_cb(&p->rtort_rcu);
 586}
 587
 588static void synchronize_rcu_busted(void)
 589{
 590	/* This is a deliberate bug for testing purposes only! */
 591}
 592
 593static void
 594call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
 595{
 596	/* This is a deliberate bug for testing purposes only! */
 597	func(head);
 598}
 599
 600static struct rcu_torture_ops rcu_busted_ops = {
 601	.ttype		= INVALID_RCU_FLAVOR,
 602	.init		= rcu_sync_torture_init,
 603	.readlock	= rcu_torture_read_lock,
 604	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 605	.readunlock	= rcu_torture_read_unlock,
 606	.readlock_held	= torture_readlock_not_held,
 607	.get_gp_seq	= rcu_no_completed,
 608	.deferred_free	= rcu_busted_torture_deferred_free,
 609	.sync		= synchronize_rcu_busted,
 610	.exp_sync	= synchronize_rcu_busted,
 611	.call		= call_rcu_busted,
 612	.cb_barrier	= NULL,
 613	.fqs		= NULL,
 614	.stats		= NULL,
 615	.irq_capable	= 1,
 616	.name		= "busted"
 617};
 618
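/*
 * Consequently, a run with rcutorture.torture_type=busted is expected
 * to report failures; it exists to verify that rcutorture's own error
 * paths still fire.
 */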
 619/*
 620 * Definitions for srcu torture testing.
 621 */
 622
 623DEFINE_STATIC_SRCU(srcu_ctl);
 624static struct srcu_struct srcu_ctld;
 625static struct srcu_struct *srcu_ctlp = &srcu_ctl;
 626static struct rcu_torture_ops srcud_ops;
 627
 628static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
 629{
 630	if (cur_ops == &srcud_ops)
 631		return srcu_read_lock_nmisafe(srcu_ctlp);
 632	else
 633		return srcu_read_lock(srcu_ctlp);
 634}
 635
 636static void
 637srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 638{
 639	long delay;
 640	const long uspertick = 1000000 / HZ;
 641	const long longdelay = 10;
 642
 643	/* We want there to be long-running readers, but not all the time. */
 644
 645	delay = torture_random(rrsp) %
 646		(nrealreaders * 2 * longdelay * uspertick);
 647	if (!delay && in_task()) {
 648		schedule_timeout_interruptible(longdelay);
 649		rtrsp->rt_delay_jiffies = longdelay;
 650	} else {
 651		rcu_read_delay(rrsp, rtrsp);
 652	}
 653}
 654
 655static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
 656{
 657	if (cur_ops == &srcud_ops)
 658		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
 659	else
 660		srcu_read_unlock(srcu_ctlp, idx);
 661}
 662
 663static int torture_srcu_read_lock_held(void)
 664{
 665	return srcu_read_lock_held(srcu_ctlp);
 666}
 667
 668static unsigned long srcu_torture_completed(void)
 669{
 670	return srcu_batches_completed(srcu_ctlp);
 671}
 672
 673static void srcu_torture_deferred_free(struct rcu_torture *rp)
 674{
 675	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
 676}
 677
 678static void srcu_torture_synchronize(void)
 679{
 680	synchronize_srcu(srcu_ctlp);
 681}
 682
 683static unsigned long srcu_torture_get_gp_state(void)
 684{
 685	return get_state_synchronize_srcu(srcu_ctlp);
 686}
 687
 688static unsigned long srcu_torture_start_gp_poll(void)
 689{
 690	return start_poll_synchronize_srcu(srcu_ctlp);
 691}
 692
 693static bool srcu_torture_poll_gp_state(unsigned long oldstate)
 694{
 695	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
 696}
 697
 698static void srcu_torture_call(struct rcu_head *head,
 699			      rcu_callback_t func)
 700{
 701	call_srcu(srcu_ctlp, head, func);
 702}
 703
 704static void srcu_torture_barrier(void)
 705{
 706	srcu_barrier(srcu_ctlp);
 707}
 708
 709static void srcu_torture_stats(void)
 710{
 711	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
 712}
 713
 714static void srcu_torture_synchronize_expedited(void)
 715{
 716	synchronize_srcu_expedited(srcu_ctlp);
 717}
 718
 719static struct rcu_torture_ops srcu_ops = {
 720	.ttype		= SRCU_FLAVOR,
 721	.init		= rcu_sync_torture_init,
 722	.readlock	= srcu_torture_read_lock,
 723	.read_delay	= srcu_read_delay,
 724	.readunlock	= srcu_torture_read_unlock,
 725	.readlock_held	= torture_srcu_read_lock_held,
 726	.get_gp_seq	= srcu_torture_completed,
 727	.deferred_free	= srcu_torture_deferred_free,
 728	.sync		= srcu_torture_synchronize,
 729	.exp_sync	= srcu_torture_synchronize_expedited,
 730	.get_gp_state	= srcu_torture_get_gp_state,
 731	.start_gp_poll	= srcu_torture_start_gp_poll,
 732	.poll_gp_state	= srcu_torture_poll_gp_state,
 733	.call		= srcu_torture_call,
 734	.cb_barrier	= srcu_torture_barrier,
 735	.stats		= srcu_torture_stats,
 736	.cbflood_max	= 50000,
 737	.irq_capable	= 1,
 738	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 739	.name		= "srcu"
 740};
 741
 742static void srcu_torture_init(void)
 743{
 744	rcu_sync_torture_init();
 745	WARN_ON(init_srcu_struct(&srcu_ctld));
 746	srcu_ctlp = &srcu_ctld;
 747}
 748
 749static void srcu_torture_cleanup(void)
 750{
 751	cleanup_srcu_struct(&srcu_ctld);
 752	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
 753}
 754
 755/* As above, but dynamically allocated. */
 756static struct rcu_torture_ops srcud_ops = {
 757	.ttype		= SRCU_FLAVOR,
 758	.init		= srcu_torture_init,
 759	.cleanup	= srcu_torture_cleanup,
 760	.readlock	= srcu_torture_read_lock,
 761	.read_delay	= srcu_read_delay,
 762	.readunlock	= srcu_torture_read_unlock,
 763	.readlock_held	= torture_srcu_read_lock_held,
 764	.get_gp_seq	= srcu_torture_completed,
 765	.deferred_free	= srcu_torture_deferred_free,
 766	.sync		= srcu_torture_synchronize,
 767	.exp_sync	= srcu_torture_synchronize_expedited,
 768	.get_gp_state	= srcu_torture_get_gp_state,
 769	.start_gp_poll	= srcu_torture_start_gp_poll,
 770	.poll_gp_state	= srcu_torture_poll_gp_state,
 771	.call		= srcu_torture_call,
 772	.cb_barrier	= srcu_torture_barrier,
 773	.stats		= srcu_torture_stats,
 774	.cbflood_max	= 50000,
 775	.irq_capable	= 1,
 776	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 777	.name		= "srcud"
 778};
 779
 780/* As above, but broken due to inappropriate reader extension. */
 781static struct rcu_torture_ops busted_srcud_ops = {
 782	.ttype		= SRCU_FLAVOR,
 783	.init		= srcu_torture_init,
 784	.cleanup	= srcu_torture_cleanup,
 785	.readlock	= srcu_torture_read_lock,
 786	.read_delay	= rcu_read_delay,
 787	.readunlock	= srcu_torture_read_unlock,
 788	.readlock_held	= torture_srcu_read_lock_held,
 789	.get_gp_seq	= srcu_torture_completed,
 790	.deferred_free	= srcu_torture_deferred_free,
 791	.sync		= srcu_torture_synchronize,
 792	.exp_sync	= srcu_torture_synchronize_expedited,
 793	.call		= srcu_torture_call,
 794	.cb_barrier	= srcu_torture_barrier,
 795	.stats		= srcu_torture_stats,
 796	.irq_capable	= 1,
 797	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 798	.extendables	= RCUTORTURE_MAX_EXTEND,
 799	.name		= "busted_srcud"
 800};
 801
 802/*
 803 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 804 * This implementation does not necessarily work well with CPU hotplug.
 805 */
 806
 807static void synchronize_rcu_trivial(void)
 808{
 809	int cpu;
 810
 811	for_each_online_cpu(cpu) {
 812		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
 813		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
 814	}
 815}
 816
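/*
 * The above works only because trivial readers merely disable
 * preemption: once this updater has been scheduled on a given CPU, any
 * reader previously running there must have finished its critical
 * section.
 */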
 817static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
 818{
 819	preempt_disable();
 820	return 0;
 821}
 822
 823static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
 824{
 825	preempt_enable();
 826}
 827
 828static struct rcu_torture_ops trivial_ops = {
 829	.ttype		= RCU_TRIVIAL_FLAVOR,
 830	.init		= rcu_sync_torture_init,
 831	.readlock	= rcu_torture_read_lock_trivial,
 832	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 833	.readunlock	= rcu_torture_read_unlock_trivial,
 834	.readlock_held	= torture_readlock_not_held,
 835	.get_gp_seq	= rcu_no_completed,
 836	.sync		= synchronize_rcu_trivial,
 837	.exp_sync	= synchronize_rcu_trivial,
 838	.fqs		= NULL,
 839	.stats		= NULL,
 840	.irq_capable	= 1,
 841	.name		= "trivial"
 842};
 843
 844#ifdef CONFIG_TASKS_RCU
 845
 846/*
 847 * Definitions for RCU-tasks torture testing.
 848 */
 849
 850static int tasks_torture_read_lock(void)
 851{
 852	return 0;
 853}
 854
 855static void tasks_torture_read_unlock(int idx)
 856{
 857}
 858
 859static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
 860{
 861	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
 862}
 863
 864static void synchronize_rcu_mult_test(void)
 865{
 866	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
 867}
 868
 869static struct rcu_torture_ops tasks_ops = {
 870	.ttype		= RCU_TASKS_FLAVOR,
 871	.init		= rcu_sync_torture_init,
 872	.readlock	= tasks_torture_read_lock,
 873	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 874	.readunlock	= tasks_torture_read_unlock,
 875	.get_gp_seq	= rcu_no_completed,
 876	.deferred_free	= rcu_tasks_torture_deferred_free,
 877	.sync		= synchronize_rcu_tasks,
 878	.exp_sync	= synchronize_rcu_mult_test,
 879	.call		= call_rcu_tasks,
 880	.cb_barrier	= rcu_barrier_tasks,
 881	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
 882	.fqs		= NULL,
 883	.stats		= NULL,
 884	.irq_capable	= 1,
 885	.slow_gps	= 1,
 886	.name		= "tasks"
 887};
 888
 889#define TASKS_OPS &tasks_ops,
 890
 891#else // #ifdef CONFIG_TASKS_RCU
 892
 893#define TASKS_OPS
 894
 895#endif // #else #ifdef CONFIG_TASKS_RCU
 896
 897
 898#ifdef CONFIG_TASKS_RUDE_RCU
 899
 900/*
 901 * Definitions for rude RCU-tasks torture testing.
 902 */
 903
 904static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
 905{
 906	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
 907}
 908
 909static struct rcu_torture_ops tasks_rude_ops = {
 910	.ttype		= RCU_TASKS_RUDE_FLAVOR,
 911	.init		= rcu_sync_torture_init,
 912	.readlock	= rcu_torture_read_lock_trivial,
 913	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 914	.readunlock	= rcu_torture_read_unlock_trivial,
 915	.get_gp_seq	= rcu_no_completed,
 916	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
 917	.sync		= synchronize_rcu_tasks_rude,
 918	.exp_sync	= synchronize_rcu_tasks_rude,
 919	.call		= call_rcu_tasks_rude,
 920	.cb_barrier	= rcu_barrier_tasks_rude,
 921	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
 922	.cbflood_max	= 50000,
 923	.fqs		= NULL,
 924	.stats		= NULL,
 925	.irq_capable	= 1,
 926	.name		= "tasks-rude"
 927};
 928
 929#define TASKS_RUDE_OPS &tasks_rude_ops,
 930
 931#else // #ifdef CONFIG_TASKS_RUDE_RCU
 932
 933#define TASKS_RUDE_OPS
 934
 935#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
 936
 937
 938#ifdef CONFIG_TASKS_TRACE_RCU
 939
 940/*
 941 * Definitions for tracing RCU-tasks torture testing.
 942 */
 943
 944static int tasks_tracing_torture_read_lock(void)
 945{
 946	rcu_read_lock_trace();
 947	return 0;
 948}
 949
 950static void tasks_tracing_torture_read_unlock(int idx)
 951{
 952	rcu_read_unlock_trace();
 953}
 954
 955static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
 956{
 957	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
 958}
 959
 960static struct rcu_torture_ops tasks_tracing_ops = {
 961	.ttype		= RCU_TASKS_TRACING_FLAVOR,
 962	.init		= rcu_sync_torture_init,
 963	.readlock	= tasks_tracing_torture_read_lock,
 964	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
 965	.readunlock	= tasks_tracing_torture_read_unlock,
 966	.readlock_held	= rcu_read_lock_trace_held,
 967	.get_gp_seq	= rcu_no_completed,
 968	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
 969	.sync		= synchronize_rcu_tasks_trace,
 970	.exp_sync	= synchronize_rcu_tasks_trace,
 971	.call		= call_rcu_tasks_trace,
 972	.cb_barrier	= rcu_barrier_tasks_trace,
 973	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
 974	.cbflood_max	= 50000,
 975	.fqs		= NULL,
 976	.stats		= NULL,
 977	.irq_capable	= 1,
 978	.slow_gps	= 1,
 979	.name		= "tasks-tracing"
 980};
 981
 982#define TASKS_TRACING_OPS &tasks_tracing_ops,
 983
 984#else // #ifdef CONFIG_TASKS_TRACE_RCU
 985
 986#define TASKS_TRACING_OPS
 987
 988#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
 989
 990
 991static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
 992{
 993	if (!cur_ops->gp_diff)
 994		return new - old;
 995	return cur_ops->gp_diff(new, old);
 996}
 997
 998/*
 999 * RCU torture priority-boost testing.  Runs one real-time thread per
1000 * CPU for moderate bursts, repeatedly starting grace periods and waiting
1001 * for them to complete.  If a given grace period takes too long, we assume
1002 * that priority inversion has occurred.
1003 */
1004
1005static int old_rt_runtime = -1;
1006
1007static void rcu_torture_disable_rt_throttle(void)
1008{
1009	/*
1010	 * Disable RT throttling so that rcutorture's boost threads don't get
1011	 * throttled. Only possible if rcutorture is built-in; otherwise the
1012	 * user should manually do this by setting the sched_rt_period_us and
1013	 * sched_rt_runtime sysctls.
1014	 */
1015	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
1016		return;
1017
1018	old_rt_runtime = sysctl_sched_rt_runtime;
1019	sysctl_sched_rt_runtime = -1;
1020}
1021
1022static void rcu_torture_enable_rt_throttle(void)
1023{
1024	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
1025		return;
1026
1027	sysctl_sched_rt_runtime = old_rt_runtime;
1028	old_rt_runtime = -1;
1029}
1030
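/*
 * For modular rcutorture, the equivalent manual steps are, as a sketch:
 *
 *	sysctl -w kernel.sched_rt_runtime_us=-1	# before the run
 *	sysctl -w kernel.sched_rt_runtime_us=950000	# restore default
 */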
1031static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
1032{
1033	int cpu;
1034	static int dbg_done;
1035	unsigned long end = jiffies;
1036	bool gp_done;
1037	unsigned long j;
1038	static unsigned long last_persist;
1039	unsigned long lp;
1040	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
1041
1042	if (end - *start > mininterval) {
1043		// Recheck after checking time to avoid false positives.
1044		smp_mb(); // Time check before grace-period check.
1045		if (cur_ops->poll_gp_state(gp_state))
1046			return false; // passed, though perhaps just barely
1047		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
1048			// At most one persisted message per boost test.
1049			j = jiffies;
1050			lp = READ_ONCE(last_persist);
1051			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
1052				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
1053			return false; // passed on a technicality
1054		}
1055		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
1056		n_rcu_torture_boost_failure++;
1057		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
1058			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
1059				current->rt_priority, gp_state, end - *start);
1060			cur_ops->gp_kthread_dbg();
1061			// Recheck after print to flag grace period ending during splat.
1062			gp_done = cur_ops->poll_gp_state(gp_state);
1063			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
1064				gp_done ? "ended already" : "still pending");
1065
1066		}
1067
1068		return true; // failed
1069	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
1070		*start = jiffies;
1071	}
1072
1073	return false; // passed
1074}
1075
1076static int rcu_torture_boost(void *arg)
1077{
1078	unsigned long endtime;
1079	unsigned long gp_state;
1080	unsigned long gp_state_time;
1081	unsigned long oldstarttime;
1082
1083	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
1084
1085	/* Set real-time priority. */
1086	sched_set_fifo_low(current);
1087
1088	/* Each pass through the following loop does one boost-test cycle. */
1089	do {
1090		bool failed = false; // Test failed already in this test interval
1091		bool gp_initiated = false;
1092
1093		if (kthread_should_stop())
1094			goto checkwait;
1095
1096		/* Wait for the next test interval. */
1097		oldstarttime = READ_ONCE(boost_starttime);
1098		while (time_before(jiffies, oldstarttime)) {
1099			schedule_timeout_interruptible(oldstarttime - jiffies);
1100			if (stutter_wait("rcu_torture_boost"))
1101				sched_set_fifo_low(current);
1102			if (torture_must_stop())
1103				goto checkwait;
1104		}
1105
1106		// Do one boost-test interval.
1107		endtime = oldstarttime + test_boost_duration * HZ;
1108		while (time_before(jiffies, endtime)) {
1109			// Has current GP gone too long?
1110			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1111				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
1112			// If we don't have a grace period in flight, start one.
1113			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1114				gp_state = cur_ops->start_gp_poll();
1115				gp_initiated = true;
1116				gp_state_time = jiffies;
1117			}
1118			if (stutter_wait("rcu_torture_boost")) {
1119				sched_set_fifo_low(current);
1120				// If the grace period already ended,
1121				// we don't know when that happened, so
1122				// start over.
1123				if (cur_ops->poll_gp_state(gp_state))
1124					gp_initiated = false;
1125			}
1126			if (torture_must_stop())
1127				goto checkwait;
1128		}
1129
1130		// In case the grace period extended beyond the end of the loop.
1131		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1132			rcu_torture_boost_failed(gp_state, &gp_state_time);
1133
1134		/*
1135		 * Set the start time of the next test interval.
1136		 * Yes, this is vulnerable to long delays, but such
1137		 * delays simply cause a false negative for the next
1138		 * interval.  Besides, we are running at RT priority,
1139		 * so delays should be relatively rare.
1140		 */
1141		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
1142			if (mutex_trylock(&boost_mutex)) {
1143				if (oldstarttime == boost_starttime) {
1144					WRITE_ONCE(boost_starttime,
1145						   jiffies + test_boost_interval * HZ);
1146					n_rcu_torture_boosts++;
1147				}
1148				mutex_unlock(&boost_mutex);
1149				break;
1150			}
1151			schedule_timeout_uninterruptible(1);
1152		}
1153
1154		/* Go do the stutter. */
1155checkwait:	if (stutter_wait("rcu_torture_boost"))
1156			sched_set_fifo_low(current);
1157	} while (!torture_must_stop());
1158
1159	/* Clean up and exit. */
1160	while (!kthread_should_stop()) {
1161		torture_shutdown_absorb("rcu_torture_boost");
1162		schedule_timeout_uninterruptible(1);
1163	}
1164	torture_kthread_stopping("rcu_torture_boost");
1165	return 0;
1166}
1167
1168/*
1169 * RCU torture force-quiescent-state kthread.  Repeatedly induces
1170 * bursts of calls to force_quiescent_state(), increasing the probability
1171 * of occurrence of some important types of race conditions.
1172 */
1173static int
1174rcu_torture_fqs(void *arg)
1175{
1176	unsigned long fqs_resume_time;
1177	int fqs_burst_remaining;
1178	int oldnice = task_nice(current);
1179
1180	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1181	do {
1182		fqs_resume_time = jiffies + fqs_stutter * HZ;
1183		while (time_before(jiffies, fqs_resume_time) &&
1184		       !kthread_should_stop()) {
1185			schedule_timeout_interruptible(1);
1186		}
1187		fqs_burst_remaining = fqs_duration;
1188		while (fqs_burst_remaining > 0 &&
1189		       !kthread_should_stop()) {
1190			cur_ops->fqs();
1191			udelay(fqs_holdoff);
1192			fqs_burst_remaining -= fqs_holdoff;
1193		}
1194		if (stutter_wait("rcu_torture_fqs"))
1195			sched_set_normal(current, oldnice);
1196	} while (!torture_must_stop());
1197	torture_kthread_stopping("rcu_torture_fqs");
1198	return 0;
1199}
1200
1201// Used by writers to randomly choose from the available grace-period primitives.
1202static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
1203static int nsynctypes;
1204
1205/*
1206 * Determine which grace-period primitives are available.
1207 */
1208static void rcu_torture_write_types(void)
1209{
1210	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
1211	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
1212	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
1213	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;
1214
1215	/* Initialize synctype[] array.  If none set, take default. */
1216	if (!gp_cond1 &&
1217	    !gp_cond_exp1 &&
1218	    !gp_cond_full1 &&
1219	    !gp_cond_exp_full1 &&
1220	    !gp_exp1 &&
1221	    !gp_poll_exp1 &&
1222	    !gp_poll_exp_full1 &&
1223	    !gp_normal1 &&
1224	    !gp_poll1 &&
1225	    !gp_poll_full1 &&
1226	    !gp_sync1) {
1227		gp_cond1 = true;
1228		gp_cond_exp1 = true;
1229		gp_cond_full1 = true;
1230		gp_cond_exp_full1 = true;
1231		gp_exp1 = true;
1232		gp_poll_exp1 = true;
1233		gp_poll_exp_full1 = true;
1234		gp_normal1 = true;
1235		gp_poll1 = true;
1236		gp_poll_full1 = true;
1237		gp_sync1 = true;
1238	}
1239	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1240		synctype[nsynctypes++] = RTWS_COND_GET;
1241		pr_info("%s: Testing conditional GPs.\n", __func__);
1242	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1243		pr_alert("%s: gp_cond without primitives.\n", __func__);
1244	}
1245	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
1246		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
1247		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
1248	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
1249		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
1250	}
1251	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
1252		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
1253		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
1254	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
1255		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
1256	}
1257	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
1258		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
1259		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
1260	} else if (gp_cond_exp_full &&
1261		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
1262		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
1263	}
1264	if (gp_exp1 && cur_ops->exp_sync) {
1265		synctype[nsynctypes++] = RTWS_EXP_SYNC;
1266		pr_info("%s: Testing expedited GPs.\n", __func__);
1267	} else if (gp_exp && !cur_ops->exp_sync) {
1268		pr_alert("%s: gp_exp without primitives.\n", __func__);
1269	}
1270	if (gp_normal1 && cur_ops->deferred_free) {
1271		synctype[nsynctypes++] = RTWS_DEF_FREE;
1272		pr_info("%s: Testing asynchronous GPs.\n", __func__);
1273	} else if (gp_normal && !cur_ops->deferred_free) {
1274		pr_alert("%s: gp_normal without primitives.\n", __func__);
1275	}
1276	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
1277	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1278		synctype[nsynctypes++] = RTWS_POLL_GET;
1279		pr_info("%s: Testing polling GPs.\n", __func__);
1280	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1281		pr_alert("%s: gp_poll without primitives.\n", __func__);
1282	}
1283	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
1284	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
1285		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
1286		pr_info("%s: Testing polling full-state GPs.\n", __func__);
1287	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
1288		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
1289	}
1290	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
1291		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
1292		pr_info("%s: Testing polling expedited GPs.\n", __func__);
1293	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
1294		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
1295	}
1296	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
1297		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
1298		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
1299	} else if (gp_poll_exp_full &&
1300		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
1301		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
1302	}
1303	if (gp_sync1 && cur_ops->sync) {
1304		synctype[nsynctypes++] = RTWS_SYNC;
1305		pr_info("%s: Testing normal GPs.\n", __func__);
1306	} else if (gp_sync && !cur_ops->sync) {
1307		pr_alert("%s: gp_sync without primitives.\n", __func__);
1308	}
1309}
1310
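/*
 * Writers then choose uniformly among the primitives discovered above,
 * as in the writer's main loop below:
 *
 *	switch (synctype[torture_random(&rand) % nsynctypes]) { ... }
 *
 * so a flavor lacking a given primitive is simply never asked for it.
 */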
1311/*
1312 * Do the specified rcu_torture_writer() synchronous grace period,
1313 * while also testing out the polled APIs.  Note well that the single-CPU
1314 * grace-period optimizations must be accounted for.
1315 */
1316static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
1317{
1318	unsigned long cookie;
1319	struct rcu_gp_oldstate cookie_full;
1320	bool dopoll;
1321	bool dopoll_full;
1322	unsigned long r = torture_random(trsp);
1323
1324	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
1325	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
1326	if (dopoll || dopoll_full)
1327		cpus_read_lock();
1328	if (dopoll)
1329		cookie = cur_ops->get_gp_state();
1330	if (dopoll_full)
1331		cur_ops->get_gp_state_full(&cookie_full);
1332	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
1333		sync();
1334	sync();
1335	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
1336		  "%s: Cookie check 3 failed %pS() online %*pbl.",
1337		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1338	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
1339		  "%s: Cookie check 4 failed %pS() online %*pbl",
1340		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1341	if (dopoll || dopoll_full)
1342		cpus_read_unlock();
1343}
1344
1345/*
1346 * RCU torture writer kthread.  Repeatedly substitutes a new structure
1347 * for that pointed to by rcu_torture_current, freeing the old structure
1348 * after a series of grace periods (the "pipeline").
1349 */
1350static int
1351rcu_torture_writer(void *arg)
1352{
1353	bool boot_ended;
1354	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1355	unsigned long cookie;
1356	struct rcu_gp_oldstate cookie_full;
1357	int expediting = 0;
1358	unsigned long gp_snap;
1359	unsigned long gp_snap1;
1360	struct rcu_gp_oldstate gp_snap_full;
1361	struct rcu_gp_oldstate gp_snap1_full;
1362	int i;
1363	int idx;
1364	int oldnice = task_nice(current);
1365	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
1366	struct rcu_torture *rp;
1367	struct rcu_torture *old_rp;
1368	static DEFINE_TORTURE_RANDOM(rand);
1369	bool stutter_waited;
1370	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];
1371
1372	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1373	if (!can_expedite)
1374		pr_alert("%s" TORTURE_FLAG
1375			 " GP expediting controlled from boot/sysfs for %s.\n",
1376			 torture_type, cur_ops->name);
1377	if (WARN_ONCE(nsynctypes == 0,
1378		      "%s: No update-side primitives.\n", __func__)) {
1379		/*
1380		 * No update-side primitives, so don't try updating.
1381		 * The resulting test won't be testing much, hence the
1382		 * above WARN_ONCE().
1383		 */
1384		rcu_torture_writer_state = RTWS_STOPPING;
1385		torture_kthread_stopping("rcu_torture_writer");
1386		return 0;
1387	}
1388
1389	do {
1390		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1391		torture_hrtimeout_us(500, 1000, &rand);
1392		rp = rcu_torture_alloc();
1393		if (rp == NULL)
1394			continue;
1395		rp->rtort_pipe_count = 0;
1396		rcu_torture_writer_state = RTWS_DELAY;
1397		udelay(torture_random(&rand) & 0x3ff);
1398		rcu_torture_writer_state = RTWS_REPLACE;
1399		old_rp = rcu_dereference_check(rcu_torture_current,
1400					       current == writer_task);
1401		rp->rtort_mbtest = 1;
1402		rcu_assign_pointer(rcu_torture_current, rp);
1403		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1404		if (old_rp) {
1405			i = old_rp->rtort_pipe_count;
1406			if (i > RCU_TORTURE_PIPE_LEN)
1407				i = RCU_TORTURE_PIPE_LEN;
1408			atomic_inc(&rcu_torture_wcount[i]);
1409			WRITE_ONCE(old_rp->rtort_pipe_count,
1410				   old_rp->rtort_pipe_count + 1);
1411
1412			// Make sure readers block polled grace periods.
1413			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1414				idx = cur_ops->readlock();
1415				cookie = cur_ops->get_gp_state();
1416				WARN_ONCE(cur_ops->poll_gp_state(cookie),
1417					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1418					  __func__,
1419					  rcu_torture_writer_state_getname(),
1420					  rcu_torture_writer_state,
1421					  cookie, cur_ops->get_gp_state());
1422				if (cur_ops->get_gp_completed) {
1423					cookie = cur_ops->get_gp_completed();
1424					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1425				}
1426				cur_ops->readunlock(idx);
1427			}
1428			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1429				idx = cur_ops->readlock();
1430				cur_ops->get_gp_state_full(&cookie_full);
1431				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1432					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1433					  __func__,
1434					  rcu_torture_writer_state_getname(),
1435					  rcu_torture_writer_state,
1436					  cpumask_pr_args(cpu_online_mask));
1437				if (cur_ops->get_gp_completed_full) {
1438					cur_ops->get_gp_completed_full(&cookie_full);
1439					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1440				}
1441				cur_ops->readunlock(idx);
1442			}
1443			switch (synctype[torture_random(&rand) % nsynctypes]) {
1444			case RTWS_DEF_FREE:
1445				rcu_torture_writer_state = RTWS_DEF_FREE;
1446				cur_ops->deferred_free(old_rp);
1447				break;
1448			case RTWS_EXP_SYNC:
1449				rcu_torture_writer_state = RTWS_EXP_SYNC;
1450				do_rtws_sync(&rand, cur_ops->exp_sync);
1451				rcu_torture_pipe_update(old_rp);
1452				break;
1453			case RTWS_COND_GET:
1454				rcu_torture_writer_state = RTWS_COND_GET;
1455				gp_snap = cur_ops->get_gp_state();
1456				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1457				rcu_torture_writer_state = RTWS_COND_SYNC;
1458				cur_ops->cond_sync(gp_snap);
1459				rcu_torture_pipe_update(old_rp);
1460				break;
1461			case RTWS_COND_GET_EXP:
1462				rcu_torture_writer_state = RTWS_COND_GET_EXP;
1463				gp_snap = cur_ops->get_gp_state_exp();
1464				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1465				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1466				cur_ops->cond_sync_exp(gp_snap);
1467				rcu_torture_pipe_update(old_rp);
1468				break;
1469			case RTWS_COND_GET_FULL:
1470				rcu_torture_writer_state = RTWS_COND_GET_FULL;
1471				cur_ops->get_gp_state_full(&gp_snap_full);
1472				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1473				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
1474				cur_ops->cond_sync_full(&gp_snap_full);
1475				rcu_torture_pipe_update(old_rp);
1476				break;
1477			case RTWS_COND_GET_EXP_FULL:
1478				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1479				cur_ops->get_gp_state_full(&gp_snap_full);
1480				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1481				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1482				cur_ops->cond_sync_exp_full(&gp_snap_full);
1483				rcu_torture_pipe_update(old_rp);
1484				break;
1485			case RTWS_POLL_GET:
1486				rcu_torture_writer_state = RTWS_POLL_GET;
1487				for (i = 0; i < ARRAY_SIZE(ulo); i++)
1488					ulo[i] = cur_ops->get_comp_state();
1489				gp_snap = cur_ops->start_gp_poll();
1490				rcu_torture_writer_state = RTWS_POLL_WAIT;
1491				while (!cur_ops->poll_gp_state(gp_snap)) {
1492					gp_snap1 = cur_ops->get_gp_state();
1493					for (i = 0; i < ARRAY_SIZE(ulo); i++)
1494						if (cur_ops->poll_gp_state(ulo[i]) ||
1495						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1496							ulo[i] = gp_snap1;
1497							break;
1498						}
1499					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
1500					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1501								  &rand);
1502				}
1503				rcu_torture_pipe_update(old_rp);
1504				break;
1505			case RTWS_POLL_GET_FULL:
1506				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1507				for (i = 0; i < ARRAY_SIZE(rgo); i++)
1508					cur_ops->get_comp_state_full(&rgo[i]);
1509				cur_ops->start_gp_poll_full(&gp_snap_full);
1510				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1511				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1512					cur_ops->get_gp_state_full(&gp_snap1_full);
1513					for (i = 0; i < ARRAY_SIZE(rgo); i++)
1514						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1515						    cur_ops->same_gp_state_full(&rgo[i],
1516										&gp_snap1_full)) {
1517							rgo[i] = gp_snap1_full;
1518							break;
1519						}
1520					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
1521					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1522								  &rand);
1523				}
1524				rcu_torture_pipe_update(old_rp);
1525				break;
1526			case RTWS_POLL_GET_EXP:
1527				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1528				gp_snap = cur_ops->start_gp_poll_exp();
1529				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1530				while (!cur_ops->poll_gp_state_exp(gp_snap))
1531					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1532								  &rand);
1533				rcu_torture_pipe_update(old_rp);
1534				break;
1535			case RTWS_POLL_GET_EXP_FULL:
1536				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1537				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1538				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1539				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1540					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1541								  &rand);
1542				rcu_torture_pipe_update(old_rp);
1543				break;
1544			case RTWS_SYNC:
1545				rcu_torture_writer_state = RTWS_SYNC;
1546				do_rtws_sync(&rand, cur_ops->sync);
1547				rcu_torture_pipe_update(old_rp);
1548				break;
1549			default:
1550				WARN_ON_ONCE(1);
1551				break;
1552			}
1553		}
1554		WRITE_ONCE(rcu_torture_current_version,
1555			   rcu_torture_current_version + 1);
1556		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1557		if (can_expedite &&
1558		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1559			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1560			if (expediting >= 0)
1561				rcu_expedite_gp();
1562			else
1563				rcu_unexpedite_gp();
1564			if (++expediting > 3)
1565				expediting = -expediting;
1566		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1567			can_expedite = !rcu_gp_is_expedited() &&
1568				       !rcu_gp_is_normal();
1569		}
1570		rcu_torture_writer_state = RTWS_STUTTER;
1571		boot_ended = rcu_inkernel_boot_has_ended();
1572		stutter_waited = stutter_wait("rcu_torture_writer");
1573		if (stutter_waited &&
1574		    !atomic_read(&rcu_fwd_cb_nodelay) &&
1575		    !cur_ops->slow_gps &&
1576		    !torture_must_stop() &&
1577		    boot_ended)
1578			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1579				if (list_empty(&rcu_tortures[i].rtort_free) &&
1580				    rcu_access_pointer(rcu_torture_current) !=
1581				    &rcu_tortures[i]) {
1582					tracing_off();
1583					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1584					rcu_ftrace_dump(DUMP_ALL);
1585				}
1586		if (stutter_waited)
1587			sched_set_normal(current, oldnice);
1588	} while (!torture_must_stop());
1589	rcu_torture_current = NULL;  // Let stats task know that we are done.
1590	/* Reset expediting back to unexpedited. */
1591	if (expediting > 0)
1592		expediting = -expediting;
1593	while (can_expedite && expediting++ < 0)
1594		rcu_unexpedite_gp();
1595	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1596	if (!can_expedite)
1597		pr_alert("%s" TORTURE_FLAG
1598			 " Dynamic grace-period expediting was disabled.\n",
1599			 torture_type);
1600	rcu_torture_writer_state = RTWS_STOPPING;
1601	torture_kthread_stopping("rcu_torture_writer");
1602	return 0;
1603}
1604
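/*
 * The RTWS_COND_* and RTWS_POLL_* cases above exercise RCU's polled
 * grace-period interfaces through cur_ops.  A minimal sketch of the
 * pattern under test, assuming the vanilla RCU flavor (this function
 * is purely illustrative and is never invoked by rcutorture):
 */
static void __maybe_unused rcu_polled_gp_example(void)
{
	unsigned long gp_snap;

	/* Conditional wait: snapshot, do other work, then wait if needed. */
	gp_snap = get_state_synchronize_rcu();
	schedule_timeout_uninterruptible(1);
	cond_synchronize_rcu(gp_snap);	/* No-op if a GP already elapsed. */

	/* Polled wait: explicitly start a GP, then poll for completion. */
	gp_snap = start_poll_synchronize_rcu();
	while (!poll_state_synchronize_rcu(gp_snap))
		schedule_timeout_uninterruptible(1);
}
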
1605/*
1606 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1607 * delay between calls.
1608 */
1609static int
1610rcu_torture_fakewriter(void *arg)
1611{
1612	unsigned long gp_snap;
1613	struct rcu_gp_oldstate gp_snap_full;
1614	DEFINE_TORTURE_RANDOM(rand);
1615
1616	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1617	set_user_nice(current, MAX_NICE);
1618
1619	if (WARN_ONCE(nsynctypes == 0,
1620		      "%s: No update-side primitives.\n", __func__)) {
1621		/*
1622		 * No update-side primitives, so don't try updating.
1623		 * The resulting test won't be testing much, hence the
1624		 * above WARN_ONCE().
1625		 */
1626		torture_kthread_stopping("rcu_torture_fakewriter");
1627		return 0;
1628	}
1629
1630	do {
1631		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1632		if (cur_ops->cb_barrier != NULL &&
1633		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1634			cur_ops->cb_barrier();
1635		} else {
1636			switch (synctype[torture_random(&rand) % nsynctypes]) {
1637			case RTWS_DEF_FREE:
1638				break;
1639			case RTWS_EXP_SYNC:
1640				cur_ops->exp_sync();
1641				break;
1642			case RTWS_COND_GET:
1643				gp_snap = cur_ops->get_gp_state();
1644				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1645				cur_ops->cond_sync(gp_snap);
1646				break;
1647			case RTWS_COND_GET_EXP:
1648				gp_snap = cur_ops->get_gp_state_exp();
1649				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1650				cur_ops->cond_sync_exp(gp_snap);
1651				break;
1652			case RTWS_COND_GET_FULL:
1653				cur_ops->get_gp_state_full(&gp_snap_full);
1654				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1655				cur_ops->cond_sync_full(&gp_snap_full);
1656				break;
1657			case RTWS_COND_GET_EXP_FULL:
1658				cur_ops->get_gp_state_full(&gp_snap_full);
1659				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1660				cur_ops->cond_sync_exp_full(&gp_snap_full);
1661				break;
1662			case RTWS_POLL_GET:
1663				gp_snap = cur_ops->start_gp_poll();
1664				while (!cur_ops->poll_gp_state(gp_snap)) {
1665					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1666								  &rand);
1667				}
1668				break;
1669			case RTWS_POLL_GET_FULL:
1670				cur_ops->start_gp_poll_full(&gp_snap_full);
1671				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1672					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1673								  &rand);
1674				}
1675				break;
1676			case RTWS_POLL_GET_EXP:
1677				gp_snap = cur_ops->start_gp_poll_exp();
1678				while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1679					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1680								  &rand);
1681				}
1682				break;
1683			case RTWS_POLL_GET_EXP_FULL:
1684				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1685				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1686					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1687								  &rand);
1688				}
1689				break;
1690			case RTWS_SYNC:
1691				cur_ops->sync();
1692				break;
1693			default:
1694				WARN_ON_ONCE(1);
1695				break;
1696			}
1697		}
1698		stutter_wait("rcu_torture_fakewriter");
1699	} while (!torture_must_stop());
1700
1701	torture_kthread_stopping("rcu_torture_fakewriter");
1702	return 0;
1703}
1704
1705static void rcu_torture_timer_cb(struct rcu_head *rhp)
1706{
1707	kfree(rhp);
1708}
1709
1710// Set up and carry out testing of RCU's global memory ordering
1711static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1712					struct torture_random_state *trsp)
1713{
1714	unsigned long loops;
1715	int noc = torture_num_online_cpus();
1716	int rdrchked;
1717	int rdrchker;
1718	struct rcu_torture_reader_check *rtrcp; // Me.
1719	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1720	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1721	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1722
1723	if (myid < 0)
1724		return; // Don't try this from timer handlers.
1725
1726	// Increment my counter.
1727	rtrcp = &rcu_torture_reader_mbchk[myid];
1728	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1729
1730	// Attempt to assign someone else some checking work.
1731	rdrchked = torture_random(trsp) % nrealreaders;
1732	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1733	rdrchker = torture_random(trsp) % nrealreaders;
1734	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1735	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1736	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1737	    !READ_ONCE(rtp->rtort_chkp) &&
1738	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1739		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1740		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1741		rtrcp->rtc_chkrdr = rdrchked;
1742		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1743		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1744		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1745			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1746	}
1747
1748	// If assigned some completed work, do it!
1749	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1750	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1751		return; // No work or work not yet ready.
1752	rdrchked = rtrcp_assigner->rtc_chkrdr;
1753	if (WARN_ON_ONCE(rdrchked < 0))
1754		return;
1755	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1756	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1757	atomic_inc(&n_rcu_torture_mbchk_tries);
1758	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1759		atomic_inc(&n_rcu_torture_mbchk_fail);
1760	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1761	rtrcp_assigner->rtc_ready = 0;
1762	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1763	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1764}
1765
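/*
 * The checking handoff above relies on the store-release/load-acquire
 * idiom: each smp_load_acquire() pairs with a release-ordered store
 * elsewhere in this file, so the payload written before the release is
 * guaranteed visible after a successful acquire.  A minimal sketch of
 * the idiom, using illustrative names that are not part of rcutorture:
 */
struct mb_handoff_example {
	unsigned long data;
	int ready;
};

static void __maybe_unused mb_handoff_produce(struct mb_handoff_example *p)
{
	p->data = 42;				/* Fill in the payload... */
	smp_store_release(&p->ready, 1);	/* ...then publish it. */
}

static unsigned long __maybe_unused mb_handoff_consume(struct mb_handoff_example *p)
{
	if (!smp_load_acquire(&p->ready))	/* Pairs with release above. */
		return 0;
	return p->data;		/* Payload guaranteed visible here. */
}
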
1766/*
1767 * Do one extension of an RCU read-side critical section using the
1768 * current reader state in readstate (set to zero for initial entry
1769 * to extended critical section), set the new state as specified by
1770 * newstate (set to zero for final exit from extended critical section),
1771 * and random-number-generator state in trsp.  If this is neither the
1772 * beginning or end of the critical section and if there was actually a
1773 * change, do a ->read_delay().
1774 */
1775static void rcutorture_one_extend(int *readstate, int newstate,
1776				  struct torture_random_state *trsp,
1777				  struct rt_read_seg *rtrsp)
1778{
1779	unsigned long flags;
1780	int idxnew1 = -1;
1781	int idxnew2 = -1;
1782	int idxold1 = *readstate;
1783	int idxold2 = idxold1;
1784	int statesnew = ~*readstate & newstate;
1785	int statesold = *readstate & ~newstate;
1786
1787	WARN_ON_ONCE(idxold2 < 0);
1788	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1789	rtrsp->rt_readstate = newstate;
1790
1791	/* First, put new protection in place to avoid critical-section gap. */
1792	if (statesnew & RCUTORTURE_RDR_BH)
1793		local_bh_disable();
1794	if (statesnew & RCUTORTURE_RDR_RBH)
1795		rcu_read_lock_bh();
1796	if (statesnew & RCUTORTURE_RDR_IRQ)
1797		local_irq_disable();
1798	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1799		preempt_disable();
1800	if (statesnew & RCUTORTURE_RDR_SCHED)
1801		rcu_read_lock_sched();
1802	if (statesnew & RCUTORTURE_RDR_RCU_1)
1803		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1804	if (statesnew & RCUTORTURE_RDR_RCU_2)
1805		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1806
1807	/*
1808	 * Next, remove old protection, in decreasing order of strength
1809	 * to avoid unlock paths that aren't safe in the stronger
1810	 * context.  Namely: BH cannot be re-enabled while interrupts are
1811	 * disabled.  Additionally, PREEMPT_RT requires that BH be enabled
1812	 * only in preemptible context.
1813	 */
1814	if (statesold & RCUTORTURE_RDR_IRQ)
1815		local_irq_enable();
1816	if (statesold & RCUTORTURE_RDR_PREEMPT)
1817		preempt_enable();
1818	if (statesold & RCUTORTURE_RDR_SCHED)
1819		rcu_read_unlock_sched();
1820	if (statesold & RCUTORTURE_RDR_BH)
1821		local_bh_enable();
1822	if (statesold & RCUTORTURE_RDR_RBH)
1823		rcu_read_unlock_bh();
1824	if (statesold & RCUTORTURE_RDR_RCU_2) {
1825		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1826		WARN_ON_ONCE(idxnew2 != -1);
1827		idxold2 = 0;
1828	}
1829	if (statesold & RCUTORTURE_RDR_RCU_1) {
1830		bool lockit;
1831
1832		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1833		if (lockit)
1834			raw_spin_lock_irqsave(&current->pi_lock, flags);
1835		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1836		WARN_ON_ONCE(idxnew1 != -1);
1837		idxold1 = 0;
1838		if (lockit)
1839			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1840	}
1841
1842	/* Delay if neither beginning nor end and there was a change. */
1843	if ((statesnew || statesold) && *readstate && newstate)
1844		cur_ops->read_delay(trsp, rtrsp);
1845
1846	/* Update the reader state. */
1847	if (idxnew1 == -1)
1848		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1849	WARN_ON_ONCE(idxnew1 < 0);
1850	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1851		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1852	if (idxnew2 == -1)
1853		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1854	WARN_ON_ONCE(idxnew2 < 0);
1855	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1856	*readstate = idxnew1 | idxnew2 | newstate;
1857	WARN_ON_ONCE(*readstate < 0);
1858	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1859		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1860}
1861
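/*
 * A minimal sketch of the two ordering rules that rcutorture_one_extend()
 * enforces, written out with raw primitives (illustrative only): first,
 * put new protection in place before dropping old protection so that the
 * critical section has no unprotected gap; second, drop stronger contexts
 * before weaker ones, since for example BH may be re-enabled only with
 * interrupts on.
 */
static void __maybe_unused extend_handoff_example(void)
{
	rcu_read_lock();		/* Old protection. */
	preempt_disable();		/* New protection in place first... */
	rcu_read_unlock();		/* ...then drop the old one: no gap. */
	preempt_enable();
}

static void __maybe_unused unlock_order_example(void)
{
	local_bh_disable();
	local_irq_disable();
	local_irq_enable();		/* Stronger context dropped first... */
	local_bh_enable();		/* ...so BH is re-enabled with irqs on. */
}
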
1862/* Return the biggest extendables mask given current RCU and boot parameters. */
1863static int rcutorture_extend_mask_max(void)
1864{
1865	int mask;
1866
1867	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1868	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1869	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1870	return mask;
1871}
1872
1873/* Return a random protection state mask, but with at least one bit set. */
1874static int
1875rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1876{
1877	int mask = rcutorture_extend_mask_max();
1878	unsigned long randmask1 = torture_random(trsp) >> 8;
1879	unsigned long randmask2 = randmask1 >> 3;
1880	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1881	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1882	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1883
1884	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1885	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1886	if (!(randmask1 & 0x7))
1887		mask = mask & randmask2;
1888	else
1889		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1890
1891	// Can't have nested RCU reader without outer RCU reader.
1892	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1893		if (oldmask & RCUTORTURE_RDR_RCU_1)
1894			mask &= ~RCUTORTURE_RDR_RCU_2;
1895		else
1896			mask |= RCUTORTURE_RDR_RCU_1;
1897	}
1898
1899	/*
1900	 * Can't enable bh w/irq disabled.
1901	 */
1902	if (mask & RCUTORTURE_RDR_IRQ)
1903		mask |= oldmask & bhs;
1904
1905	/*
1906	 * Ideally these sequences would be detected in debug builds
1907	 * (regardless of RT), but until then don't stop testing
1908	 * them on non-RT.
1909	 */
1910	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1911		/* Can't modify BH in atomic context */
1912		if (oldmask & preempts_irq)
1913			mask &= ~bhs;
1914		if ((oldmask | mask) & preempts_irq)
1915			mask |= oldmask & bhs;
1916	}
1917
1918	return mask ?: RCUTORTURE_RDR_RCU_1;
1919}
1920
1921/*
1922 * Do a randomly selected number of extensions of an existing RCU read-side
1923 * critical section.
1924 */
1925static struct rt_read_seg *
1926rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1927		       struct rt_read_seg *rtrsp)
1928{
1929	int i;
1930	int j;
1931	int mask = rcutorture_extend_mask_max();
1932
1933	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1934	if (!((mask - 1) & mask))
1935		return rtrsp;  /* Current RCU reader not extendable. */
1936	/* Bias towards larger numbers of loops. */
1937	i = (torture_random(trsp) >> 3);
1938	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1939	for (j = 0; j < i; j++) {
1940		mask = rcutorture_extend_mask(*readstate, trsp);
1941		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1942	}
1943	return &rtrsp[j];
1944}
1945
1946/*
1947 * Do one read-side critical section, returning false if there was
1948 * no data to read.  Can be invoked both from process context and
1949 * from a timer handler.
1950 */
1951static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1952{
1953	bool checkpolling = !(torture_random(trsp) & 0xfff);
1954	unsigned long cookie;
1955	struct rcu_gp_oldstate cookie_full;
1956	int i;
1957	unsigned long started;
1958	unsigned long completed;
1959	int newstate;
1960	struct rcu_torture *p;
1961	int pipe_count;
1962	int readstate = 0;
1963	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1964	struct rt_read_seg *rtrsp = &rtseg[0];
1965	struct rt_read_seg *rtrsp1;
1966	unsigned long long ts;
1967
1968	WARN_ON_ONCE(!rcu_is_watching());
1969	newstate = rcutorture_extend_mask(readstate, trsp);
1970	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1971	if (checkpolling) {
1972		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1973			cookie = cur_ops->get_gp_state();
1974		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
1975			cur_ops->get_gp_state_full(&cookie_full);
1976	}
1977	started = cur_ops->get_gp_seq();
1978	ts = rcu_trace_clock_local();
1979	p = rcu_dereference_check(rcu_torture_current,
1980				  !cur_ops->readlock_held || cur_ops->readlock_held());
1981	if (p == NULL) {
1982		/* Wait for rcu_torture_writer to get underway */
1983		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1984		return false;
1985	}
1986	if (p->rtort_mbtest == 0)
1987		atomic_inc(&n_rcu_torture_mberror);
1988	rcu_torture_reader_do_mbchk(myid, p, trsp);
1989	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1990	preempt_disable();
1991	pipe_count = READ_ONCE(p->rtort_pipe_count);
1992	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1993		/* Should not happen, but... */
1994		pipe_count = RCU_TORTURE_PIPE_LEN;
1995	}
1996	completed = cur_ops->get_gp_seq();
1997	if (pipe_count > 1) {
1998		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1999					  ts, started, completed);
2000		rcu_ftrace_dump(DUMP_ALL);
2001	}
2002	__this_cpu_inc(rcu_torture_count[pipe_count]);
2003	completed = rcutorture_seq_diff(completed, started);
2004	if (completed > RCU_TORTURE_PIPE_LEN) {
2005		/* Should not happen, but... */
2006		completed = RCU_TORTURE_PIPE_LEN;
2007	}
2008	__this_cpu_inc(rcu_torture_batch[completed]);
2009	preempt_enable();
2010	if (checkpolling) {
2011		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2012			WARN_ONCE(cur_ops->poll_gp_state(cookie),
2013				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2014				  __func__,
2015				  rcu_torture_writer_state_getname(),
2016				  rcu_torture_writer_state,
2017				  cookie, cur_ops->get_gp_state());
2018		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2019			WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2020				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2021				  __func__,
2022				  rcu_torture_writer_state_getname(),
2023				  rcu_torture_writer_state,
2024				  cpumask_pr_args(cpu_online_mask));
2025	}
2026	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2027	WARN_ON_ONCE(readstate);
2028	// This next splat is expected behavior if leakpointer, especially
2029	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2030	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2031
2032	/* If error or close call, record the sequence of reader protections. */
2033	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2034		i = 0;
2035		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2036			err_segs[i++] = *rtrsp1;
2037		rt_read_nsegs = i;
2038	}
2039
2040	return true;
2041}
2042
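/*
 * Stripped of its instrumentation, the read-side pattern that
 * rcu_torture_one_read() stresses is the canonical RCU reader shown
 * below.  Illustrative only: "example_gp" stands in for
 * rcu_torture_current and is not used elsewhere in this file.
 */
struct example_obj {
	int val;
};

static struct example_obj __rcu *example_gp;

static int __maybe_unused example_reader(void)
{
	struct example_obj *p;
	int ret = -1;

	rcu_read_lock();
	p = rcu_dereference(example_gp);
	if (p)
		ret = READ_ONCE(p->val);  /* Use p only inside the reader. */
	rcu_read_unlock();
	return ret;
}
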
2043static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2044
2045/*
2046 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
2047 * incrementing the corresponding element of the pipeline array.  The
2048 * counter in the element should never be greater than 1; otherwise, the
2049 * RCU implementation is broken.
2050 */
2051static void rcu_torture_timer(struct timer_list *unused)
2052{
2053	atomic_long_inc(&n_rcu_torture_timers);
2054	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2055
2056	/* Test call_rcu() invocation from interrupt handler. */
2057	if (cur_ops->call) {
2058		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2059
2060		if (rhp)
2061			cur_ops->call(rhp, rcu_torture_timer_cb);
2062	}
2063}
2064
2065/*
2066 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
2067 * incrementing the corresponding element of the pipeline array.  The
2068 * counter in the element should never be greater than 1; otherwise, the
2069 * RCU implementation is broken.
2070 */
2071static int
2072rcu_torture_reader(void *arg)
2073{
2074	unsigned long lastsleep = jiffies;
2075	long myid = (long)arg;
2076	int mynumonline = myid;
2077	DEFINE_TORTURE_RANDOM(rand);
2078	struct timer_list t;
2079
2080	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2081	set_user_nice(current, MAX_NICE);
2082	if (irqreader && cur_ops->irq_capable)
2083		timer_setup_on_stack(&t, rcu_torture_timer, 0);
2084	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2085	do {
2086		if (irqreader && cur_ops->irq_capable) {
2087			if (!timer_pending(&t))
2088				mod_timer(&t, jiffies + 1);
2089		}
2090		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2091			schedule_timeout_interruptible(HZ);
2092		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2093			torture_hrtimeout_us(500, 1000, &rand);
2094			lastsleep = jiffies + 10;
2095		}
2096		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2097			schedule_timeout_interruptible(HZ / 5);
2098		stutter_wait("rcu_torture_reader");
2099	} while (!torture_must_stop());
2100	if (irqreader && cur_ops->irq_capable) {
2101		del_timer_sync(&t);
2102		destroy_timer_on_stack(&t);
2103	}
2104	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2105	torture_kthread_stopping("rcu_torture_reader");
2106	return 0;
2107}
2108
2109/*
2110 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
2111 * increase race probabilities and fuzzes the interval between toggling.
2112 */
2113static int rcu_nocb_toggle(void *arg)
2114{
2115	int cpu;
2116	int maxcpu = -1;
2117	int oldnice = task_nice(current);
2118	long r;
2119	DEFINE_TORTURE_RANDOM(rand);
2120	ktime_t toggle_delay;
2121	unsigned long toggle_fuzz;
2122	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2123
2124	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2125	while (!rcu_inkernel_boot_has_ended())
2126		schedule_timeout_interruptible(HZ / 10);
2127	for_each_online_cpu(cpu)
2128		maxcpu = cpu;
2129	WARN_ON(maxcpu < 0);
2130	if (toggle_interval > ULONG_MAX)
2131		toggle_fuzz = ULONG_MAX >> 3;
2132	else
2133		toggle_fuzz = toggle_interval >> 3;
2134	if (toggle_fuzz <= 0)
2135		toggle_fuzz = NSEC_PER_USEC;
2136	do {
2137		r = torture_random(&rand);
2138		cpu = (r >> 4) % (maxcpu + 1);
2139		if (r & 0x1) {
2140			rcu_nocb_cpu_offload(cpu);
2141			atomic_long_inc(&n_nocb_offload);
2142		} else {
2143			rcu_nocb_cpu_deoffload(cpu);
2144			atomic_long_inc(&n_nocb_deoffload);
2145		}
2146		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2147		set_current_state(TASK_INTERRUPTIBLE);
2148		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2149		if (stutter_wait("rcu_nocb_toggle"))
2150			sched_set_normal(current, oldnice);
2151	} while (!torture_must_stop());
2152	torture_kthread_stopping("rcu_nocb_toggle");
2153	return 0;
2154}
2155
2156/*
2157 * Print torture statistics.  Caller must ensure that there is only
2158 * one call to this function at a given time!!!  This is normally
2159 * accomplished by relying on the module system to only have one copy
2160 * of the module loaded, and then by giving the rcu_torture_stats
2161 * kthread full control (or the init/cleanup functions when rcu_torture_stats
2162 * thread is not running).
2163 */
2164static void
2165rcu_torture_stats_print(void)
2166{
2167	int cpu;
2168	int i;
2169	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2170	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2171	struct rcu_torture *rtcp;
2172	static unsigned long rtcv_snap = ULONG_MAX;
2173	static bool splatted;
2174	struct task_struct *wtp;
2175
2176	for_each_possible_cpu(cpu) {
2177		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2178			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2179			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2180		}
2181	}
2182	for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2183		if (pipesummary[i] != 0)
2184			break;
2185	}
2186
2187	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2188	rtcp = rcu_access_pointer(rcu_torture_current);
2189	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2190		rtcp,
2191		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2192		rcu_torture_current_version,
2193		list_empty(&rcu_torture_freelist),
2194		atomic_read(&n_rcu_torture_alloc),
2195		atomic_read(&n_rcu_torture_alloc_fail),
2196		atomic_read(&n_rcu_torture_free));
2197	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
2198		atomic_read(&n_rcu_torture_mberror),
2199		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2200		n_rcu_torture_barrier_error,
2201		n_rcu_torture_boost_ktrerror,
2202		n_rcu_torture_boost_rterror);
2203	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2204		n_rcu_torture_boost_failure,
2205		n_rcu_torture_boosts,
2206		atomic_long_read(&n_rcu_torture_timers));
2207	torture_onoff_stats();
2208	pr_cont("barrier: %ld/%ld:%ld ",
2209		data_race(n_barrier_successes),
2210		data_race(n_barrier_attempts),
2211		data_race(n_rcu_torture_barrier_error));
2212	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2213	pr_cont("nocb-toggles: %ld:%ld\n",
2214		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2215
2216	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2217	if (atomic_read(&n_rcu_torture_mberror) ||
2218	    atomic_read(&n_rcu_torture_mbchk_fail) ||
2219	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2220	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
2221	    i > 1) {
2222		pr_cont("%s", "!!! ");
2223		atomic_inc(&n_rcu_torture_error);
2224		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2225		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2226		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
2227		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2228		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
2229		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2230		WARN_ON_ONCE(i > 1); // Too-short grace period
2231	}
2232	pr_cont("Reader Pipe: ");
2233	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2234		pr_cont(" %ld", pipesummary[i]);
2235	pr_cont("\n");
2236
2237	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2238	pr_cont("Reader Batch: ");
2239	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2240		pr_cont(" %ld", batchsummary[i]);
2241	pr_cont("\n");
2242
2243	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2244	pr_cont("Free-Block Circulation: ");
2245	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2246		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2247	}
2248	pr_cont("\n");
2249
2250	if (cur_ops->stats)
2251		cur_ops->stats();
2252	if (rtcv_snap == rcu_torture_current_version &&
2253	    rcu_access_pointer(rcu_torture_current) &&
2254	    !rcu_stall_is_suppressed()) {
2255		int __maybe_unused flags = 0;
2256		unsigned long __maybe_unused gp_seq = 0;
2257
2258		rcutorture_get_gp_data(cur_ops->ttype,
2259				       &flags, &gp_seq);
2260		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
2261					&flags, &gp_seq);
2262		wtp = READ_ONCE(writer_task);
2263		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2264			 rcu_torture_writer_state_getname(),
2265			 rcu_torture_writer_state, gp_seq, flags,
2266			 wtp == NULL ? ~0U : wtp->__state,
2267			 wtp == NULL ? -1 : (int)task_cpu(wtp));
2268		if (!splatted && wtp) {
2269			sched_show_task(wtp);
2270			splatted = true;
2271		}
2272		if (cur_ops->gp_kthread_dbg)
2273			cur_ops->gp_kthread_dbg();
2274		rcu_ftrace_dump(DUMP_ALL);
2275	}
2276	rtcv_snap = rcu_torture_current_version;
2277}
2278
2279/*
2280 * Periodically prints torture statistics, if periodic statistics printing
2281 * was specified via the stat_interval module parameter.
2282 */
2283static int
2284rcu_torture_stats(void *arg)
2285{
2286	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2287	do {
2288		schedule_timeout_interruptible(stat_interval * HZ);
2289		rcu_torture_stats_print();
2290		torture_shutdown_absorb("rcu_torture_stats");
2291	} while (!torture_must_stop());
2292	torture_kthread_stopping("rcu_torture_stats");
2293	return 0;
2294}
2295
2296/* Test mem_dump_obj() and friends.  */
2297static void rcu_torture_mem_dump_obj(void)
2298{
2299	struct rcu_head *rhp;
2300	struct kmem_cache *kcp;
2301	static int z;
2302
2303	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2304	if (WARN_ON_ONCE(!kcp))
2305		return;
2306	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2307	if (WARN_ON_ONCE(!rhp)) {
2308		kmem_cache_destroy(kcp);
2309		return;
2310	}
2311	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2312	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2313	mem_dump_obj(ZERO_SIZE_PTR);
2314	pr_alert("mem_dump_obj(NULL):");
2315	mem_dump_obj(NULL);
2316	pr_alert("mem_dump_obj(%px):", &rhp);
2317	mem_dump_obj(&rhp);
2318	pr_alert("mem_dump_obj(%px):", rhp);
2319	mem_dump_obj(rhp);
2320	pr_alert("mem_dump_obj(%px):", &rhp->func);
2321	mem_dump_obj(&rhp->func);
2322	pr_alert("mem_dump_obj(%px):", &z);
2323	mem_dump_obj(&z);
2324	kmem_cache_free(kcp, rhp);
2325	kmem_cache_destroy(kcp);
2326	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2327	if (WARN_ON_ONCE(!rhp))
2328		return;
2329	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2330	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2331	mem_dump_obj(rhp);
2332	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2333	mem_dump_obj(&rhp->func);
2334	kfree(rhp);
2335	rhp = vmalloc(4096);
2336	if (WARN_ON_ONCE(!rhp))
2337		return;
2338	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2339	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2340	mem_dump_obj(rhp);
2341	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2342	mem_dump_obj(&rhp->func);
2343	vfree(rhp);
2344}
2345
2346static void
2347rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2348{
2349	pr_alert("%s" TORTURE_FLAG
2350		 "--- %s: nreaders=%d nfakewriters=%d "
2351		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2352		 "shuffle_interval=%d stutter=%d irqreader=%d "
2353		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2354		 "test_boost=%d/%d test_boost_interval=%d "
2355		 "test_boost_duration=%d shutdown_secs=%d "
2356		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2357		 "stall_cpu_block=%d "
2358		 "n_barrier_cbs=%d "
2359		 "onoff_interval=%d onoff_holdoff=%d "
2360		 "read_exit_delay=%d read_exit_burst=%d "
2361		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
2362		 torture_type, tag, nrealreaders, nfakewriters,
2363		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2364		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2365		 test_boost, cur_ops->can_boost,
2366		 test_boost_interval, test_boost_duration, shutdown_secs,
2367		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2368		 stall_cpu_block,
2369		 n_barrier_cbs,
2370		 onoff_interval, onoff_holdoff,
2371		 read_exit_delay, read_exit_burst,
2372		 nocbs_nthreads, nocbs_toggle);
2373}
2374
2375static int rcutorture_booster_cleanup(unsigned int cpu)
2376{
2377	struct task_struct *t;
2378
2379	if (boost_tasks[cpu] == NULL)
2380		return 0;
2381	mutex_lock(&boost_mutex);
2382	t = boost_tasks[cpu];
2383	boost_tasks[cpu] = NULL;
2384	rcu_torture_enable_rt_throttle();
2385	mutex_unlock(&boost_mutex);
2386
2387	/* This must be outside of the mutex, otherwise deadlock! */
2388	torture_stop_kthread(rcu_torture_boost, t);
2389	return 0;
2390}
2391
2392static int rcutorture_booster_init(unsigned int cpu)
2393{
2394	int retval;
2395
2396	if (boost_tasks[cpu] != NULL)
2397		return 0;  /* Already created, nothing more to do. */
2398
2399	// Testing RCU priority boosting requires that rcutorture do
2400	// some serious abuse.  Counter this by running ksoftirqd
2401	// at higher priority.
2402	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2403		struct sched_param sp;
2404		struct task_struct *t;
2405
2406		t = per_cpu(ksoftirqd, cpu);
2407		WARN_ON_ONCE(!t);
2408		sp.sched_priority = 2;
2409		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2410	}
2411
2412	/* Don't allow time recalculation while creating a new task. */
2413	mutex_lock(&boost_mutex);
2414	rcu_torture_disable_rt_throttle();
2415	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2416	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2417					      cpu, "rcu_torture_boost_%u");
2418	if (IS_ERR(boost_tasks[cpu])) {
2419		retval = PTR_ERR(boost_tasks[cpu]);
2420		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2421		n_rcu_torture_boost_ktrerror++;
2422		boost_tasks[cpu] = NULL;
2423		mutex_unlock(&boost_mutex);
2424		return retval;
2425	}
2426	mutex_unlock(&boost_mutex);
2427	return 0;
2428}
2429
2430/*
2431 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
2432 * induces a CPU stall for the time specified by stall_cpu.
2433 */
2434static int rcu_torture_stall(void *args)
2435{
2436	int idx;
2437	unsigned long stop_at;
2438
2439	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2440	if (stall_cpu_holdoff > 0) {
2441		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2442		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2443		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2444	}
2445	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2446		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2447		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2448		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2449			if (kthread_should_stop())
2450				break;
2451			schedule_timeout_uninterruptible(HZ);
2452		}
2453	}
2454	if (!kthread_should_stop() && stall_cpu > 0) {
2455		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2456		stop_at = ktime_get_seconds() + stall_cpu;
2457		/* RCU CPU stall is expected behavior in the following code. */
2458		idx = cur_ops->readlock();
2459		if (stall_cpu_irqsoff)
2460			local_irq_disable();
2461		else if (!stall_cpu_block)
2462			preempt_disable();
2463		pr_alert("%s start on CPU %d.\n",
2464			  __func__, raw_smp_processor_id());
2465		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2466				    stop_at))
2467			if (stall_cpu_block) {
2468#ifdef CONFIG_PREEMPTION
2469				preempt_schedule();
2470#else
2471				schedule_timeout_uninterruptible(HZ);
2472#endif
2473			} else if (stall_no_softlockup) {
2474				touch_softlockup_watchdog();
2475			}
2476		if (stall_cpu_irqsoff)
2477			local_irq_enable();
2478		else if (!stall_cpu_block)
2479			preempt_enable();
2480		cur_ops->readunlock(idx);
2481	}
2482	pr_alert("%s end.\n", __func__);
2483	torture_shutdown_absorb("rcu_torture_stall");
2484	while (!kthread_should_stop())
2485		schedule_timeout_interruptible(10 * HZ);
2486	return 0;
2487}
2488
2489/* Spawn CPU-stall kthread, if stall_cpu specified. */
2490static int __init rcu_torture_stall_init(void)
2491{
2492	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2493		return 0;
2494	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2495}
2496
2497/* State structure for forward-progress self-propagating RCU callback. */
2498struct fwd_cb_state {
2499	struct rcu_head rh;
2500	int stop;
2501};
2502
2503/*
2504 * Forward-progress self-propagating RCU callback function.  Because
2505 * callbacks run from softirq, this function executes within an implicit
2506 * RCU read-side critical section.
2507 */
2508static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2509{
2510	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2511
2512	if (READ_ONCE(fcsp->stop)) {
2513		WRITE_ONCE(fcsp->stop, 2);
2514		return;
2515	}
2516	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2517}
2518
2519/* State for continuous-flood RCU callbacks. */
2520struct rcu_fwd_cb {
2521	struct rcu_head rh;
2522	struct rcu_fwd_cb *rfc_next;
2523	struct rcu_fwd *rfc_rfp;
2524	int rfc_gps;
2525};
2526
2527#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2528#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2529#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2530#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2531#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
2532
2533struct rcu_launder_hist {
2534	long n_launders;
2535	unsigned long launder_gp_seq;
2536};
2537
2538struct rcu_fwd {
2539	spinlock_t rcu_fwd_lock;
2540	struct rcu_fwd_cb *rcu_fwd_cb_head;
2541	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2542	long n_launders_cb;
2543	unsigned long rcu_fwd_startat;
2544	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2545	unsigned long rcu_launder_gp_seq_start;
2546	int rcu_fwd_id;
2547};
2548
2549static DEFINE_MUTEX(rcu_fwd_mutex);
2550static struct rcu_fwd *rcu_fwds;
2551static unsigned long rcu_fwd_seq;
2552static atomic_long_t rcu_fwd_max_cbs;
2553static bool rcu_fwd_emergency_stop;
2554
2555static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2556{
2557	unsigned long gps;
2558	unsigned long gps_old;
2559	int i;
2560	int j;
2561
2562	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2563		if (rfp->n_launders_hist[i].n_launders > 0)
2564			break;
2565	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2566		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2567	gps_old = rfp->rcu_launder_gp_seq_start;
2568	for (j = 0; j <= i; j++) {
2569		gps = rfp->n_launders_hist[j].launder_gp_seq;
2570		pr_cont(" %ds/%d: %ld:%ld",
2571			j + 1, FWD_CBS_HIST_DIV,
2572			rfp->n_launders_hist[j].n_launders,
2573			rcutorture_seq_diff(gps, gps_old));
2574		gps_old = gps;
2575	}
2576	pr_cont("\n");
2577}
2578
2579/* Callback function for continuous-flood RCU callbacks. */
2580static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2581{
2582	unsigned long flags;
2583	int i;
2584	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2585	struct rcu_fwd_cb **rfcpp;
2586	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2587
2588	rfcp->rfc_next = NULL;
2589	rfcp->rfc_gps++;
2590	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2591	rfcpp = rfp->rcu_fwd_cb_tail;
2592	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2593	WRITE_ONCE(*rfcpp, rfcp);
2594	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2595	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2596	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2597		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2598	rfp->n_launders_hist[i].n_launders++;
2599	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2600	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2601}
2602
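/*
 * The enqueue above uses the classic head-plus-indirect-tail idiom,
 * which makes appending O(1) with no special case for an empty list:
 * ->rcu_fwd_cb_tail initially points at ->rcu_fwd_cb_head itself.  A
 * minimal sketch with illustrative names (the caller is assumed to
 * hold whatever lock protects the queue):
 */
struct tq_node {
	struct tq_node *next;
};

struct tq_queue {
	struct tq_node *head;
	struct tq_node **tail;		/* Initialize to &head when empty. */
};

static void __maybe_unused tq_enqueue(struct tq_queue *q, struct tq_node *n)
{
	n->next = NULL;
	*q->tail = n;		/* Works whether or not the list is empty. */
	q->tail = &n->next;
}
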
2603// Give the scheduler a chance, even on nohz_full CPUs.
2604static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2605{
2606	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2607		// Real call_rcu() floods hit userspace, so emulate that.
2608		if (need_resched() || (iter & 0xfff))
2609			schedule();
2610		return;
2611	}
2612	// No userspace emulation: CB invocation throttles call_rcu()
2613	cond_resched();
2614}
2615
2616/*
2617 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2618 * test is over or because we hit an OOM event.
2619 */
2620static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2621{
2622	unsigned long flags;
2623	unsigned long freed = 0;
2624	struct rcu_fwd_cb *rfcp;
2625
2626	for (;;) {
2627		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2628		rfcp = rfp->rcu_fwd_cb_head;
2629		if (!rfcp) {
2630			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2631			break;
2632		}
2633		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2634		if (!rfp->rcu_fwd_cb_head)
2635			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2636		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2637		kfree(rfcp);
2638		freed++;
2639		rcu_torture_fwd_prog_cond_resched(freed);
2640		if (tick_nohz_full_enabled()) {
2641			local_irq_save(flags);
2642			rcu_momentary_dyntick_idle();
2643			local_irq_restore(flags);
2644		}
2645	}
2646	return freed;
2647}
2648
2649/* Carry out need_resched()/cond_resched() forward-progress testing. */
2650static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2651				    int *tested, int *tested_tries)
2652{
2653	unsigned long cver;
2654	unsigned long dur;
2655	struct fwd_cb_state fcs;
2656	unsigned long gps;
2657	int idx;
2658	int sd;
2659	int sd4;
2660	bool selfpropcb = false;
2661	unsigned long stopat;
2662	static DEFINE_TORTURE_RANDOM(trs);
2663
2664	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2665	if (!cur_ops->sync)
2666		return; // Cannot do need_resched() forward progress testing without ->sync.
2667	if (cur_ops->call && cur_ops->cb_barrier) {
2668		init_rcu_head_on_stack(&fcs.rh);
2669		selfpropcb = true;
2670	}
2671
2672	/* Tight loop containing cond_resched(). */
2673	atomic_inc(&rcu_fwd_cb_nodelay);
2674	cur_ops->sync(); /* Later readers see above write. */
2675	if  (selfpropcb) {
2676		WRITE_ONCE(fcs.stop, 0);
2677		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2678	}
2679	cver = READ_ONCE(rcu_torture_current_version);
2680	gps = cur_ops->get_gp_seq();
2681	sd = cur_ops->stall_dur() + 1;
2682	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2683	dur = sd4 + torture_random(&trs) % (sd - sd4);
2684	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2685	stopat = rfp->rcu_fwd_startat + dur;
2686	while (time_before(jiffies, stopat) &&
2687	       !shutdown_time_arrived() &&
2688	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2689		idx = cur_ops->readlock();
2690		udelay(10);
2691		cur_ops->readunlock(idx);
2692		if (!fwd_progress_need_resched || need_resched())
2693			cond_resched();
2694	}
2695	(*tested_tries)++;
2696	if (!time_before(jiffies, stopat) &&
2697	    !shutdown_time_arrived() &&
2698	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2699		(*tested)++;
2700		cver = READ_ONCE(rcu_torture_current_version) - cver;
2701		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2702		WARN_ON(!cver && gps < 2);
2703		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2704			 rfp->rcu_fwd_id, dur, cver, gps);
2705	}
2706	if (selfpropcb) {
2707		WRITE_ONCE(fcs.stop, 1);
2708		cur_ops->sync(); /* Wait for running CB to complete. */
2709		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2710		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2711	}
2712
2713	if (selfpropcb) {
2714		WARN_ON(READ_ONCE(fcs.stop) != 2);
2715		destroy_rcu_head_on_stack(&fcs.rh);
2716	}
2717	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2718	atomic_dec(&rcu_fwd_cb_nodelay);
2719}
2720
2721/* Carry out call_rcu() forward-progress testing. */
2722static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2723{
2724	unsigned long cver;
2725	unsigned long flags;
2726	unsigned long gps;
2727	int i;
2728	long n_launders;
2729	long n_launders_cb_snap;
2730	long n_launders_sa;
2731	long n_max_cbs;
2732	long n_max_gps;
2733	struct rcu_fwd_cb *rfcp;
2734	struct rcu_fwd_cb *rfcpn;
2735	unsigned long stopat;
2736	unsigned long stoppedat;
2737
2738	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2739	if (READ_ONCE(rcu_fwd_emergency_stop))
2740		return; /* Get out of the way quickly, no GP wait! */
2741	if (!cur_ops->call)
2742		return; /* Can't do call_rcu() fwd prog without ->call. */
2743
2744	/* Loop continuously posting RCU callbacks. */
2745	atomic_inc(&rcu_fwd_cb_nodelay);
2746	cur_ops->sync(); /* Later readers see above write. */
2747	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2748	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2749	n_launders = 0;
2750	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2751	n_launders_sa = 0;
2752	n_max_cbs = 0;
2753	n_max_gps = 0;
2754	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2755		rfp->n_launders_hist[i].n_launders = 0;
2756	cver = READ_ONCE(rcu_torture_current_version);
2757	gps = cur_ops->get_gp_seq();
2758	rfp->rcu_launder_gp_seq_start = gps;
2759	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2760	while (time_before(jiffies, stopat) &&
2761	       !shutdown_time_arrived() &&
2762	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2763		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2764		rfcpn = NULL;
2765		if (rfcp)
2766			rfcpn = READ_ONCE(rfcp->rfc_next);
2767		if (rfcpn) {
2768			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2769			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2770				break;
2771			rfp->rcu_fwd_cb_head = rfcpn;
2772			n_launders++;
2773			n_launders_sa++;
2774		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2775			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2776			if (WARN_ON_ONCE(!rfcp)) {
2777				schedule_timeout_interruptible(1);
2778				continue;
2779			}
2780			n_max_cbs++;
2781			n_launders_sa = 0;
2782			rfcp->rfc_gps = 0;
2783			rfcp->rfc_rfp = rfp;
2784		} else {
2785			rfcp = NULL;
2786		}
2787		if (rfcp)
2788			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2789		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2790		if (tick_nohz_full_enabled()) {
2791			local_irq_save(flags);
2792			rcu_momentary_dyntick_idle();
2793			local_irq_restore(flags);
2794		}
2795	}
2796	stoppedat = jiffies;
2797	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2798	cver = READ_ONCE(rcu_torture_current_version) - cver;
2799	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2800	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2801	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2802	(void)rcu_torture_fwd_prog_cbfree(rfp);
2803
2804	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2805	    !shutdown_time_arrived()) {
2806		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2807		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2808			 __func__,
2809			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2810			 n_launders + n_max_cbs - n_launders_cb_snap,
2811			 n_launders, n_launders_sa,
2812			 n_max_gps, n_max_cbs, cver, gps);
2813		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2814		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2815		rcu_torture_fwd_cb_hist(rfp);
2816		mutex_unlock(&rcu_fwd_mutex);
2817	}
2818	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2819	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2820	atomic_dec(&rcu_fwd_cb_nodelay);
2821}
2822
2823
2824/*
2825 * OOM notifier: prints diagnostic information for the current
2826 * forward-progress test and frees its callbacks to unblock the system.
2827 */
2828static int rcutorture_oom_notify(struct notifier_block *self,
2829				 unsigned long notused, void *nfreed)
2830{
2831	int i;
2832	long ncbs;
2833	struct rcu_fwd *rfp;
2834
2835	mutex_lock(&rcu_fwd_mutex);
2836	rfp = rcu_fwds;
2837	if (!rfp) {
2838		mutex_unlock(&rcu_fwd_mutex);
2839		return NOTIFY_OK;
2840	}
2841	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2842	     __func__);
2843	for (i = 0; i < fwd_progress; i++) {
2844		rcu_torture_fwd_cb_hist(&rfp[i]);
2845		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2846	}
2847	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2848	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2849	ncbs = 0;
2850	for (i = 0; i < fwd_progress; i++)
2851		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2852	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2853	cur_ops->cb_barrier();
2854	ncbs = 0;
2855	for (i = 0; i < fwd_progress; i++)
2856		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2857	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2858	cur_ops->cb_barrier();
2859	ncbs = 0;
2860	for (i = 0; i < fwd_progress; i++)
2861		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2862	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2863	smp_mb(); /* Frees before return to avoid redoing OOM. */
2864	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2865	pr_info("%s returning after OOM processing.\n", __func__);
2866	mutex_unlock(&rcu_fwd_mutex);
2867	return NOTIFY_OK;
2868}
2869
2870static struct notifier_block rcutorture_oom_nb = {
2871	.notifier_call = rcutorture_oom_notify
2872};
2873
2874/* Carry out grace-period forward-progress testing. */
2875static int rcu_torture_fwd_prog(void *args)
2876{
2877	bool firsttime = true;
2878	long max_cbs;
2879	int oldnice = task_nice(current);
2880	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2881	struct rcu_fwd *rfp = args;
2882	int tested = 0;
2883	int tested_tries = 0;
2884
2885	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2886	rcu_bind_current_to_nocb();
2887	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2888		set_user_nice(current, MAX_NICE);
2889	do {
2890		if (!rfp->rcu_fwd_id) {
2891			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2892			WRITE_ONCE(rcu_fwd_emergency_stop, false);
2893			if (!firsttime) {
2894				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2895				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2896			}
2897			firsttime = false;
2898			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2899		} else {
2900			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
2901				schedule_timeout_interruptible(1);
2902			oldseq = READ_ONCE(rcu_fwd_seq);
2903		}
2904		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2905		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2906			rcu_torture_fwd_prog_cr(rfp);
2907		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2908		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
2909		     (rcu_inkernel_boot_has_ended() &&
2910		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
2911			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2912
2913		/* Avoid slow periods, better to test when busy. */
2914		if (stutter_wait("rcu_torture_fwd_prog"))
2915			sched_set_normal(current, oldnice);
2916	} while (!torture_must_stop());
2917	/* Short runs might not contain a valid forward-progress attempt. */
2918	if (!rfp->rcu_fwd_id) {
2919		WARN_ON(!tested && tested_tries >= 5);
2920		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2921	}
2922	torture_kthread_stopping("rcu_torture_fwd_prog");
2923	return 0;
2924}
2925
2926/* If forward-progress checking is requested and feasible, spawn the thread. */
2927static int __init rcu_torture_fwd_prog_init(void)
2928{
2929	int i;
2930	int ret = 0;
2931	struct rcu_fwd *rfp;
2932
2933	if (!fwd_progress)
2934		return 0; /* Not requested, so don't do it. */
2935	if (fwd_progress >= nr_cpu_ids) {
2936		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
2937		fwd_progress = nr_cpu_ids;
2938	} else if (fwd_progress < 0) {
2939		fwd_progress = nr_cpu_ids;
2940	}
2941	if ((!cur_ops->sync && !cur_ops->call) ||
2942	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2943	    cur_ops == &rcu_busted_ops) {
2944		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2945		fwd_progress = 0;
2946		return 0;
2947	}
2948	if (stall_cpu > 0) {
2949		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2950		fwd_progress = 0;
2951		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2952			return -EINVAL; /* In module, can fail back to user. */
2953		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2954		return 0;
2955	}
2956	if (fwd_progress_holdoff <= 0)
2957		fwd_progress_holdoff = 1;
2958	if (fwd_progress_div <= 0)
2959		fwd_progress_div = 4;
2960	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2961	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2962	if (!rfp || !fwd_prog_tasks) {
2963		kfree(rfp);
2964		kfree(fwd_prog_tasks);
2965		fwd_prog_tasks = NULL;
2966		fwd_progress = 0;
2967		return -ENOMEM;
2968	}
2969	for (i = 0; i < fwd_progress; i++) {
2970		spin_lock_init(&rfp[i].rcu_fwd_lock);
2971		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
2972		rfp[i].rcu_fwd_id = i;
2973	}
2974	mutex_lock(&rcu_fwd_mutex);
2975	rcu_fwds = rfp;
2976	mutex_unlock(&rcu_fwd_mutex);
2977	register_oom_notifier(&rcutorture_oom_nb);
2978	for (i = 0; i < fwd_progress; i++) {
2979		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
2980		if (ret) {
2981			fwd_progress = i;
2982			return ret;
2983		}
2984	}
2985	return 0;
2986}
2987
2988static void rcu_torture_fwd_prog_cleanup(void)
2989{
2990	int i;
2991	struct rcu_fwd *rfp;
2992
2993	if (!rcu_fwds || !fwd_prog_tasks)
2994		return;
2995	for (i = 0; i < fwd_progress; i++)
2996		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
2997	unregister_oom_notifier(&rcutorture_oom_nb);
2998	mutex_lock(&rcu_fwd_mutex);
2999	rfp = rcu_fwds;
3000	rcu_fwds = NULL;
3001	mutex_unlock(&rcu_fwd_mutex);
3002	kfree(rfp);
3003	kfree(fwd_prog_tasks);
3004	fwd_prog_tasks = NULL;
3005}
3006
3007/* Callback function for RCU barrier testing. */
3008static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3009{
3010	atomic_inc(&barrier_cbs_invoked);
3011}
3012
3013/* IPI handler to get callback posted on desired CPU, if online. */
3014static void rcu_torture_barrier1cb(void *rcu_void)
3015{
3016	struct rcu_head *rhp = rcu_void;
3017
3018	cur_ops->call(rhp, rcu_torture_barrier_cbf);
3019}
3020
3021/* kthread function to register callbacks used to test RCU barriers. */
3022static int rcu_torture_barrier_cbs(void *arg)
3023{
3024	long myid = (long)arg;
3025	bool lastphase = false;
3026	bool newphase;
3027	struct rcu_head rcu;
3028
3029	init_rcu_head_on_stack(&rcu);
3030	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3031	set_user_nice(current, MAX_NICE);
3032	do {
3033		wait_event(barrier_cbs_wq[myid],
3034			   (newphase =
3035			    smp_load_acquire(&barrier_phase)) != lastphase ||
3036			   torture_must_stop());
3037		lastphase = newphase;
3038		if (torture_must_stop())
3039			break;
3040		/*
3041		 * The above smp_load_acquire() ensures barrier_phase load
3042		 * is ordered before the following ->call().
3043		 */
3044		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
3045					     &rcu, 1)) {
3046			// IPI failed, so use direct call from current CPU.
3047			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3048		}
3049		if (atomic_dec_and_test(&barrier_cbs_count))
3050			wake_up(&barrier_wq);
3051	} while (!torture_must_stop());
3052	if (cur_ops->cb_barrier != NULL)
3053		cur_ops->cb_barrier();
3054	destroy_rcu_head_on_stack(&rcu);
3055	torture_kthread_stopping("rcu_torture_barrier_cbs");
3056	return 0;
3057}
3058
3059/* kthread function to drive and coordinate RCU barrier testing. */
3060static int rcu_torture_barrier(void *arg)
3061{
3062	int i;
3063
3064	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
3065	do {
3066		atomic_set(&barrier_cbs_invoked, 0);
3067		atomic_set(&barrier_cbs_count, n_barrier_cbs);
3068		/* Ensure barrier_phase ordered after prior assignments. */
3069		smp_store_release(&barrier_phase, !barrier_phase);
3070		for (i = 0; i < n_barrier_cbs; i++)
3071			wake_up(&barrier_cbs_wq[i]);
3072		wait_event(barrier_wq,
3073			   atomic_read(&barrier_cbs_count) == 0 ||
3074			   torture_must_stop());
3075		if (torture_must_stop())
3076			break;
3077		n_barrier_attempts++;
3078		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3079		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
3080			n_rcu_torture_barrier_error++;
3081			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
3082			       atomic_read(&barrier_cbs_invoked),
3083			       n_barrier_cbs);
3084			WARN_ON(1);
3085			// Wait manually for the remaining callbacks
3086			i = 0;
3087			do {
3088				if (WARN_ON(i++ > HZ))
3089					i = INT_MIN;
3090				schedule_timeout_interruptible(1);
3091				cur_ops->cb_barrier();
3092			} while (atomic_read(&barrier_cbs_invoked) !=
3093				 n_barrier_cbs &&
3094				 !torture_must_stop());
3095			smp_mb(); // Can't trust ordering if broken.
3096			if (!torture_must_stop())
3097				pr_err("Recovered: barrier_cbs_invoked = %d\n",
3098				       atomic_read(&barrier_cbs_invoked));
3099		} else {
3100			n_barrier_successes++;
3101		}
3102		schedule_timeout_interruptible(HZ / 10);
3103	} while (!torture_must_stop());
3104	torture_kthread_stopping("rcu_torture_barrier");
3105	return 0;
3106}
3107
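/*
 * The machinery above verifies the guarantee that makes the canonical
 * unload-time pattern below safe: once cur_ops->cb_barrier() (for
 * example, rcu_barrier()) returns, every previously queued callback
 * has been invoked.  Illustrative sketch only; "example_cache" is an
 * assumed cache whose objects are freed from call_rcu() callbacks:
 */
static struct kmem_cache *example_cache;

static void __maybe_unused example_unload_time_cleanup(void)
{
	rcu_barrier();		/* Wait for all in-flight callbacks. */
	kmem_cache_destroy(example_cache);  /* Safe only after the barrier. */
}
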
3108/* Initialize RCU barrier testing. */
3109static int rcu_torture_barrier_init(void)
3110{
3111	int i;
3112	int ret;
3113
3114	if (n_barrier_cbs <= 0)
3115		return 0;
3116	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3117		pr_alert("%s" TORTURE_FLAG
3118			 " Call or barrier ops missing for %s,\n",
3119			 torture_type, cur_ops->name);
3120		pr_alert("%s" TORTURE_FLAG
3121			 " RCU barrier testing omitted from run.\n",
3122			 torture_type);
3123		return 0;
3124	}
3125	atomic_set(&barrier_cbs_count, 0);
3126	atomic_set(&barrier_cbs_invoked, 0);
3127	barrier_cbs_tasks =
3128		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
3129			GFP_KERNEL);
3130	barrier_cbs_wq =
3131		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
3132	if (!barrier_cbs_tasks || !barrier_cbs_wq)
3133		return -ENOMEM;
3134	for (i = 0; i < n_barrier_cbs; i++) {
3135		init_waitqueue_head(&barrier_cbs_wq[i]);
3136		ret = torture_create_kthread(rcu_torture_barrier_cbs,
3137					     (void *)(long)i,
3138					     barrier_cbs_tasks[i]);
3139		if (ret)
3140			return ret;
3141	}
3142	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
3143}
3144
3145/* Clean up after RCU barrier testing. */
3146static void rcu_torture_barrier_cleanup(void)
3147{
3148	int i;
3149
3150	torture_stop_kthread(rcu_torture_barrier, barrier_task);
3151	if (barrier_cbs_tasks != NULL) {
3152		for (i = 0; i < n_barrier_cbs; i++)
3153			torture_stop_kthread(rcu_torture_barrier_cbs,
3154					     barrier_cbs_tasks[i]);
3155		kfree(barrier_cbs_tasks);
3156		barrier_cbs_tasks = NULL;
3157	}
3158	if (barrier_cbs_wq != NULL) {
3159		kfree(barrier_cbs_wq);
3160		barrier_cbs_wq = NULL;
3161	}
3162}
3163
3164static bool rcu_torture_can_boost(void)
3165{
3166	static int boost_warn_once;
3167	int prio;
3168
3169	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3170		return false;
3171	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3172		return false;
3173
3174	prio = rcu_get_gp_kthreads_prio();
3175	if (!prio)
3176		return false;
3177
3178	if (prio < 2) {
3179		if (boost_warn_once == 1)
3180			return false;
3181
3182		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
3183		boost_warn_once = 1;
3184		return false;
3185	}
3186
3187	return true;
3188}
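/*
 * The first test above encodes test_boost's three settings: 0 never
 * boosts, 1 boosts only when the flavor's ->can_boost says it makes
 * sense, and 2 forces the attempt.  The static boost_warn_once flag then
 * limits the too-low-priority complaint to once per module lifetime,
 * open-coding what pr_warn_once() would otherwise provide.  The
 * print-once idiom in isolation (hypothetical user-space helper):
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	static void complain_once(const char *msg)
 *	{
 *		static bool warned;
 *
 *		if (warned)
 *			return;
 *		warned = true;
 *		fprintf(stderr, "%s\n", msg);
 *	}
 */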
3189
3190static bool read_exit_child_stop;
3191static bool read_exit_child_stopped;
3192static wait_queue_head_t read_exit_wq;
3193
3194// Child kthread which just does an rcutorture reader and exits.
3195static int rcu_torture_read_exit_child(void *trsp_in)
3196{
3197	struct torture_random_state *trsp = trsp_in;
3198
3199	set_user_nice(current, MAX_NICE);
3200	// Minimize time between reading and exiting.
3201	while (!kthread_should_stop())
3202		schedule_timeout_uninterruptible(1);
3203	(void)rcu_torture_one_read(trsp, -1);
3204	return 0;
3205}
3206
3207// Parent kthread which creates and destroys read-exit child kthreads.
3208static int rcu_torture_read_exit(void *unused)
3209{
3210	bool errexit = false;
3211	int i;
3212	struct task_struct *tsp;
3213	DEFINE_TORTURE_RANDOM(trs);
3214
3215	// Allocate and initialize.
3216	set_user_nice(current, MAX_NICE);
3217	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
3218
3219	// Each pass through this loop does one read-exit episode.
3220	do {
3221		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
3222		for (i = 0; i < read_exit_burst; i++) {
3223			if (READ_ONCE(read_exit_child_stop))
3224				break;
3225			stutter_wait("rcu_torture_read_exit");
3226			// Spawn child.
3227			tsp = kthread_run(rcu_torture_read_exit_child,
3228					  &trs, "%s", "rcu_torture_read_exit_child");
3229			if (IS_ERR(tsp)) {
3230				TOROUT_ERRSTRING("out of memory");
3231				errexit = true;
3232				break;
3233			}
3234			cond_resched();
3235			kthread_stop(tsp);
3236			n_read_exits++;
3237		}
3238		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
3239		rcu_barrier(); // Wait for task_struct free, avoid OOM.
3240		i = 0;
3241		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
3242			schedule_timeout_uninterruptible(HZ);
3243	} while (!errexit && !READ_ONCE(read_exit_child_stop));
3244
3245	// Clean up and exit.
3246	smp_store_release(&read_exit_child_stopped, true); // After reaping.
3247	smp_mb(); // Store before wakeup.
3248	wake_up(&read_exit_wq);
3249	while (!torture_must_stop())
3250		schedule_timeout_uninterruptible(1);
3251	torture_kthread_stopping("rcu_torture_read_exit");
3252	return 0;
3253}
3254
3255static int rcu_torture_read_exit_init(void)
3256{
3257	if (read_exit_burst <= 0)
3258		return 0;
3259	init_waitqueue_head(&read_exit_wq);
3260	read_exit_child_stop = false;
3261	read_exit_child_stopped = false;
3262	return torture_create_kthread(rcu_torture_read_exit, NULL,
3263				      read_exit_task);
3264}
3265
3266static void rcu_torture_read_exit_cleanup(void)
3267{
3268	if (!read_exit_task)
3269		return;
3270	WRITE_ONCE(read_exit_child_stop, true);
3271	smp_mb(); // Above write before wait.
3272	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3273	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
3274}
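/*
 * Shutdown is a two-flag handshake: this cleanup path raises
 * read_exit_child_stop, then sleeps until the parent kthread publishes
 * read_exit_child_stopped with a release store that pairs with the
 * acquire load in the wait_event() condition.  In miniature (hypothetical
 * names, C11 atomics, spinning where the kernel sleeps on a waitqueue):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static atomic_bool stop_req, stopped;
 *
 *	static void parent_side(void)	// runs after reaping children
 *	{
 *		atomic_store_explicit(&stopped, true, memory_order_release);
 *	}
 *
 *	static void cleanup_side(void)
 *	{
 *		atomic_store(&stop_req, true);
 *		while (!atomic_load_explicit(&stopped, memory_order_acquire))
 *			;	// kernel code uses wait_event() instead
 *	}
 */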
3275
3276static enum cpuhp_state rcutor_hp;
3277
3278static void
3279rcu_torture_cleanup(void)
3280{
3281	int firsttime;
3282	int flags = 0;
3283	unsigned long gp_seq = 0;
3284	int i;
3285
3286	if (torture_cleanup_begin()) {
3287		if (cur_ops->cb_barrier != NULL) {
3288			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3289			cur_ops->cb_barrier();
3290		}
3291		rcu_gp_slow_unregister(NULL);
3292		return;
3293	}
3294	if (!cur_ops) {
3295		torture_cleanup_end();
3296		rcu_gp_slow_unregister(NULL);
3297		return;
3298	}
3299
3300	if (cur_ops->gp_kthread_dbg)
3301		cur_ops->gp_kthread_dbg();
3302	rcu_torture_read_exit_cleanup();
3303	rcu_torture_barrier_cleanup();
3304	rcu_torture_fwd_prog_cleanup();
3305	torture_stop_kthread(rcu_torture_stall, stall_task);
3306	torture_stop_kthread(rcu_torture_writer, writer_task);
3307
3308	if (nocb_tasks) {
3309		for (i = 0; i < nrealnocbers; i++)
3310			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3311		kfree(nocb_tasks);
3312		nocb_tasks = NULL;
3313	}
3314
3315	if (reader_tasks) {
3316		for (i = 0; i < nrealreaders; i++)
3317			torture_stop_kthread(rcu_torture_reader,
3318					     reader_tasks[i]);
3319		kfree(reader_tasks);
3320		reader_tasks = NULL;
3321	}
3322	kfree(rcu_torture_reader_mbchk);
3323	rcu_torture_reader_mbchk = NULL;
3324
3325	if (fakewriter_tasks) {
3326		for (i = 0; i < nfakewriters; i++)
3327			torture_stop_kthread(rcu_torture_fakewriter,
3328					     fakewriter_tasks[i]);
3329		kfree(fakewriter_tasks);
3330		fakewriter_tasks = NULL;
3331	}
3332
3333	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3334	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3335	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3336		 cur_ops->name, (long)gp_seq, flags,
3337		 rcutorture_seq_diff(gp_seq, start_gp_seq));
3338	torture_stop_kthread(rcu_torture_stats, stats_task);
3339	torture_stop_kthread(rcu_torture_fqs, fqs_task);
3340	if (rcu_torture_can_boost() && rcutor_hp >= 0)
3341		cpuhp_remove_state(rcutor_hp);
3342
3343	/*
3344	 * Wait for all RCU callbacks to fire, then do torture-type-specific
3345	 * cleanup operations.
3346	 */
3347	if (cur_ops->cb_barrier != NULL) {
3348		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3349		cur_ops->cb_barrier();
3350	}
3351	if (cur_ops->cleanup != NULL)
3352		cur_ops->cleanup();
3353
3354	rcu_torture_mem_dump_obj();
3355
3356	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
3357
3358	if (err_segs_recorded) {
3359		pr_alert("Failure/close-call rcutorture reader segments:\n");
3360		if (rt_read_nsegs == 0)
3361			pr_alert("\t: No segments recorded!!!\n");
3362		firsttime = 1;
3363		for (i = 0; i < rt_read_nsegs; i++) {
3364			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
3365			if (err_segs[i].rt_delay_jiffies != 0) {
3366				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
3367					err_segs[i].rt_delay_jiffies);
3368				firsttime = 0;
3369			}
3370			if (err_segs[i].rt_delay_ms != 0) {
3371				pr_cont("%s%ldms", firsttime ? "" : "+",
3372					err_segs[i].rt_delay_ms);
3373				firsttime = 0;
3374			}
3375			if (err_segs[i].rt_delay_us != 0) {
3376				pr_cont("%s%ldus", firsttime ? "" : "+",
3377					err_segs[i].rt_delay_us);
3378				firsttime = 0;
3379			}
3380			pr_cont("%s\n",
3381				err_segs[i].rt_preempted ? "preempted" : "");
3382
3383		}
3384	}
3385	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
3386		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
3387	else if (torture_onoff_failures())
3388		rcu_torture_print_module_parms(cur_ops,
3389					       "End of test: RCU_HOTPLUG");
3390	else
3391		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
3392	torture_cleanup_end();
3393	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
3394}
3395
3396#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3397static void rcu_torture_leak_cb(struct rcu_head *rhp)
3398{
3399}
3400
3401static void rcu_torture_err_cb(struct rcu_head *rhp)
3402{
3403	/*
3404	 * This -might- happen due to race conditions, but is unlikely.
3405	 * The scenario that leads to this happening is that the
3406	 * first of the pair of duplicate callbacks is queued,
3407	 * someone else starts a grace period that includes that
3408	 * callback, then the second of the pair must wait for the
3409	 * next grace period.  Unlikely, but can happen.  If it
3410	 * does happen, the debug-objects subsystem won't have splatted.
3411	 */
3412	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
3413}
3414#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3415
3416/*
3417 * Verify that double-free causes debug-objects to complain, but only
3418 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
3419 * cannot be carried out.
3420 */
3421static void rcu_test_debug_objects(void)
3422{
3423#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3424	struct rcu_head rh1;
3425	struct rcu_head rh2;
3426	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
3427
3428	init_rcu_head_on_stack(&rh1);
3429	init_rcu_head_on_stack(&rh2);
3430	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
3431
3432	/* Try to queue the rh2 pair of callbacks for the same grace period. */
3433	preempt_disable(); /* Prevent preemption from interrupting test. */
3434	rcu_read_lock(); /* Make it impossible to finish a grace period. */
3435	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
3436	local_irq_disable(); /* Make it harder to start a new grace period. */
3437	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
3438	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
3439	if (rhp) {
3440		call_rcu_hurry(rhp, rcu_torture_leak_cb);
3441		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
3442	}
3443	local_irq_enable();
3444	rcu_read_unlock();
3445	preempt_enable();
3446
3447	/* Wait for them all to get done so we can safely return. */
3448	rcu_barrier();
3449	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
3450	destroy_rcu_head_on_stack(&rh1);
3451	destroy_rcu_head_on_stack(&rh2);
3452	kfree(rhp);
3453#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3454	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
3455#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3456}
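/*
 * The failure mode this test provokes is a second enqueue of an rcu_head
 * that is already on a callback list.  A stripped-down sketch of the kind
 * of state check debug-objects performs (hypothetical structure and
 * helper, not the real debug-objects API):
 *
 *	#include <assert.h>
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *
 *	struct cb_head {
 *		struct cb_head *next;
 *		void (*func)(struct cb_head *);
 *		bool queued;	// debug state, cleared when invoked
 *	};
 *
 *	static void enqueue_cb(struct cb_head *head,
 *			       void (*func)(struct cb_head *))
 *	{
 *		assert(!head->queued);	// duplicate enqueue splats here
 *		head->queued = true;
 *		head->func = func;
 *		head->next = NULL;
 *		// ...link onto the pending-callback list...
 *	}
 */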
3457
3458static void rcutorture_sync(void)
3459{
3460	static unsigned long n;
3461
3462	if (cur_ops->sync && !(++n & 0xfff))
3463		cur_ops->sync();
3464}
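/*
 * Because 0xfff masks the low twelve bits, the !(++n & 0xfff) test above
 * succeeds once every 4096 invocations, so at most roughly one CPU-hotplug
 * operation in 4096 pays for a full synchronous grace period.
 */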
3465
3466static int __init
3467rcu_torture_init(void)
3468{
3469	long i;
3470	int cpu;
3471	int firsterr = 0;
3472	int flags = 0;
3473	unsigned long gp_seq = 0;
3474	static struct rcu_torture_ops *torture_ops[] = {
3475		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
3476		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
3477		&trivial_ops,
3478	};
3479
3480	if (!torture_init_begin(torture_type, verbose))
3481		return -EBUSY;
3482
3483	/* Process args and tell the world that the torturer is on the job. */
3484	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
3485		cur_ops = torture_ops[i];
3486		if (strcmp(torture_type, cur_ops->name) == 0)
3487			break;
3488	}
3489	if (i == ARRAY_SIZE(torture_ops)) {
3490		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3491			 torture_type);
3492		pr_alert("rcu-torture types:");
3493		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
3494			pr_cont(" %s", torture_ops[i]->name);
3495		pr_cont("\n");
3496		firsterr = -EINVAL;
3497		cur_ops = NULL;
3498		goto unwind;
3499	}
3500	if (cur_ops->fqs == NULL && fqs_duration != 0) {
3501		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3502		fqs_duration = 0;
3503	}
3504	if (cur_ops->init)
3505		cur_ops->init();
3506
3507	if (nreaders >= 0) {
3508		nrealreaders = nreaders;
3509	} else {
3510		nrealreaders = num_online_cpus() - 2 - nreaders;
3511		if (nrealreaders <= 0)
3512			nrealreaders = 1;
3513	}
3514	rcu_torture_print_module_parms(cur_ops, "Start of test");
3515	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3516	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3517	start_gp_seq = gp_seq;
3518	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
3519		 cur_ops->name, (long)gp_seq, flags);
3520
3521	/* Set up the freelist. */
3522
3523	INIT_LIST_HEAD(&rcu_torture_freelist);
3524	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3525		rcu_tortures[i].rtort_mbtest = 0;
3526		list_add_tail(&rcu_tortures[i].rtort_free,
3527			      &rcu_torture_freelist);
3528	}
3529
3530	/* Initialize the statistics so that each run gets its own numbers. */
3531
3532	rcu_torture_current = NULL;
3533	rcu_torture_current_version = 0;
3534	atomic_set(&n_rcu_torture_alloc, 0);
3535	atomic_set(&n_rcu_torture_alloc_fail, 0);
3536	atomic_set(&n_rcu_torture_free, 0);
3537	atomic_set(&n_rcu_torture_mberror, 0);
3538	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3539	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3540	atomic_set(&n_rcu_torture_error, 0);
3541	n_rcu_torture_barrier_error = 0;
3542	n_rcu_torture_boost_ktrerror = 0;
3543	n_rcu_torture_boost_rterror = 0;
3544	n_rcu_torture_boost_failure = 0;
3545	n_rcu_torture_boosts = 0;
3546	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3547		atomic_set(&rcu_torture_wcount[i], 0);
3548	for_each_possible_cpu(cpu) {
3549		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3550			per_cpu(rcu_torture_count, cpu)[i] = 0;
3551			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3552		}
3553	}
3554	err_segs_recorded = 0;
3555	rt_read_nsegs = 0;
3556
3557	/* Start up the kthreads. */
3558
3559	rcu_torture_write_types();
3560	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3561					  writer_task);
3562	if (torture_init_error(firsterr))
3563		goto unwind;
3564	if (nfakewriters > 0) {
3565		fakewriter_tasks = kcalloc(nfakewriters,
3566					   sizeof(fakewriter_tasks[0]),
3567					   GFP_KERNEL);
3568		if (fakewriter_tasks == NULL) {
3569			TOROUT_ERRSTRING("out of memory");
3570			firsterr = -ENOMEM;
3571			goto unwind;
3572		}
3573	}
3574	for (i = 0; i < nfakewriters; i++) {
3575		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3576						  NULL, fakewriter_tasks[i]);
3577		if (torture_init_error(firsterr))
3578			goto unwind;
3579	}
3580	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3581			       GFP_KERNEL);
3582	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3583					   GFP_KERNEL);
3584	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3585		TOROUT_ERRSTRING("out of memory");
3586		firsterr = -ENOMEM;
3587		goto unwind;
3588	}
3589	for (i = 0; i < nrealreaders; i++) {
3590		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3591		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3592						  reader_tasks[i]);
3593		if (torture_init_error(firsterr))
3594			goto unwind;
3595	}
3596	nrealnocbers = nocbs_nthreads;
3597	if (WARN_ON(nrealnocbers < 0))
3598		nrealnocbers = 1;
3599	if (WARN_ON(nocbs_toggle < 0))
3600		nocbs_toggle = HZ;
3601	if (nrealnocbers > 0) {
3602		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3603		if (nocb_tasks == NULL) {
3604			TOROUT_ERRSTRING("out of memory");
3605			firsterr = -ENOMEM;
3606			goto unwind;
3607		}
3608	} else {
3609		nocb_tasks = NULL;
3610	}
3611	for (i = 0; i < nrealnocbers; i++) {
3612		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3613		if (torture_init_error(firsterr))
3614			goto unwind;
3615	}
3616	if (stat_interval > 0) {
3617		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3618						  stats_task);
3619		if (torture_init_error(firsterr))
3620			goto unwind;
3621	}
3622	if (test_no_idle_hz && shuffle_interval > 0) {
3623		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3624		if (torture_init_error(firsterr))
3625			goto unwind;
3626	}
3627	if (stutter < 0)
3628		stutter = 0;
3629	if (stutter) {
3630		int t;
3631
3632		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3633		firsterr = torture_stutter_init(stutter * HZ, t);
3634		if (torture_init_error(firsterr))
3635			goto unwind;
3636	}
3637	if (fqs_duration < 0)
3638		fqs_duration = 0;
3639	if (fqs_duration) {
3640		/* Create the fqs thread */
3641		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3642						  fqs_task);
3643		if (torture_init_error(firsterr))
3644			goto unwind;
3645	}
3646	if (test_boost_interval < 1)
3647		test_boost_interval = 1;
3648	if (test_boost_duration < 2)
3649		test_boost_duration = 2;
3650	if (rcu_torture_can_boost()) {
3651
3652		boost_starttime = jiffies + test_boost_interval * HZ;
3653
3654		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3655					     rcutorture_booster_init,
3656					     rcutorture_booster_cleanup);
3657		rcutor_hp = firsterr;
3658		if (torture_init_error(firsterr))
3659			goto unwind;
3660	}
3661	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3662	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3663	if (torture_init_error(firsterr))
3664		goto unwind;
3665	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3666				      rcutorture_sync);
3667	if (torture_init_error(firsterr))
3668		goto unwind;
3669	firsterr = rcu_torture_stall_init();
3670	if (torture_init_error(firsterr))
3671		goto unwind;
3672	firsterr = rcu_torture_fwd_prog_init();
3673	if (torture_init_error(firsterr))
3674		goto unwind;
3675	firsterr = rcu_torture_barrier_init();
3676	if (torture_init_error(firsterr))
3677		goto unwind;
3678	firsterr = rcu_torture_read_exit_init();
3679	if (torture_init_error(firsterr))
3680		goto unwind;
3681	if (object_debug)
3682		rcu_test_debug_objects();
3683	torture_init_end();
3684	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
3685	return 0;
3686
3687unwind:
3688	torture_init_end();
3689	rcu_torture_cleanup();
3690	if (shutdown_secs) {
3691		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3692		kernel_power_off();
3693	}
3694	return firsterr;
3695}
3696
3697module_init(rcu_torture_init);
3698module_exit(rcu_torture_cleanup);
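/*
 * Typical usage (see Documentation/RCU/torture.rst): build this facility
 * as a module and load it with the flavor and knobs of interest, for
 * example "modprobe rcutorture torture_type=srcu".  When built in, the
 * same parameters can be passed on the kernel command line in the usual
 * module-parameter form, e.g. rcutorture.torture_type=srcu.
 */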
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Read-Copy Update module-based torture test facility
   4 *
   5 * Copyright (C) IBM Corporation, 2005, 2006
   6 *
   7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8 *	  Josh Triplett <josh@joshtriplett.org>
   9 *
  10 * See also:  Documentation/RCU/torture.rst
  11 */
  12
  13#define pr_fmt(fmt) fmt
  14
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/init.h>
  18#include <linux/module.h>
  19#include <linux/kthread.h>
  20#include <linux/err.h>
  21#include <linux/spinlock.h>
  22#include <linux/smp.h>
  23#include <linux/rcupdate_wait.h>
  24#include <linux/rcu_notifier.h>
  25#include <linux/interrupt.h>
  26#include <linux/sched/signal.h>
  27#include <uapi/linux/sched/types.h>
  28#include <linux/atomic.h>
  29#include <linux/bitops.h>
  30#include <linux/completion.h>
  31#include <linux/moduleparam.h>
  32#include <linux/percpu.h>
  33#include <linux/notifier.h>
  34#include <linux/reboot.h>
  35#include <linux/freezer.h>
  36#include <linux/cpu.h>
  37#include <linux/delay.h>
  38#include <linux/stat.h>
  39#include <linux/srcu.h>
  40#include <linux/slab.h>
  41#include <linux/trace_clock.h>
  42#include <asm/byteorder.h>
  43#include <linux/torture.h>
  44#include <linux/vmalloc.h>
  45#include <linux/sched/debug.h>
  46#include <linux/sched/sysctl.h>
  47#include <linux/oom.h>
  48#include <linux/tick.h>
  49#include <linux/rcupdate_trace.h>
  50#include <linux/nmi.h>
  51
  52#include "rcu.h"
  53
  54MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
  55MODULE_LICENSE("GPL");
  56MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
  57
  58/* Bits for ->extendables field, extendables param, and related definitions. */
  59#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
  60#define RCUTORTURE_RDR_MASK_1	 (0xff << RCUTORTURE_RDR_SHIFT_1)
  61#define RCUTORTURE_RDR_SHIFT_2	 16	/* Put SRCU index in upper bits. */
  62#define RCUTORTURE_RDR_MASK_2	 (0xff << RCUTORTURE_RDR_SHIFT_2)
  63#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
  64#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
  65#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
  66#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
  67#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
  68#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
  69#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
  70#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
  71#define RCUTORTURE_MAX_EXTEND	 \
  72	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
  73	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
  74#define RCUTORTURE_RDR_ALLBITS	\
  75	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
  76	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
  77#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
  78					/* Must be power of two minus one. */
  79#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
  80
  81torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
  82	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
  83torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
  84torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
  85torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
  86torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
  87torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
  88torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
  89torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
  90torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
  91torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
  92torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
  93torture_param(bool, gp_cond_exp_full, false,
  94		    "Use conditional/async full-stateexpedited GP wait primitives");
  95torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
  96torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
  97torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
  98torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
  99torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
 100torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
 101torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
 102torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
 103torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
 104torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
 105torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
 106torture_param(int, nreaders, -1, "Number of RCU reader threads");
 107torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
 108torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 109torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
 110torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
 111torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
 112torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
 113torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
 114torture_param(int, reader_flavor, 0x1, "Reader flavors to use, one per bit.");
 115torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 116torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 117torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
 118torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
 119torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
 120torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
 121torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
 122torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
 123torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
 124torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
 125torture_param(int, stutter, 5, "Number of seconds to run/halt test");
 126torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
 127torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
 128torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
 129torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
 130torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
 131torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
 132torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 133
 134static char *torture_type = "rcu";
 135module_param(torture_type, charp, 0444);
 136MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
 137
 138static int nrealnocbers;
 139static int nrealreaders;
 140static struct task_struct *writer_task;
 141static struct task_struct **fakewriter_tasks;
 142static struct task_struct **reader_tasks;
 143static struct task_struct **nocb_tasks;
 144static struct task_struct *stats_task;
 145static struct task_struct *fqs_task;
 146static struct task_struct *boost_tasks[NR_CPUS];
 147static struct task_struct *stall_task;
 148static struct task_struct **fwd_prog_tasks;
 149static struct task_struct **barrier_cbs_tasks;
 150static struct task_struct *barrier_task;
 151static struct task_struct *read_exit_task;
 152
 153#define RCU_TORTURE_PIPE_LEN 10
 154
 155// Mailbox-like structure to check RCU global memory ordering.
 156struct rcu_torture_reader_check {
 157	unsigned long rtc_myloops;
 158	int rtc_chkrdr;
 159	unsigned long rtc_chkloops;
 160	int rtc_ready;
 161	struct rcu_torture_reader_check *rtc_assigner;
 162} ____cacheline_internodealigned_in_smp;
 163
 164// Update-side data structure used to check RCU readers.
 165struct rcu_torture {
 166	struct rcu_head rtort_rcu;
 167	int rtort_pipe_count;
 168	struct list_head rtort_free;
 169	int rtort_mbtest;
 170	struct rcu_torture_reader_check *rtort_chkp;
 171};
 172
 173static LIST_HEAD(rcu_torture_freelist);
 174static struct rcu_torture __rcu *rcu_torture_current;
 175static unsigned long rcu_torture_current_version;
 176static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 177static DEFINE_SPINLOCK(rcu_torture_lock);
 178static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
 179static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
 180static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
 181static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
 182static atomic_t n_rcu_torture_alloc;
 183static atomic_t n_rcu_torture_alloc_fail;
 184static atomic_t n_rcu_torture_free;
 185static atomic_t n_rcu_torture_mberror;
 186static atomic_t n_rcu_torture_mbchk_fail;
 187static atomic_t n_rcu_torture_mbchk_tries;
 188static atomic_t n_rcu_torture_error;
 189static long n_rcu_torture_barrier_error;
 190static long n_rcu_torture_boost_ktrerror;
 
 191static long n_rcu_torture_boost_failure;
 192static long n_rcu_torture_boosts;
 193static atomic_long_t n_rcu_torture_timers;
 194static long n_barrier_attempts;
 195static long n_barrier_successes; /* did rcu_barrier test succeed? */
 196static unsigned long n_read_exits;
 197static struct list_head rcu_torture_removed;
 198static unsigned long shutdown_jiffies;
 199static unsigned long start_gp_seq;
 200static atomic_long_t n_nocb_offload;
 201static atomic_long_t n_nocb_deoffload;
 202
 203static int rcu_torture_writer_state;
 204#define RTWS_FIXED_DELAY	0
 205#define RTWS_DELAY		1
 206#define RTWS_REPLACE		2
 207#define RTWS_DEF_FREE		3
 208#define RTWS_EXP_SYNC		4
 209#define RTWS_COND_GET		5
 210#define RTWS_COND_GET_FULL	6
 211#define RTWS_COND_GET_EXP	7
 212#define RTWS_COND_GET_EXP_FULL	8
 213#define RTWS_COND_SYNC		9
 214#define RTWS_COND_SYNC_FULL	10
 215#define RTWS_COND_SYNC_EXP	11
 216#define RTWS_COND_SYNC_EXP_FULL	12
 217#define RTWS_POLL_GET		13
 218#define RTWS_POLL_GET_FULL	14
 219#define RTWS_POLL_GET_EXP	15
 220#define RTWS_POLL_GET_EXP_FULL	16
 221#define RTWS_POLL_WAIT		17
 222#define RTWS_POLL_WAIT_FULL	18
 223#define RTWS_POLL_WAIT_EXP	19
 224#define RTWS_POLL_WAIT_EXP_FULL	20
 225#define RTWS_SYNC		21
 226#define RTWS_STUTTER		22
 227#define RTWS_STOPPING		23
 228static const char * const rcu_torture_writer_state_names[] = {
 229	"RTWS_FIXED_DELAY",
 230	"RTWS_DELAY",
 231	"RTWS_REPLACE",
 232	"RTWS_DEF_FREE",
 233	"RTWS_EXP_SYNC",
 234	"RTWS_COND_GET",
 235	"RTWS_COND_GET_FULL",
 236	"RTWS_COND_GET_EXP",
 237	"RTWS_COND_GET_EXP_FULL",
 238	"RTWS_COND_SYNC",
 239	"RTWS_COND_SYNC_FULL",
 240	"RTWS_COND_SYNC_EXP",
 241	"RTWS_COND_SYNC_EXP_FULL",
 242	"RTWS_POLL_GET",
 243	"RTWS_POLL_GET_FULL",
 244	"RTWS_POLL_GET_EXP",
 245	"RTWS_POLL_GET_EXP_FULL",
 246	"RTWS_POLL_WAIT",
 247	"RTWS_POLL_WAIT_FULL",
 248	"RTWS_POLL_WAIT_EXP",
 249	"RTWS_POLL_WAIT_EXP_FULL",
 250	"RTWS_SYNC",
 251	"RTWS_STUTTER",
 252	"RTWS_STOPPING",
 253};
 254
 255/* Record reader segment types and duration for first failing read. */
 256struct rt_read_seg {
 257	int rt_readstate;
 258	unsigned long rt_delay_jiffies;
 259	unsigned long rt_delay_ms;
 260	unsigned long rt_delay_us;
 261	bool rt_preempted;
 262};
 263static int err_segs_recorded;
 264static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
 265static int rt_read_nsegs;
 266
 267static const char *rcu_torture_writer_state_getname(void)
 268{
 269	unsigned int i = READ_ONCE(rcu_torture_writer_state);
 270
 271	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
 272		return "???";
 273	return rcu_torture_writer_state_names[i];
 274}
 275
 276#ifdef CONFIG_RCU_TRACE
 277static u64 notrace rcu_trace_clock_local(void)
 278{
 279	u64 ts = trace_clock_local();
 280
 281	(void)do_div(ts, NSEC_PER_USEC);
 282	return ts;
 283}
 284#else /* #ifdef CONFIG_RCU_TRACE */
 285static u64 notrace rcu_trace_clock_local(void)
 286{
 287	return 0ULL;
 288}
 289#endif /* #else #ifdef CONFIG_RCU_TRACE */
 290
 291/*
 292 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 293 * to avoid interfering with test shutdown.
 294 */
 295static bool shutdown_time_arrived(void)
 296{
 297	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
 298}
 299
 300static unsigned long boost_starttime;	/* jiffies of next boost test start. */
 301static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
 302					/*  and boost task create/destroy. */
 303static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
 304static bool barrier_phase;		/* Test phase. */
 305static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
 306static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 307static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
 308
 309static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */
 310
 311/*
 312 * Allocate an element from the rcu_tortures pool.
 313 */
 314static struct rcu_torture *
 315rcu_torture_alloc(void)
 316{
 317	struct list_head *p;
 318
 319	spin_lock_bh(&rcu_torture_lock);
 320	if (list_empty(&rcu_torture_freelist)) {
 321		atomic_inc(&n_rcu_torture_alloc_fail);
 322		spin_unlock_bh(&rcu_torture_lock);
 323		return NULL;
 324	}
 325	atomic_inc(&n_rcu_torture_alloc);
 326	p = rcu_torture_freelist.next;
 327	list_del_init(p);
 328	spin_unlock_bh(&rcu_torture_lock);
 329	return container_of(p, struct rcu_torture, rtort_free);
 330}
 331
 332/*
 333 * Free an element to the rcu_tortures pool.
 334 */
 335static void
 336rcu_torture_free(struct rcu_torture *p)
 337{
 338	atomic_inc(&n_rcu_torture_free);
 339	spin_lock_bh(&rcu_torture_lock);
 340	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
 341	spin_unlock_bh(&rcu_torture_lock);
 342}
 343
 344/*
 345 * Operations vector for selecting different types of tests.
 346 */
 347
 348struct rcu_torture_ops {
 349	int ttype;
 350	void (*init)(void);
 351	void (*cleanup)(void);
 352	int (*readlock)(void);
 353	void (*read_delay)(struct torture_random_state *rrsp,
 354			   struct rt_read_seg *rtrsp);
 355	void (*readunlock)(int idx);
 356	int (*readlock_held)(void);
 357	unsigned long (*get_gp_seq)(void);
 358	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 359	void (*deferred_free)(struct rcu_torture *p);
 360	void (*sync)(void);
 361	void (*exp_sync)(void);
 362	unsigned long (*get_gp_state_exp)(void);
 363	unsigned long (*start_gp_poll_exp)(void);
 364	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
 365	bool (*poll_gp_state_exp)(unsigned long oldstate);
 366	void (*cond_sync_exp)(unsigned long oldstate);
 367	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
 368	unsigned long (*get_comp_state)(void);
 369	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
 370	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
 371	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
 372	unsigned long (*get_gp_state)(void);
 373	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 
 
 374	unsigned long (*start_gp_poll)(void);
 375	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
 376	bool (*poll_gp_state)(unsigned long oldstate);
 377	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 378	bool (*poll_need_2gp)(bool poll, bool poll_full);
 379	void (*cond_sync)(unsigned long oldstate);
 380	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
 381	int poll_active;
 382	int poll_active_full;
 383	call_rcu_func_t call;
 384	void (*cb_barrier)(void);
 385	void (*fqs)(void);
 386	void (*stats)(void);
 387	void (*gp_kthread_dbg)(void);
 388	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
 389	int (*stall_dur)(void);
 390	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
 391	void (*gp_slow_register)(atomic_t *rgssp);
 392	void (*gp_slow_unregister)(atomic_t *rgssp);
 393	long cbflood_max;
 394	int irq_capable;
 395	int can_boost;
 396	int extendables;
 397	int slow_gps;
 398	int no_pi_lock;
 399	int debug_objects;
 400	int start_poll_irqsoff;
 401	const char *name;
 402};
 403
 404static struct rcu_torture_ops *cur_ops;
 405
 406/*
 407 * Definitions for rcu torture testing.
 408 */
 409
 410static int torture_readlock_not_held(void)
 411{
 412	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
 413}
 414
 415static int rcu_torture_read_lock(void)
 416{
 417	rcu_read_lock();
 418	return 0;
 419}
 420
 421static void
 422rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 423{
 424	unsigned long started;
 425	unsigned long completed;
 426	const unsigned long shortdelay_us = 200;
 427	unsigned long longdelay_ms = 300;
 428	unsigned long long ts;
 429
 430	/* We want a short delay sometimes to make a reader delay the grace
 431	 * period, and we want a long delay occasionally to trigger
 432	 * force_quiescent_state. */
 433
 434	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
 435	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
 436		started = cur_ops->get_gp_seq();
 437		ts = rcu_trace_clock_local();
 438		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
 439			longdelay_ms = 5; /* Avoid triggering BH limits. */
 440		mdelay(longdelay_ms);
 441		rtrsp->rt_delay_ms = longdelay_ms;
 442		completed = cur_ops->get_gp_seq();
 443		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
 444					  started, completed);
 445	}
 446	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
 447		udelay(shortdelay_us);
 448		rtrsp->rt_delay_us = shortdelay_us;
 449	}
 450	if (!preempt_count() &&
 451	    !(torture_random(rrsp) % (nrealreaders * 500))) {
 452		torture_preempt_schedule();  /* QS only if preemptible. */
 453		rtrsp->rt_preempted = true;
 454	}
 455}
 456
 457static void rcu_torture_read_unlock(int idx)
 458{
 459	rcu_read_unlock();
 460}
 461
 462/*
 463 * Update callback in the pipe.  This should be invoked after a grace period.
 464 */
 465static bool
 466rcu_torture_pipe_update_one(struct rcu_torture *rp)
 467{
 468	int i;
 469	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
 470
 471	if (rtrcp) {
 472		WRITE_ONCE(rp->rtort_chkp, NULL);
 473		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
 474	}
 475	i = rp->rtort_pipe_count;
 476	if (i > RCU_TORTURE_PIPE_LEN)
 477		i = RCU_TORTURE_PIPE_LEN;
 478	atomic_inc(&rcu_torture_wcount[i]);
 479	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
 480	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
 481	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
 482		rp->rtort_mbtest = 0;
 483		return true;
 484	}
 485	return false;
 486}
 487
 488/*
 489 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 490 * primitives.
 491 */
 492static void
 493rcu_torture_pipe_update(struct rcu_torture *old_rp)
 494{
 495	struct rcu_torture *rp;
 496	struct rcu_torture *rp1;
 497
 498	if (old_rp)
 499		list_add(&old_rp->rtort_free, &rcu_torture_removed);
 500	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
 501		if (rcu_torture_pipe_update_one(rp)) {
 502			list_del(&rp->rtort_free);
 503			rcu_torture_free(rp);
 504		}
 505	}
 506}
 507
 508static void
 509rcu_torture_cb(struct rcu_head *p)
 510{
 511	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 512
 513	if (torture_must_stop_irq()) {
 514		/* Test is ending, just drop callbacks on the floor. */
 515		/* The next initialization will pick up the pieces. */
 516		return;
 517	}
 518	if (rcu_torture_pipe_update_one(rp))
 519		rcu_torture_free(rp);
 520	else
 521		cur_ops->deferred_free(rp);
 522}
 523
 524static unsigned long rcu_no_completed(void)
 525{
 526	return 0;
 527}
 528
 529static void rcu_torture_deferred_free(struct rcu_torture *p)
 530{
 531	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
 532}
 533
 534static void rcu_sync_torture_init(void)
 535{
 536	INIT_LIST_HEAD(&rcu_torture_removed);
 537}
 538
 539static bool rcu_poll_need_2gp(bool poll, bool poll_full)
 540{
 541	return poll;
 542}
 543
 544static struct rcu_torture_ops rcu_ops = {
 545	.ttype			= RCU_FLAVOR,
 546	.init			= rcu_sync_torture_init,
 547	.readlock		= rcu_torture_read_lock,
 548	.read_delay		= rcu_read_delay,
 549	.readunlock		= rcu_torture_read_unlock,
 550	.readlock_held		= torture_readlock_not_held,
 551	.get_gp_seq		= rcu_get_gp_seq,
 552	.gp_diff		= rcu_seq_diff,
 553	.deferred_free		= rcu_torture_deferred_free,
 554	.sync			= synchronize_rcu,
 555	.exp_sync		= synchronize_rcu_expedited,
 556	.same_gp_state		= same_state_synchronize_rcu,
 557	.same_gp_state_full	= same_state_synchronize_rcu_full,
 558	.get_comp_state		= get_completed_synchronize_rcu,
 559	.get_comp_state_full	= get_completed_synchronize_rcu_full,
 560	.get_gp_state		= get_state_synchronize_rcu,
 561	.get_gp_state_full	= get_state_synchronize_rcu_full,
 
 
 562	.start_gp_poll		= start_poll_synchronize_rcu,
 563	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
 564	.poll_gp_state		= poll_state_synchronize_rcu,
 565	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
 566	.poll_need_2gp		= rcu_poll_need_2gp,
 567	.cond_sync		= cond_synchronize_rcu,
 568	.cond_sync_full		= cond_synchronize_rcu_full,
 569	.poll_active		= NUM_ACTIVE_RCU_POLL_OLDSTATE,
 570	.poll_active_full	= NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
 571	.get_gp_state_exp	= get_state_synchronize_rcu,
 572	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
 573	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
 574	.poll_gp_state_exp	= poll_state_synchronize_rcu,
 575	.cond_sync_exp		= cond_synchronize_rcu_expedited,
 576	.call			= call_rcu_hurry,
 577	.cb_barrier		= rcu_barrier,
 578	.fqs			= rcu_force_quiescent_state,
 
 579	.gp_kthread_dbg		= show_rcu_gp_kthreads,
 580	.check_boost_failed	= rcu_check_boost_fail,
 581	.stall_dur		= rcu_jiffies_till_stall_check,
 582	.get_gp_data		= rcutorture_get_gp_data,
 583	.gp_slow_register	= rcu_gp_slow_register,
 584	.gp_slow_unregister	= rcu_gp_slow_unregister,
 585	.irq_capable		= 1,
 586	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
 587	.extendables		= RCUTORTURE_MAX_EXTEND,
 588	.debug_objects		= 1,
 589	.start_poll_irqsoff	= 1,
 590	.name			= "rcu"
 591};
 592
 593/*
 594 * Don't even think about trying any of these in real life!!!
 595 * The names includes "busted", and they really means it!
 596 * The only purpose of these functions is to provide a buggy RCU
 597 * implementation to make sure that rcutorture correctly emits
 598 * buggy-RCU error messages.
 599 */
 600static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
 601{
 602	/* This is a deliberate bug for testing purposes only! */
 603	rcu_torture_cb(&p->rtort_rcu);
 604}
 605
 606static void synchronize_rcu_busted(void)
 607{
 608	/* This is a deliberate bug for testing purposes only! */
 609}
 610
 611static void
 612call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
 613{
 614	/* This is a deliberate bug for testing purposes only! */
 615	func(head);
 616}
 617
 618static struct rcu_torture_ops rcu_busted_ops = {
 619	.ttype		= INVALID_RCU_FLAVOR,
 620	.init		= rcu_sync_torture_init,
 621	.readlock	= rcu_torture_read_lock,
 622	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 623	.readunlock	= rcu_torture_read_unlock,
 624	.readlock_held	= torture_readlock_not_held,
 625	.get_gp_seq	= rcu_no_completed,
 626	.deferred_free	= rcu_busted_torture_deferred_free,
 627	.sync		= synchronize_rcu_busted,
 628	.exp_sync	= synchronize_rcu_busted,
 629	.call		= call_rcu_busted,
 
 
 
 630	.irq_capable	= 1,
 631	.name		= "busted"
 632};
 633
 634/*
 635 * Definitions for srcu torture testing.
 636 */
 637
 638DEFINE_STATIC_SRCU(srcu_ctl);
 639static struct srcu_struct srcu_ctld;
 640static struct srcu_struct *srcu_ctlp = &srcu_ctl;
 641static struct rcu_torture_ops srcud_ops;
 642
 643static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
 644{
 645	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
 646}
 647
 648static int srcu_torture_read_lock(void)
 649{
 650	int idx;
 651	int ret = 0;
 652
 653	if ((reader_flavor & 0x1) || !(reader_flavor & 0x7)) {
 654		idx = srcu_read_lock(srcu_ctlp);
 655		WARN_ON_ONCE(idx & ~0x1);
 656		ret += idx;
 657	}
 658	if (reader_flavor & 0x2) {
 659		idx = srcu_read_lock_nmisafe(srcu_ctlp);
 660		WARN_ON_ONCE(idx & ~0x1);
 661		ret += idx << 1;
 662	}
 663	if (reader_flavor & 0x4) {
 664		idx = srcu_read_lock_lite(srcu_ctlp);
 665		WARN_ON_ONCE(idx & ~0x1);
 666		ret += idx << 2;
 667	}
 668	return ret;
 669}
 670
 671static void
 672srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 673{
 674	long delay;
 675	const long uspertick = 1000000 / HZ;
 676	const long longdelay = 10;
 677
 678	/* We want there to be long-running readers, but not all the time. */
 679
 680	delay = torture_random(rrsp) %
 681		(nrealreaders * 2 * longdelay * uspertick);
 682	if (!delay && in_task()) {
 683		schedule_timeout_interruptible(longdelay);
 684		rtrsp->rt_delay_jiffies = longdelay;
 685	} else {
 686		rcu_read_delay(rrsp, rtrsp);
 687	}
 688}
 689
 690static void srcu_torture_read_unlock(int idx)
 691{
 692	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
 693	if (reader_flavor & 0x4)
 694		srcu_read_unlock_lite(srcu_ctlp, (idx & 0x4) >> 2);
 695	if (reader_flavor & 0x2)
 696		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
 697	if ((reader_flavor & 0x1) || !(reader_flavor & 0x7))
 698		srcu_read_unlock(srcu_ctlp, idx & 0x1);
 699}
 700
 701static int torture_srcu_read_lock_held(void)
 702{
 703	return srcu_read_lock_held(srcu_ctlp);
 704}
 705
 706static unsigned long srcu_torture_completed(void)
 707{
 708	return srcu_batches_completed(srcu_ctlp);
 709}
 710
 711static void srcu_torture_deferred_free(struct rcu_torture *rp)
 712{
 713	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
 714}
 715
 716static void srcu_torture_synchronize(void)
 717{
 718	synchronize_srcu(srcu_ctlp);
 719}
 720
 721static unsigned long srcu_torture_get_gp_state(void)
 722{
 723	return get_state_synchronize_srcu(srcu_ctlp);
 724}
 725
 726static unsigned long srcu_torture_start_gp_poll(void)
 727{
 728	return start_poll_synchronize_srcu(srcu_ctlp);
 729}
 730
 731static bool srcu_torture_poll_gp_state(unsigned long oldstate)
 732{
 733	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
 734}
 735
 736static void srcu_torture_call(struct rcu_head *head,
 737			      rcu_callback_t func)
 738{
 739	call_srcu(srcu_ctlp, head, func);
 740}
 741
 742static void srcu_torture_barrier(void)
 743{
 744	srcu_barrier(srcu_ctlp);
 745}
 746
 747static void srcu_torture_stats(void)
 748{
 749	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
 750}
 751
 752static void srcu_torture_synchronize_expedited(void)
 753{
 754	synchronize_srcu_expedited(srcu_ctlp);
 755}
 756
 757static struct rcu_torture_ops srcu_ops = {
 758	.ttype		= SRCU_FLAVOR,
 759	.init		= rcu_sync_torture_init,
 760	.readlock	= srcu_torture_read_lock,
 761	.read_delay	= srcu_read_delay,
 762	.readunlock	= srcu_torture_read_unlock,
 763	.readlock_held	= torture_srcu_read_lock_held,
 764	.get_gp_seq	= srcu_torture_completed,
 765	.deferred_free	= srcu_torture_deferred_free,
 766	.sync		= srcu_torture_synchronize,
 767	.exp_sync	= srcu_torture_synchronize_expedited,
 768	.same_gp_state	= same_state_synchronize_srcu,
 769	.get_comp_state = get_completed_synchronize_srcu,
 770	.get_gp_state	= srcu_torture_get_gp_state,
 771	.start_gp_poll	= srcu_torture_start_gp_poll,
 772	.poll_gp_state	= srcu_torture_poll_gp_state,
 773	.poll_active	= NUM_ACTIVE_SRCU_POLL_OLDSTATE,
 774	.call		= srcu_torture_call,
 775	.cb_barrier	= srcu_torture_barrier,
 776	.stats		= srcu_torture_stats,
 777	.get_gp_data	= srcu_get_gp_data,
 778	.cbflood_max	= 50000,
 779	.irq_capable	= 1,
 780	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 781	.debug_objects	= 1,
 782	.name		= "srcu"
 783};
 784
 785static void srcu_torture_init(void)
 786{
 787	rcu_sync_torture_init();
 788	WARN_ON(init_srcu_struct(&srcu_ctld));
 789	srcu_ctlp = &srcu_ctld;
 790}
 791
 792static void srcu_torture_cleanup(void)
 793{
 794	cleanup_srcu_struct(&srcu_ctld);
 795	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
 796}
 797
 798/* As above, but dynamically allocated. */
 799static struct rcu_torture_ops srcud_ops = {
 800	.ttype		= SRCU_FLAVOR,
 801	.init		= srcu_torture_init,
 802	.cleanup	= srcu_torture_cleanup,
 803	.readlock	= srcu_torture_read_lock,
 804	.read_delay	= srcu_read_delay,
 805	.readunlock	= srcu_torture_read_unlock,
 806	.readlock_held	= torture_srcu_read_lock_held,
 807	.get_gp_seq	= srcu_torture_completed,
 808	.deferred_free	= srcu_torture_deferred_free,
 809	.sync		= srcu_torture_synchronize,
 810	.exp_sync	= srcu_torture_synchronize_expedited,
 811	.same_gp_state	= same_state_synchronize_srcu,
 812	.get_comp_state = get_completed_synchronize_srcu,
 813	.get_gp_state	= srcu_torture_get_gp_state,
 814	.start_gp_poll	= srcu_torture_start_gp_poll,
 815	.poll_gp_state	= srcu_torture_poll_gp_state,
 816	.poll_active	= NUM_ACTIVE_SRCU_POLL_OLDSTATE,
 817	.call		= srcu_torture_call,
 818	.cb_barrier	= srcu_torture_barrier,
 819	.stats		= srcu_torture_stats,
 820	.get_gp_data	= srcu_get_gp_data,
 821	.cbflood_max	= 50000,
 822	.irq_capable	= 1,
 823	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 824	.debug_objects	= 1,
 825	.name		= "srcud"
 826};
 827
 828/* As above, but broken due to inappropriate reader extension. */
 829static struct rcu_torture_ops busted_srcud_ops = {
 830	.ttype		= SRCU_FLAVOR,
 831	.init		= srcu_torture_init,
 832	.cleanup	= srcu_torture_cleanup,
 833	.readlock	= srcu_torture_read_lock,
 834	.read_delay	= rcu_read_delay,
 835	.readunlock	= srcu_torture_read_unlock,
 836	.readlock_held	= torture_srcu_read_lock_held,
 837	.get_gp_seq	= srcu_torture_completed,
 838	.deferred_free	= srcu_torture_deferred_free,
 839	.sync		= srcu_torture_synchronize,
 840	.exp_sync	= srcu_torture_synchronize_expedited,
 841	.call		= srcu_torture_call,
 842	.cb_barrier	= srcu_torture_barrier,
 843	.stats		= srcu_torture_stats,
 844	.irq_capable	= 1,
 845	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 846	.extendables	= RCUTORTURE_MAX_EXTEND,
 847	.name		= "busted_srcud"
 848};
 849
 850/*
 851 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 852 * This implementation does not necessarily work well with CPU hotplug.
 853 */
 854
 855static void synchronize_rcu_trivial(void)
 856{
 857	int cpu;
 858
 859	for_each_online_cpu(cpu) {
 860		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
 861		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
 862	}
 863}
 864
 865static int rcu_torture_read_lock_trivial(void)
 866{
 867	preempt_disable();
 868	return 0;
 869}
 870
 871static void rcu_torture_read_unlock_trivial(int idx)
 872{
 873	preempt_enable();
 874}
 875
 876static struct rcu_torture_ops trivial_ops = {
 877	.ttype		= RCU_TRIVIAL_FLAVOR,
 878	.init		= rcu_sync_torture_init,
 879	.readlock	= rcu_torture_read_lock_trivial,
 880	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 881	.readunlock	= rcu_torture_read_unlock_trivial,
 882	.readlock_held	= torture_readlock_not_held,
 883	.get_gp_seq	= rcu_no_completed,
 884	.sync		= synchronize_rcu_trivial,
 885	.exp_sync	= synchronize_rcu_trivial,
 
 
 886	.irq_capable	= 1,
 887	.name		= "trivial"
 888};
 889
 890#ifdef CONFIG_TASKS_RCU
 891
 892/*
 893 * Definitions for RCU-tasks torture testing.
 894 */
 895
 896static int tasks_torture_read_lock(void)
 897{
 898	return 0;
 899}
 900
 901static void tasks_torture_read_unlock(int idx)
 902{
 903}
 904
 905static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
 906{
 907	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
 908}
 909
 910static void synchronize_rcu_mult_test(void)
 911{
 912	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
 913}
 914
 915static struct rcu_torture_ops tasks_ops = {
 916	.ttype		= RCU_TASKS_FLAVOR,
 917	.init		= rcu_sync_torture_init,
 918	.readlock	= tasks_torture_read_lock,
 919	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 920	.readunlock	= tasks_torture_read_unlock,
 921	.get_gp_seq	= rcu_no_completed,
 922	.deferred_free	= rcu_tasks_torture_deferred_free,
 923	.sync		= synchronize_rcu_tasks,
 924	.exp_sync	= synchronize_rcu_mult_test,
 925	.call		= call_rcu_tasks,
 926	.cb_barrier	= rcu_barrier_tasks,
 927	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
 928	.get_gp_data	= rcu_tasks_get_gp_data,
 
 929	.irq_capable	= 1,
 930	.slow_gps	= 1,
 931	.name		= "tasks"
 932};
 933
 934#define TASKS_OPS &tasks_ops,
 935
 936#else // #ifdef CONFIG_TASKS_RCU
 937
 938#define TASKS_OPS
 939
 940#endif // #else #ifdef CONFIG_TASKS_RCU
 941
 942
 943#ifdef CONFIG_TASKS_RUDE_RCU
 944
 945/*
 946 * Definitions for rude RCU-tasks torture testing.
 947 */
 948
 
 
 
 
 
 949static struct rcu_torture_ops tasks_rude_ops = {
 950	.ttype		= RCU_TASKS_RUDE_FLAVOR,
 951	.init		= rcu_sync_torture_init,
 952	.readlock	= rcu_torture_read_lock_trivial,
 953	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 954	.readunlock	= rcu_torture_read_unlock_trivial,
 955	.get_gp_seq	= rcu_no_completed,
 
 956	.sync		= synchronize_rcu_tasks_rude,
 957	.exp_sync	= synchronize_rcu_tasks_rude,
 
 
 958	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
 959	.get_gp_data	= rcu_tasks_rude_get_gp_data,
 960	.cbflood_max	= 50000,
 
 
 961	.irq_capable	= 1,
 962	.name		= "tasks-rude"
 963};
 964
 965#define TASKS_RUDE_OPS &tasks_rude_ops,
 966
 967#else // #ifdef CONFIG_TASKS_RUDE_RCU
 968
 969#define TASKS_RUDE_OPS
 970
 971#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
 972
 973
 974#ifdef CONFIG_TASKS_TRACE_RCU
 975
 976/*
 977 * Definitions for tracing RCU-tasks torture testing.
 978 */
 979
 980static int tasks_tracing_torture_read_lock(void)
 981{
 982	rcu_read_lock_trace();
 983	return 0;
 984}
 985
 986static void tasks_tracing_torture_read_unlock(int idx)
 987{
 988	rcu_read_unlock_trace();
 989}
 990
 991static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
 992{
 993	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
 994}
 995
 996static struct rcu_torture_ops tasks_tracing_ops = {
 997	.ttype		= RCU_TASKS_TRACING_FLAVOR,
 998	.init		= rcu_sync_torture_init,
 999	.readlock	= tasks_tracing_torture_read_lock,
1000	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
1001	.readunlock	= tasks_tracing_torture_read_unlock,
1002	.readlock_held	= rcu_read_lock_trace_held,
1003	.get_gp_seq	= rcu_no_completed,
1004	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
1005	.sync		= synchronize_rcu_tasks_trace,
1006	.exp_sync	= synchronize_rcu_tasks_trace,
1007	.call		= call_rcu_tasks_trace,
1008	.cb_barrier	= rcu_barrier_tasks_trace,
1009	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
1010	.get_gp_data    = rcu_tasks_trace_get_gp_data,
1011	.cbflood_max	= 50000,
 
 
1012	.irq_capable	= 1,
1013	.slow_gps	= 1,
1014	.name		= "tasks-tracing"
1015};
1016
1017#define TASKS_TRACING_OPS &tasks_tracing_ops,
1018
1019#else // #ifdef CONFIG_TASKS_TRACE_RCU
1020
1021#define TASKS_TRACING_OPS
1022
1023#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
1024
1025
1026static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
1027{
1028	if (!cur_ops->gp_diff)
1029		return new - old;
1030	return cur_ops->gp_diff(new, old);
1031}
1032
1033/*
1034 * RCU torture priority-boost testing.  Runs one real-time thread per
1035 * CPU for moderate bursts, repeatedly starting grace periods and waiting
1036 * for them to complete.  If a given grace period takes too long, we assume
1037 * that priority inversion has occurred.
1038 */
1039
1040static int old_rt_runtime = -1;
1041
1042static void rcu_torture_disable_rt_throttle(void)
1043{
1044	/*
1045	 * Disable RT throttling so that rcutorture's boost threads don't get
1046	 * throttled. Only possible if rcutorture is built-in otherwise the
1047	 * user should manually do this by setting the sched_rt_period_us and
1048	 * sched_rt_runtime sysctls.
1049	 */
1050	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
1051		return;
1052
1053	old_rt_runtime = sysctl_sched_rt_runtime;
1054	sysctl_sched_rt_runtime = -1;
1055}
1056
1057static void rcu_torture_enable_rt_throttle(void)
1058{
1059	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
1060		return;
1061
1062	sysctl_sched_rt_runtime = old_rt_runtime;
1063	old_rt_runtime = -1;
1064}
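
/*
 * A sketch of the manual equivalent for modular rcutorture (the -1
 * value disables the RT runtime limit entirely):
 *
 *	# sysctl -w kernel.sched_rt_runtime_us=-1
 */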
1065
1066static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
1067{
1068	int cpu;
1069	static int dbg_done;
1070	unsigned long end = jiffies;
1071	bool gp_done;
1072	unsigned long j;
1073	static unsigned long last_persist;
1074	unsigned long lp;
1075	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
1076
1077	if (end - *start > mininterval) {
1078		// Recheck after checking time to avoid false positives.
1079		smp_mb(); // Time check before grace-period check.
1080		if (cur_ops->poll_gp_state(gp_state))
1081			return false; // passed, though perhaps just barely
1082		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
1083			// At most one persisted message per boost test.
1084			j = jiffies;
1085			lp = READ_ONCE(last_persist);
1086			if (time_after(j, lp + mininterval) &&
1087			    cmpxchg(&last_persist, lp, j) == lp) {
1088				if (cpu < 0)
1089					pr_info("Boost inversion persisted: QS from all CPUs\n");
1090				else
1091					pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
1092			}
1093			return false; // passed on a technicality
1094		}
1095		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
1096		n_rcu_torture_boost_failure++;
1097		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
1098			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
1099				current->rt_priority, gp_state, end - *start);
1100			cur_ops->gp_kthread_dbg();
1101			// Recheck after print to flag grace period ending during splat.
1102			gp_done = cur_ops->poll_gp_state(gp_state);
1103			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
1104				gp_done ? "ended already" : "still pending");
1105
1106		}
1107
1108		return true; // failed
1109	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
1110		*start = jiffies;
1111	}
1112
1113	return false; // passed
1114}
1115
1116static int rcu_torture_boost(void *arg)
1117{
1118	unsigned long endtime;
1119	unsigned long gp_state;
1120	unsigned long gp_state_time;
1121	unsigned long oldstarttime;
1122
1123	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
1124
1125	/* Set real-time priority. */
1126	sched_set_fifo_low(current);
1127
1128	/* Each pass through the following loop does one boost-test cycle. */
1129	do {
1130		bool failed = false; // Test failed already in this test interval
1131		bool gp_initiated = false;
1132
1133		if (kthread_should_stop())
1134			goto checkwait;
1135
1136		/* Wait for the next test interval. */
1137		oldstarttime = READ_ONCE(boost_starttime);
1138		while (time_before(jiffies, oldstarttime)) {
1139			schedule_timeout_interruptible(oldstarttime - jiffies);
1140			if (stutter_wait("rcu_torture_boost"))
1141				sched_set_fifo_low(current);
1142			if (torture_must_stop())
1143				goto checkwait;
1144		}
1145
1146		// Do one boost-test interval.
1147		endtime = oldstarttime + test_boost_duration * HZ;
1148		while (time_before(jiffies, endtime)) {
1149			// Has current GP gone too long?
1150			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1151				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
1152			// If we don't have a grace period in flight, start one.
1153			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1154				gp_state = cur_ops->start_gp_poll();
1155				gp_initiated = true;
1156				gp_state_time = jiffies;
1157			}
1158			if (stutter_wait("rcu_torture_boost")) {
1159				sched_set_fifo_low(current);
1160				// If the grace period already ended,
1161				// we don't know when that happened, so
1162				// start over.
1163				if (cur_ops->poll_gp_state(gp_state))
1164					gp_initiated = false;
1165			}
1166			if (torture_must_stop())
1167				goto checkwait;
1168		}
1169
1170		// In case the grace period extended beyond the end of the loop.
1171		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1172			rcu_torture_boost_failed(gp_state, &gp_state_time);
1173
1174		/*
1175		 * Set the start time of the next test interval.
1176		 * Yes, this is vulnerable to long delays, but such
1177		 * delays simply cause a false negative for the next
1178		 * interval.  Besides, we are running at RT priority,
1179		 * so delays should be relatively rare.
1180		 */
1181		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
1182			if (mutex_trylock(&boost_mutex)) {
1183				if (oldstarttime == boost_starttime) {
1184					WRITE_ONCE(boost_starttime,
1185						   jiffies + test_boost_interval * HZ);
1186					n_rcu_torture_boosts++;
1187				}
1188				mutex_unlock(&boost_mutex);
1189				break;
1190			}
1191			schedule_timeout_uninterruptible(HZ / 20);
1192		}
1193
1194		/* Go do the stutter. */
1195checkwait:	if (stutter_wait("rcu_torture_boost"))
1196			sched_set_fifo_low(current);
1197	} while (!torture_must_stop());
1198
1199	/* Clean up and exit. */
1200	while (!kthread_should_stop()) {
1201		torture_shutdown_absorb("rcu_torture_boost");
1202		schedule_timeout_uninterruptible(HZ / 20);
1203	}
1204	torture_kthread_stopping("rcu_torture_boost");
1205	return 0;
1206}
1207
1208/*
1209 * RCU torture force-quiescent-state kthread.  Repeatedly induces
1210 * bursts of calls to force_quiescent_state(), increasing the probability
1211 * of occurrence of some important types of race conditions.
1212 */
1213static int
1214rcu_torture_fqs(void *arg)
1215{
1216	unsigned long fqs_resume_time;
1217	int fqs_burst_remaining;
1218	int oldnice = task_nice(current);
1219
1220	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1221	do {
1222		fqs_resume_time = jiffies + fqs_stutter * HZ;
1223		while (time_before(jiffies, fqs_resume_time) &&
1224		       !kthread_should_stop()) {
1225			schedule_timeout_interruptible(HZ / 20);
1226		}
1227		fqs_burst_remaining = fqs_duration;
1228		while (fqs_burst_remaining > 0 &&
1229		       !kthread_should_stop()) {
1230			cur_ops->fqs();
1231			udelay(fqs_holdoff);
1232			fqs_burst_remaining -= fqs_holdoff;
1233		}
1234		if (stutter_wait("rcu_torture_fqs"))
1235			sched_set_normal(current, oldnice);
1236	} while (!torture_must_stop());
1237	torture_kthread_stopping("rcu_torture_fqs");
1238	return 0;
1239}
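
/*
 * Worked example, with purely illustrative parameter values: given
 * fqs_duration=100 and fqs_holdoff=10, each burst invokes cur_ops->fqs()
 * ten times at ten-microsecond intervals, and successive bursts are
 * separated by fqs_stutter seconds.
 */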
1240
1241// Used by writers to randomly choose from the available grace-period primitives.
1242static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
1243static int nsynctypes;
1244
1245/*
1246 * Determine which grace-period primitives are available.
1247 */
1248static void rcu_torture_write_types(void)
1249{
1250	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
1251	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
1252	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
1253	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;
1254
1255	/* Initialize synctype[] array.  If none set, take default. */
1256	if (!gp_cond1 &&
1257	    !gp_cond_exp1 &&
1258	    !gp_cond_full1 &&
1259	    !gp_cond_exp_full1 &&
1260	    !gp_exp1 &&
1261	    !gp_poll_exp1 &&
1262	    !gp_poll_exp_full1 &&
1263	    !gp_normal1 &&
1264	    !gp_poll1 &&
1265	    !gp_poll_full1 &&
1266	    !gp_sync1) {
1267		gp_cond1 = true;
1268		gp_cond_exp1 = true;
1269		gp_cond_full1 = true;
1270		gp_cond_exp_full1 = true;
1271		gp_exp1 = true;
1272		gp_poll_exp1 = true;
1273		gp_poll_exp_full1 = true;
1274		gp_normal1 = true;
1275		gp_poll1 = true;
1276		gp_poll_full1 = true;
1277		gp_sync1 = true;
1278	}
1279	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1280		synctype[nsynctypes++] = RTWS_COND_GET;
1281		pr_info("%s: Testing conditional GPs.\n", __func__);
1282	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1283		pr_alert("%s: gp_cond without primitives.\n", __func__);
1284	}
1285	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
1286		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
1287		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
1288	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
1289		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
1290	}
1291	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
1292		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
1293		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
1294	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
1295		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
1296	}
1297	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
1298		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
1299		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
1300	} else if (gp_cond_exp_full &&
1301		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
1302		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
1303	}
1304	if (gp_exp1 && cur_ops->exp_sync) {
1305		synctype[nsynctypes++] = RTWS_EXP_SYNC;
1306		pr_info("%s: Testing expedited GPs.\n", __func__);
1307	} else if (gp_exp && !cur_ops->exp_sync) {
1308		pr_alert("%s: gp_exp without primitives.\n", __func__);
1309	}
1310	if (gp_normal1 && cur_ops->deferred_free) {
1311		synctype[nsynctypes++] = RTWS_DEF_FREE;
1312		pr_info("%s: Testing asynchronous GPs.\n", __func__);
1313	} else if (gp_normal && !cur_ops->deferred_free) {
1314		pr_alert("%s: gp_normal without primitives.\n", __func__);
1315	}
1316	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
1317	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1318		synctype[nsynctypes++] = RTWS_POLL_GET;
1319		pr_info("%s: Testing polling GPs.\n", __func__);
1320	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1321		pr_alert("%s: gp_poll without primitives.\n", __func__);
1322	}
1323	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
1324	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
1325		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
1326		pr_info("%s: Testing polling full-state GPs.\n", __func__);
1327	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
1328		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
1329	}
1330	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
1331		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
1332		pr_info("%s: Testing polling expedited GPs.\n", __func__);
1333	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
1334		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
1335	}
1336	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
1337		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
1338		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
1339	} else if (gp_poll_exp_full &&
1340		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
1341		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
1342	}
1343	if (gp_sync1 && cur_ops->sync) {
1344		synctype[nsynctypes++] = RTWS_SYNC;
1345		pr_info("%s: Testing normal GPs.\n", __func__);
1346	} else if (gp_sync && !cur_ops->sync) {
1347		pr_alert("%s: gp_sync without primitives.\n", __func__);
1348	}
1349	pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
1350}
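
/*
 * A sketch of restricting the writers to a single update type, assuming
 * modular rcutorture (gp_poll and the other gp_* flags are module
 * parameters):
 *
 *	# modprobe rcutorture gp_poll=1
 *
 * With no gp_* parameter set, every primitive provided by the current
 * flavor's ops structure is fair game.
 */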
1351
1352/*
1353 * Do the specified rcu_torture_writer() synchronous grace period,
1354 * while also testing out the polled APIs.  Note well that the single-CPU
1355 * grace-period optimizations must be accounted for.
1356 */
1357static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
1358{
1359	unsigned long cookie;
1360	struct rcu_gp_oldstate cookie_full;
1361	bool dopoll;
1362	bool dopoll_full;
1363	unsigned long r = torture_random(trsp);
1364
1365	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
1366	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
1367	if (dopoll || dopoll_full)
1368		cpus_read_lock();
1369	if (dopoll)
1370		cookie = cur_ops->get_gp_state();
1371	if (dopoll_full)
1372		cur_ops->get_gp_state_full(&cookie_full);
1373	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
1374		sync();
1375	sync();
1376	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
1377		  "%s: Cookie check 3 failed %pS() online %*pbl.",
1378		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1379	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
1380		  "%s: Cookie check 4 failed %pS() online %*pbl",
1381		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1382	if (dopoll || dopoll_full)
1383		cpus_read_unlock();
1384}
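
/*
 * For reference, a minimal sketch of the polled grace-period pattern
 * that the cookie checks above exercise, in vanilla-RCU terms:
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();
 *	synchronize_rcu();
 *	WARN_ON_ONCE(!poll_state_synchronize_rcu(cookie));
 *
 * The ->poll_need_2gp check above covers flavors whose freshly
 * collected cookies require a second grace period before they are
 * guaranteed to have expired.
 */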
1385
1386/*
1387 * RCU torture writer kthread.  Repeatedly substitutes a new structure
1388 * for that pointed to by rcu_torture_current, freeing the old structure
1389 * after a series of grace periods (the "pipeline").
1390 */
1391static int
1392rcu_torture_writer(void *arg)
1393{
1394	bool boot_ended;
1395	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1396	unsigned long cookie;
1397	struct rcu_gp_oldstate cookie_full;
1398	int expediting = 0;
1399	unsigned long gp_snap;
1400	unsigned long gp_snap1;
1401	struct rcu_gp_oldstate gp_snap_full;
1402	struct rcu_gp_oldstate gp_snap1_full;
1403	int i;
1404	int idx;
1405	int oldnice = task_nice(current);
1406	struct rcu_gp_oldstate *rgo = NULL;
1407	int rgo_size = 0;
1408	struct rcu_torture *rp;
1409	struct rcu_torture *old_rp;
1410	static DEFINE_TORTURE_RANDOM(rand);
1411	unsigned long stallsdone = jiffies;
1412	bool stutter_waited;
1413	unsigned long *ulo = NULL;
1414	int ulo_size = 0;
1415
1416	// If a new stall test is added, this must be adjusted.
1417	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
1418		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
1419			      HZ * (stall_cpu_repeat + 1);
1420	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1421	if (!can_expedite)
1422		pr_alert("%s" TORTURE_FLAG
1423			 " GP expediting controlled from boot/sysfs for %s.\n",
1424			 torture_type, cur_ops->name);
1425	if (WARN_ONCE(nsynctypes == 0,
1426		      "%s: No update-side primitives.\n", __func__)) {
1427		/*
1428		 * No update-side primitives, so don't try updating.
1429		 * The resulting test won't be testing much, hence the
1430		 * above WARN_ONCE().
1431		 */
1432		rcu_torture_writer_state = RTWS_STOPPING;
1433		torture_kthread_stopping("rcu_torture_writer");
1434		return 0;
1435	}
1436	if (cur_ops->poll_active > 0) {
1437		ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
1438		if (!WARN_ON(!ulo))
1439			ulo_size = cur_ops->poll_active;
1440	}
1441	if (cur_ops->poll_active_full > 0) {
1442		rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
1443		if (!WARN_ON(!rgo))
1444			rgo_size = cur_ops->poll_active_full;
1445	}
1446
1447	do {
1448		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1449		torture_hrtimeout_us(500, 1000, &rand);
1450		rp = rcu_torture_alloc();
1451		if (rp == NULL)
1452			continue;
1453		rp->rtort_pipe_count = 0;
1454		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
1455		rcu_torture_writer_state = RTWS_DELAY;
1456		udelay(torture_random(&rand) & 0x3ff);
1457		rcu_torture_writer_state = RTWS_REPLACE;
1458		old_rp = rcu_dereference_check(rcu_torture_current,
1459					       current == writer_task);
1460		rp->rtort_mbtest = 1;
1461		rcu_assign_pointer(rcu_torture_current, rp);
1462		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1463		if (old_rp) {
1464			i = old_rp->rtort_pipe_count;
1465			if (i > RCU_TORTURE_PIPE_LEN)
1466				i = RCU_TORTURE_PIPE_LEN;
1467			atomic_inc(&rcu_torture_wcount[i]);
1468			WRITE_ONCE(old_rp->rtort_pipe_count,
1469				   old_rp->rtort_pipe_count + 1);
1470			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
1471
1472			// Make sure readers block polled grace periods.
1473			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1474				idx = cur_ops->readlock();
1475				cookie = cur_ops->get_gp_state();
1476				WARN_ONCE(cur_ops->poll_gp_state(cookie),
1477					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1478					  __func__,
1479					  rcu_torture_writer_state_getname(),
1480					  rcu_torture_writer_state,
1481					  cookie, cur_ops->get_gp_state());
1482				if (cur_ops->get_comp_state) {
1483					cookie = cur_ops->get_comp_state();
1484					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1485				}
1486				cur_ops->readunlock(idx);
1487			}
1488			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1489				idx = cur_ops->readlock();
1490				cur_ops->get_gp_state_full(&cookie_full);
1491				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1492					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1493					  __func__,
1494					  rcu_torture_writer_state_getname(),
1495					  rcu_torture_writer_state,
1496					  cpumask_pr_args(cpu_online_mask));
1497				if (cur_ops->get_comp_state_full) {
1498					cur_ops->get_comp_state_full(&cookie_full);
1499					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1500				}
1501				cur_ops->readunlock(idx);
1502			}
1503			switch (synctype[torture_random(&rand) % nsynctypes]) {
1504			case RTWS_DEF_FREE:
1505				rcu_torture_writer_state = RTWS_DEF_FREE;
1506				cur_ops->deferred_free(old_rp);
1507				break;
1508			case RTWS_EXP_SYNC:
1509				rcu_torture_writer_state = RTWS_EXP_SYNC;
1510				do_rtws_sync(&rand, cur_ops->exp_sync);
1511				rcu_torture_pipe_update(old_rp);
1512				break;
1513			case RTWS_COND_GET:
1514				rcu_torture_writer_state = RTWS_COND_GET;
1515				gp_snap = cur_ops->get_gp_state();
1516				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1517				rcu_torture_writer_state = RTWS_COND_SYNC;
1518				cur_ops->cond_sync(gp_snap);
1519				rcu_torture_pipe_update(old_rp);
1520				break;
1521			case RTWS_COND_GET_EXP:
1522				rcu_torture_writer_state = RTWS_COND_GET_EXP;
1523				gp_snap = cur_ops->get_gp_state_exp();
1524				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1525				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1526				cur_ops->cond_sync_exp(gp_snap);
1527				rcu_torture_pipe_update(old_rp);
1528				break;
1529			case RTWS_COND_GET_FULL:
1530				rcu_torture_writer_state = RTWS_COND_GET_FULL;
1531				cur_ops->get_gp_state_full(&gp_snap_full);
1532				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1533				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
1534				cur_ops->cond_sync_full(&gp_snap_full);
1535				rcu_torture_pipe_update(old_rp);
1536				break;
1537			case RTWS_COND_GET_EXP_FULL:
1538				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1539				cur_ops->get_gp_state_full(&gp_snap_full);
1540				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1541				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1542				cur_ops->cond_sync_exp_full(&gp_snap_full);
1543				rcu_torture_pipe_update(old_rp);
1544				break;
1545			case RTWS_POLL_GET:
1546				rcu_torture_writer_state = RTWS_POLL_GET;
1547				for (i = 0; i < ulo_size; i++)
1548					ulo[i] = cur_ops->get_comp_state();
1549				gp_snap = cur_ops->start_gp_poll();
1550				rcu_torture_writer_state = RTWS_POLL_WAIT;
1551				while (!cur_ops->poll_gp_state(gp_snap)) {
1552					gp_snap1 = cur_ops->get_gp_state();
1553					for (i = 0; i < ulo_size; i++)
1554						if (cur_ops->poll_gp_state(ulo[i]) ||
1555						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1556							ulo[i] = gp_snap1;
1557							break;
1558						}
1559					WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
1560					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1561								  &rand);
1562				}
1563				rcu_torture_pipe_update(old_rp);
1564				break;
1565			case RTWS_POLL_GET_FULL:
1566				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1567				for (i = 0; i < rgo_size; i++)
1568					cur_ops->get_comp_state_full(&rgo[i]);
1569				cur_ops->start_gp_poll_full(&gp_snap_full);
1570				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1571				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1572					cur_ops->get_gp_state_full(&gp_snap1_full);
1573					for (i = 0; i < rgo_size; i++)
1574						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1575						    cur_ops->same_gp_state_full(&rgo[i],
1576										&gp_snap1_full)) {
1577							rgo[i] = gp_snap1_full;
1578							break;
1579						}
1580					WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
1581					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1582								  &rand);
1583				}
1584				rcu_torture_pipe_update(old_rp);
1585				break;
1586			case RTWS_POLL_GET_EXP:
1587				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1588				gp_snap = cur_ops->start_gp_poll_exp();
1589				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1590				while (!cur_ops->poll_gp_state_exp(gp_snap))
1591					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1592								  &rand);
1593				rcu_torture_pipe_update(old_rp);
1594				break;
1595			case RTWS_POLL_GET_EXP_FULL:
1596				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1597				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1598				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1599				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1600					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1601								  &rand);
1602				rcu_torture_pipe_update(old_rp);
1603				break;
1604			case RTWS_SYNC:
1605				rcu_torture_writer_state = RTWS_SYNC;
1606				do_rtws_sync(&rand, cur_ops->sync);
1607				rcu_torture_pipe_update(old_rp);
1608				break;
1609			default:
1610				WARN_ON_ONCE(1);
1611				break;
1612			}
1613		}
1614		WRITE_ONCE(rcu_torture_current_version,
1615			   rcu_torture_current_version + 1);
1616		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1617		if (can_expedite &&
1618		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1619			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1620			if (expediting >= 0)
1621				rcu_expedite_gp();
1622			else
1623				rcu_unexpedite_gp();
1624			if (++expediting > 3)
1625				expediting = -expediting;
1626		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1627			can_expedite = !rcu_gp_is_expedited() &&
1628				       !rcu_gp_is_normal();
1629		}
1630		rcu_torture_writer_state = RTWS_STUTTER;
1631		boot_ended = rcu_inkernel_boot_has_ended();
1632		stutter_waited = stutter_wait("rcu_torture_writer");
1633		if (stutter_waited &&
1634		    !atomic_read(&rcu_fwd_cb_nodelay) &&
1635		    !cur_ops->slow_gps &&
1636		    !torture_must_stop() &&
1637		    boot_ended &&
1638		    time_after(jiffies, stallsdone))
1639			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1640				if (list_empty(&rcu_tortures[i].rtort_free) &&
1641				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
1642					tracing_off();
1643					if (cur_ops->gp_kthread_dbg)
1644						cur_ops->gp_kthread_dbg();
1645					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1646					rcu_ftrace_dump(DUMP_ALL);
1647				}
1648		if (stutter_waited)
1649			sched_set_normal(current, oldnice);
1650	} while (!torture_must_stop());
1651	rcu_torture_current = NULL;  // Let stats task know that we are done.
1652	/* Reset expediting back to unexpedited. */
1653	if (expediting > 0)
1654		expediting = -expediting;
1655	while (can_expedite && expediting++ < 0)
1656		rcu_unexpedite_gp();
1657	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1658	if (!can_expedite)
1659		pr_alert("%s" TORTURE_FLAG
1660			 " Dynamic grace-period expediting was disabled.\n",
1661			 torture_type);
1662	kfree(ulo);
1663	kfree(rgo);
1664	rcu_torture_writer_state = RTWS_STOPPING;
1665	torture_kthread_stopping("rcu_torture_writer");
1666	return 0;
1667}
1668
1669/*
1670 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1671 * delay between calls.
1672 */
1673static int
1674rcu_torture_fakewriter(void *arg)
1675{
1676	unsigned long gp_snap;
1677	struct rcu_gp_oldstate gp_snap_full;
1678	DEFINE_TORTURE_RANDOM(rand);
1679
1680	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1681	set_user_nice(current, MAX_NICE);
1682
1683	if (WARN_ONCE(nsynctypes == 0,
1684		      "%s: No update-side primitives.\n", __func__)) {
1685		/*
1686		 * No update-side primitives, so don't try updating.
1687		 * The resulting test won't be testing much, hence the
1688		 * above WARN_ONCE().
1689		 */
1690		torture_kthread_stopping("rcu_torture_fakewriter");
1691		return 0;
1692	}
1693
1694	do {
1695		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1696		if (cur_ops->cb_barrier != NULL &&
1697		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1698			cur_ops->cb_barrier();
1699		} else {
1700			switch (synctype[torture_random(&rand) % nsynctypes]) {
1701			case RTWS_DEF_FREE:
1702				break;
1703			case RTWS_EXP_SYNC:
1704				cur_ops->exp_sync();
1705				break;
1706			case RTWS_COND_GET:
1707				gp_snap = cur_ops->get_gp_state();
1708				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1709				cur_ops->cond_sync(gp_snap);
1710				break;
1711			case RTWS_COND_GET_EXP:
1712				gp_snap = cur_ops->get_gp_state_exp();
1713				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1714				cur_ops->cond_sync_exp(gp_snap);
1715				break;
1716			case RTWS_COND_GET_FULL:
1717				cur_ops->get_gp_state_full(&gp_snap_full);
1718				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1719				cur_ops->cond_sync_full(&gp_snap_full);
1720				break;
1721			case RTWS_COND_GET_EXP_FULL:
1722				cur_ops->get_gp_state_full(&gp_snap_full);
1723				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1724				cur_ops->cond_sync_exp_full(&gp_snap_full);
1725				break;
1726			case RTWS_POLL_GET:
1727				if (cur_ops->start_poll_irqsoff)
1728					local_irq_disable();
1729				gp_snap = cur_ops->start_gp_poll();
1730				if (cur_ops->start_poll_irqsoff)
1731					local_irq_enable();
1732				while (!cur_ops->poll_gp_state(gp_snap)) {
1733					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1734								  &rand);
1735				}
1736				break;
1737			case RTWS_POLL_GET_FULL:
1738				if (cur_ops->start_poll_irqsoff)
1739					local_irq_disable();
1740				cur_ops->start_gp_poll_full(&gp_snap_full);
1741				if (cur_ops->start_poll_irqsoff)
1742					local_irq_enable();
1743				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1744					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1745								  &rand);
1746				}
1747				break;
1748			case RTWS_POLL_GET_EXP:
1749				gp_snap = cur_ops->start_gp_poll_exp();
1750				while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1751					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1752								  &rand);
1753				}
1754				break;
1755			case RTWS_POLL_GET_EXP_FULL:
1756				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1757				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1758					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1759								  &rand);
1760				}
1761				break;
1762			case RTWS_SYNC:
1763				cur_ops->sync();
1764				break;
1765			default:
1766				WARN_ON_ONCE(1);
1767				break;
1768			}
1769		}
1770		stutter_wait("rcu_torture_fakewriter");
1771	} while (!torture_must_stop());
1772
1773	torture_kthread_stopping("rcu_torture_fakewriter");
1774	return 0;
1775}
1776
1777static void rcu_torture_timer_cb(struct rcu_head *rhp)
1778{
1779	kfree(rhp);
1780}
1781
1782// Set up and carry out testing of RCU's global memory ordering
1783static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1784					struct torture_random_state *trsp)
1785{
1786	unsigned long loops;
1787	int noc = torture_num_online_cpus();
1788	int rdrchked;
1789	int rdrchker;
1790	struct rcu_torture_reader_check *rtrcp; // Me.
1791	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1792	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1793	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1794
1795	if (myid < 0)
1796		return; // Don't try this from timer handlers.
1797
1798	// Increment my counter.
1799	rtrcp = &rcu_torture_reader_mbchk[myid];
1800	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1801
1802	// Attempt to assign someone else some checking work.
1803	rdrchked = torture_random(trsp) % nrealreaders;
1804	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1805	rdrchker = torture_random(trsp) % nrealreaders;
1806	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1807	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1808	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1809	    !READ_ONCE(rtp->rtort_chkp) &&
1810	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1811		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1812		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1813		rtrcp->rtc_chkrdr = rdrchked;
1814		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1815		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1816		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1817			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1818	}
1819
1820	// If assigned some completed work, do it!
1821	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1822	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1823		return; // No work or work not yet ready.
1824	rdrchked = rtrcp_assigner->rtc_chkrdr;
1825	if (WARN_ON_ONCE(rdrchked < 0))
1826		return;
1827	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1828	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1829	atomic_inc(&n_rcu_torture_mbchk_tries);
1830	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1831		atomic_inc(&n_rcu_torture_mbchk_fail);
1832	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1833	rtrcp_assigner->rtc_ready = 0;
1834	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1835	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1836}
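
/*
 * To summarize the handshake above: an assigner snapshots the checked
 * reader's loop count and publishes itself with cmpxchg_relaxed(), the
 * checker consumes that work only after acquiring ->rtc_ready (which is
 * set only after a grace period has elapsed), flags a failure if the
 * checked reader's count has fallen behind the snapshot, and finally
 * recycles both slots with smp_store_release().
 */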
1837
1838/*
1839 * Do one extension of an RCU read-side critical section using the
1840 * current reader state in readstate (set to zero for initial entry
1841 * to extended critical section), set the new state as specified by
1842 * newstate (set to zero for final exit from extended critical section),
1843 * and random-number-generator state in trsp.  If this is neither the
1844 * beginning nor the end of the critical section and if there was
1845 * actually a change, do a ->read_delay().
1846 */
1847static void rcutorture_one_extend(int *readstate, int newstate,
1848				  struct torture_random_state *trsp,
1849				  struct rt_read_seg *rtrsp)
1850{
1851	unsigned long flags;
1852	int idxnew1 = -1;
1853	int idxnew2 = -1;
1854	int idxold1 = *readstate;
1855	int idxold2 = idxold1;
1856	int statesnew = ~*readstate & newstate;
1857	int statesold = *readstate & ~newstate;
1858
1859	WARN_ON_ONCE(idxold2 < 0);
1860	WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
1861	rtrsp->rt_readstate = newstate;
1862
1863	/* First, put new protection in place to avoid critical-section gap. */
1864	if (statesnew & RCUTORTURE_RDR_BH)
1865		local_bh_disable();
1866	if (statesnew & RCUTORTURE_RDR_RBH)
1867		rcu_read_lock_bh();
1868	if (statesnew & RCUTORTURE_RDR_IRQ)
1869		local_irq_disable();
1870	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1871		preempt_disable();
1872	if (statesnew & RCUTORTURE_RDR_SCHED)
1873		rcu_read_lock_sched();
1874	if (statesnew & RCUTORTURE_RDR_RCU_1)
1875		idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
1876	if (statesnew & RCUTORTURE_RDR_RCU_2)
1877		idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
1878
1879	/*
1880	 * Next, remove old protection, in decreasing order of strength
1881	 * to avoid unlock paths that aren't safe in the stronger
1882	 * context.  Namely, BH cannot be enabled while interrupts are
1883	 * disabled.  Additionally, PREEMPT_RT requires that BH be enabled
1884	 * in preemptible context.
1885	 */
1886	if (statesold & RCUTORTURE_RDR_IRQ)
1887		local_irq_enable();
1888	if (statesold & RCUTORTURE_RDR_PREEMPT)
1889		preempt_enable();
1890	if (statesold & RCUTORTURE_RDR_SCHED)
1891		rcu_read_unlock_sched();
1892	if (statesold & RCUTORTURE_RDR_BH)
1893		local_bh_enable();
1894	if (statesold & RCUTORTURE_RDR_RBH)
1895		rcu_read_unlock_bh();
1896	if (statesold & RCUTORTURE_RDR_RCU_2) {
1897		cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2);
1898		WARN_ON_ONCE(idxnew2 != -1);
1899		idxold2 = 0;
1900	}
1901	if (statesold & RCUTORTURE_RDR_RCU_1) {
1902		bool lockit;
1903
1904		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1905		if (lockit)
1906			raw_spin_lock_irqsave(&current->pi_lock, flags);
1907		cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
1908		WARN_ON_ONCE(idxnew1 != -1);
1909		idxold1 = 0;
1910		if (lockit)
1911			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1912	}
1913
1914	/* Delay if neither beginning nor end and there was a change. */
1915	if ((statesnew || statesold) && *readstate && newstate)
1916		cur_ops->read_delay(trsp, rtrsp);
1917
1918	/* Update the reader state. */
1919	if (idxnew1 == -1)
1920		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1921	WARN_ON_ONCE(idxnew1 < 0);
1922	if (idxnew2 == -1)
1923		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1924	WARN_ON_ONCE(idxnew2 < 0);
1925	*readstate = idxnew1 | idxnew2 | newstate;
1926	WARN_ON_ONCE(*readstate < 0);
1927	if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
1928		pr_info("Unexpected readstate value of %#x\n", *readstate);
1929}
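
/*
 * Worked example: moving from readstate RCUTORTURE_RDR_RCU_1 |
 * RCUTORTURE_RDR_BH to newstate RCUTORTURE_RDR_RCU_1 |
 * RCUTORTURE_RDR_PREEMPT gives statesnew == RCUTORTURE_RDR_PREEMPT and
 * statesold == RCUTORTURE_RDR_BH, so preempt_disable() runs before
 * local_bh_enable(), and because this is neither initial entry nor
 * final exit, a ->read_delay() follows.  (On PREEMPT_RT,
 * rcutorture_extend_mask() below retains the BH bits in such cases,
 * since BH must not be re-enabled in atomic context there.)
 */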
1930
1931/* Return the biggest extendables mask given current RCU and boot parameters. */
1932static int rcutorture_extend_mask_max(void)
1933{
1934	int mask;
1935
1936	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1937	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1938	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1939	return mask;
1940}
1941
1942/* Return a random protection state mask, but with at least one bit set. */
1943static int
1944rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1945{
1946	int mask = rcutorture_extend_mask_max();
1947	unsigned long randmask1 = torture_random(trsp);
1948	unsigned long randmask2 = randmask1 >> 3;
1949	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1950	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1951	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1952
1953	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);  // Can't have reader idx bits.
1954	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1955	if (!(randmask1 & 0x7))
1956		mask = mask & randmask2;
1957	else
1958		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1959
1960	// Can't have nested RCU reader without outer RCU reader.
1961	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1962		if (oldmask & RCUTORTURE_RDR_RCU_1)
1963			mask &= ~RCUTORTURE_RDR_RCU_2;
1964		else
1965			mask |= RCUTORTURE_RDR_RCU_1;
1966	}
1967
1968	/*
1969	 * Can't enable BH with interrupts disabled.
1970	 */
1971	if (mask & RCUTORTURE_RDR_IRQ)
1972		mask |= oldmask & bhs;
1973
1974	/*
1975	 * Ideally these sequences would be detected in debug builds
1976	 * (regardless of RT), but until then don't stop testing
1977	 * them on non-RT.
1978	 */
1979	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1980		/* Can't modify BH in atomic context */
1981		if (oldmask & preempts_irq)
1982			mask &= ~bhs;
1983		if ((oldmask | mask) & preempts_irq)
1984			mask |= oldmask & bhs;
1985	}
1986
1987	return mask ?: RCUTORTURE_RDR_RCU_1;
1988}
1989
1990/*
1991 * Do a randomly selected number of extensions of an existing RCU read-side
1992 * critical section.
1993 */
1994static struct rt_read_seg *
1995rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1996		       struct rt_read_seg *rtrsp)
1997{
1998	int i;
1999	int j;
2000	int mask = rcutorture_extend_mask_max();
2001
2002	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
2003	if (!((mask - 1) & mask))
2004		return rtrsp;  /* Current RCU reader not extendable. */
2005	/* Bias towards larger numbers of loops. */
2006	i = torture_random(trsp);
2007	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
2008	for (j = 0; j < i; j++) {
2009		mask = rcutorture_extend_mask(*readstate, trsp);
2010		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
2011	}
2012	return &rtrsp[j];
2013}
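
/*
 * About the bias above: ORing the random value with a copy of itself
 * shifted right by three raises each low-order bit's probability of
 * being set from 1/2 to roughly 3/4, so the resulting loop count in
 * [1, 8] skews toward the upper end of that range.
 */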
2014
2015/*
2016 * Do one read-side critical section, returning false if there was
2017 * no data to read.  Can be invoked both from process context and
2018 * from a timer handler.
2019 */
2020static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
2021{
2022	bool checkpolling = !(torture_random(trsp) & 0xfff);
2023	unsigned long cookie;
2024	struct rcu_gp_oldstate cookie_full;
2025	int i;
2026	unsigned long started;
2027	unsigned long completed;
2028	int newstate;
2029	struct rcu_torture *p;
2030	int pipe_count;
2031	int readstate = 0;
2032	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
2033	struct rt_read_seg *rtrsp = &rtseg[0];
2034	struct rt_read_seg *rtrsp1;
2035	unsigned long long ts;
2036
2037	WARN_ON_ONCE(!rcu_is_watching());
2038	newstate = rcutorture_extend_mask(readstate, trsp);
2039	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
2040	if (checkpolling) {
2041		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2042			cookie = cur_ops->get_gp_state();
2043		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2044			cur_ops->get_gp_state_full(&cookie_full);
2045	}
2046	started = cur_ops->get_gp_seq();
2047	ts = rcu_trace_clock_local();
2048	p = rcu_dereference_check(rcu_torture_current,
2049				  !cur_ops->readlock_held || cur_ops->readlock_held());
2050	if (p == NULL) {
2051		/* Wait for rcu_torture_writer to get underway */
2052		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2053		return false;
2054	}
2055	if (p->rtort_mbtest == 0)
2056		atomic_inc(&n_rcu_torture_mberror);
2057	rcu_torture_reader_do_mbchk(myid, p, trsp);
2058	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
2059	preempt_disable();
2060	pipe_count = READ_ONCE(p->rtort_pipe_count);
2061	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
2062		// Should not happen in a correct RCU implementation,
2063		// but happens quite often for torture_type=busted.
2064		pipe_count = RCU_TORTURE_PIPE_LEN;
2065	}
2066	completed = cur_ops->get_gp_seq();
2067	if (pipe_count > 1) {
2068		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
2069					  ts, started, completed);
2070		rcu_ftrace_dump(DUMP_ALL);
2071	}
2072	__this_cpu_inc(rcu_torture_count[pipe_count]);
2073	completed = rcutorture_seq_diff(completed, started);
2074	if (completed > RCU_TORTURE_PIPE_LEN) {
2075		/* Should not happen, but... */
2076		completed = RCU_TORTURE_PIPE_LEN;
2077	}
2078	__this_cpu_inc(rcu_torture_batch[completed]);
2079	preempt_enable();
2080	if (checkpolling) {
2081		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2082			WARN_ONCE(cur_ops->poll_gp_state(cookie),
2083				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2084				  __func__,
2085				  rcu_torture_writer_state_getname(),
2086				  rcu_torture_writer_state,
2087				  cookie, cur_ops->get_gp_state());
2088		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2089			WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2090				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2091				  __func__,
2092				  rcu_torture_writer_state_getname(),
2093				  rcu_torture_writer_state,
2094				  cpumask_pr_args(cpu_online_mask));
2095	}
2096	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2097	WARN_ON_ONCE(readstate);
2098	// This next splat is expected behavior when the leakpointer module
2099	// parameter is set, especially for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2100	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2101
2102	/* If error or close call, record the sequence of reader protections. */
2103	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2104		i = 0;
2105		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2106			err_segs[i++] = *rtrsp1;
2107		rt_read_nsegs = i;
2108	}
2109
2110	return true;
2111}
2112
2113static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2114
2115/*
2116 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
2117 * incrementing the corresponding element of the pipeline array.  The
2118 * counter in the element should never be greater than 1; otherwise, the
2119 * RCU implementation is broken.
2120 */
2121static void rcu_torture_timer(struct timer_list *unused)
2122{
2123	atomic_long_inc(&n_rcu_torture_timers);
2124	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2125
2126	/* Test call_rcu() invocation from interrupt handler. */
2127	if (cur_ops->call) {
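		/* GFP_NOWAIT: timer handlers run in atomic context, so this allocation must not sleep. */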
2128		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2129
2130		if (rhp)
2131			cur_ops->call(rhp, rcu_torture_timer_cb);
2132	}
2133}
2134
2135/*
2136 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
2137 * incrementing the corresponding element of the pipeline array.  The
2138 * counter in the element should never be greater than 1; otherwise, the
2139 * RCU implementation is broken.
2140 */
2141static int
2142rcu_torture_reader(void *arg)
2143{
2144	unsigned long lastsleep = jiffies;
2145	long myid = (long)arg;
2146	int mynumonline = myid;
2147	DEFINE_TORTURE_RANDOM(rand);
2148	struct timer_list t;
2149
2150	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2151	set_user_nice(current, MAX_NICE);
2152	if (irqreader && cur_ops->irq_capable)
2153		timer_setup_on_stack(&t, rcu_torture_timer, 0);
2154	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2155	do {
2156		if (irqreader && cur_ops->irq_capable) {
2157			if (!timer_pending(&t))
2158				mod_timer(&t, jiffies + 1);
2159		}
2160		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2161			schedule_timeout_interruptible(HZ);
2162		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2163			torture_hrtimeout_us(500, 1000, &rand);
2164			lastsleep = jiffies + 10;
2165		}
2166		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2167			schedule_timeout_interruptible(HZ / 5);
2168		stutter_wait("rcu_torture_reader");
2169	} while (!torture_must_stop());
2170	if (irqreader && cur_ops->irq_capable) {
2171		del_timer_sync(&t);
2172		destroy_timer_on_stack(&t);
2173	}
2174	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2175	torture_kthread_stopping("rcu_torture_reader");
2176	return 0;
2177}
2178
2179/*
2180 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
2181 * increase race probabilities and fuzzes the interval between toggling.
2182 */
2183static int rcu_nocb_toggle(void *arg)
2184{
2185	int cpu;
2186	int maxcpu = -1;
2187	int oldnice = task_nice(current);
2188	long r;
2189	DEFINE_TORTURE_RANDOM(rand);
2190	ktime_t toggle_delay;
2191	unsigned long toggle_fuzz;
2192	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2193
2194	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2195	while (!rcu_inkernel_boot_has_ended())
2196		schedule_timeout_interruptible(HZ / 10);
2197	for_each_possible_cpu(cpu)
2198		maxcpu = cpu;
2199	WARN_ON(maxcpu < 0);
2200	if (toggle_interval > ULONG_MAX)
2201		toggle_fuzz = ULONG_MAX >> 3;
2202	else
2203		toggle_fuzz = toggle_interval >> 3;
2204	if (toggle_fuzz <= 0)
2205		toggle_fuzz = NSEC_PER_USEC;
2206	do {
2207		r = torture_random(&rand);
2208		cpu = (r >> 1) % (maxcpu + 1);
2209		if (r & 0x1) {
2210			rcu_nocb_cpu_offload(cpu);
2211			atomic_long_inc(&n_nocb_offload);
2212		} else {
2213			rcu_nocb_cpu_deoffload(cpu);
2214			atomic_long_inc(&n_nocb_deoffload);
2215		}
2216		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2217		set_current_state(TASK_INTERRUPTIBLE);
2218		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2219		if (stutter_wait("rcu_nocb_toggle"))
2220			sched_set_normal(current, oldnice);
2221	} while (!torture_must_stop());
2222	torture_kthread_stopping("rcu_nocb_toggle");
2223	return 0;
2224}
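
/*
 * Note that toggling presupposes offloadable CPUs, which in turn
 * presupposes a kernel booted with something like rcu_nocbs=0-7 (CPU
 * range illustrative) in addition to the rcutorture nocbs_nthreads and
 * nocbs_toggle module parameters.
 */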
2225
2226/*
2227 * Print torture statistics.  Caller must ensure that there is only
2228 * one call to this function at a given time!!!  This is normally
2229 * accomplished by relying on the module system to only have one copy
2230 * of the module loaded, and then by giving the rcu_torture_stats
2231 * kthread full control (or the init/cleanup functions when the
2232 * rcu_torture_stats kthread is not running).
2233 */
2234static void
2235rcu_torture_stats_print(void)
2236{
2237	int cpu;
2238	int i;
2239	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2240	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2241	struct rcu_torture *rtcp;
2242	static unsigned long rtcv_snap = ULONG_MAX;
2243	static bool splatted;
2244	struct task_struct *wtp;
2245
2246	for_each_possible_cpu(cpu) {
2247		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2248			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2249			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2250		}
2251	}
2252	for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2253		if (pipesummary[i] != 0)
2254			break;
2255	}
2256
2257	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2258	rtcp = rcu_access_pointer(rcu_torture_current);
2259	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2260		rtcp,
2261		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2262		rcu_torture_current_version,
2263		list_empty(&rcu_torture_freelist),
2264		atomic_read(&n_rcu_torture_alloc),
2265		atomic_read(&n_rcu_torture_alloc_fail),
2266		atomic_read(&n_rcu_torture_free));
2267	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2268		atomic_read(&n_rcu_torture_mberror),
2269		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2270		n_rcu_torture_barrier_error,
2271		n_rcu_torture_boost_ktrerror);
2272	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2273		n_rcu_torture_boost_failure,
2274		n_rcu_torture_boosts,
2275		atomic_long_read(&n_rcu_torture_timers));
2276	torture_onoff_stats();
2277	pr_cont("barrier: %ld/%ld:%ld ",
2278		data_race(n_barrier_successes),
2279		data_race(n_barrier_attempts),
2280		data_race(n_rcu_torture_barrier_error));
2281	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2282	pr_cont("nocb-toggles: %ld:%ld\n",
2283		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2284
2285	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2286	if (atomic_read(&n_rcu_torture_mberror) ||
2287	    atomic_read(&n_rcu_torture_mbchk_fail) ||
2288	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2289	    n_rcu_torture_boost_failure || i > 1) {
2290		pr_cont("%s", "!!! ");
2291		atomic_inc(&n_rcu_torture_error);
2292		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2293		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2294		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
2295		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2296		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2297		WARN_ON_ONCE(i > 1); // Too-short grace period
2298	}
2299	pr_cont("Reader Pipe: ");
2300	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2301		pr_cont(" %ld", pipesummary[i]);
2302	pr_cont("\n");
2303
2304	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2305	pr_cont("Reader Batch: ");
2306	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2307		pr_cont(" %ld", batchsummary[i]);
2308	pr_cont("\n");
2309
2310	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2311	pr_cont("Free-Block Circulation: ");
2312	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2313		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2314	}
2315	pr_cont("\n");
2316
2317	if (cur_ops->stats)
2318		cur_ops->stats();
2319	if (rtcv_snap == rcu_torture_current_version &&
2320	    rcu_access_pointer(rcu_torture_current) &&
2321	    !rcu_stall_is_suppressed()) {
2322		int __maybe_unused flags = 0;
2323		unsigned long __maybe_unused gp_seq = 0;
2324
2325		if (cur_ops->get_gp_data)
2326			cur_ops->get_gp_data(&flags, &gp_seq);
2327		wtp = READ_ONCE(writer_task);
2328		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2329			 rcu_torture_writer_state_getname(),
2330			 rcu_torture_writer_state, gp_seq, flags,
2331			 wtp == NULL ? ~0U : wtp->__state,
2332			 wtp == NULL ? -1 : (int)task_cpu(wtp));
2333		if (!splatted && wtp) {
2334			sched_show_task(wtp);
2335			splatted = true;
2336		}
2337		if (cur_ops->gp_kthread_dbg)
2338			cur_ops->gp_kthread_dbg();
2339		rcu_ftrace_dump(DUMP_ALL);
2340	}
2341	rtcv_snap = rcu_torture_current_version;
2342}
2343
2344/*
2345 * Periodically prints torture statistics, if periodic statistics printing
2346 * was specified via the stat_interval module parameter.
2347 */
2348static int
2349rcu_torture_stats(void *arg)
2350{
2351	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2352	do {
2353		schedule_timeout_interruptible(stat_interval * HZ);
2354		rcu_torture_stats_print();
2355		torture_shutdown_absorb("rcu_torture_stats");
2356	} while (!torture_must_stop());
2357	torture_kthread_stopping("rcu_torture_stats");
2358	return 0;
2359}
2360
2361/* Test mem_dump_obj() and friends.  */
2362static void rcu_torture_mem_dump_obj(void)
2363{
2364	struct rcu_head *rhp;
2365	struct kmem_cache *kcp;
2366	static int z;
2367
2368	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2369	if (WARN_ON_ONCE(!kcp))
2370		return;
2371	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2372	if (WARN_ON_ONCE(!rhp)) {
2373		kmem_cache_destroy(kcp);
2374		return;
2375	}
2376	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2377	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2378	mem_dump_obj(ZERO_SIZE_PTR);
2379	pr_alert("mem_dump_obj(NULL):");
2380	mem_dump_obj(NULL);
2381	pr_alert("mem_dump_obj(%px):", &rhp);
2382	mem_dump_obj(&rhp);
2383	pr_alert("mem_dump_obj(%px):", rhp);
2384	mem_dump_obj(rhp);
2385	pr_alert("mem_dump_obj(%px):", &rhp->func);
2386	mem_dump_obj(&rhp->func);
2387	pr_alert("mem_dump_obj(%px):", &z);
2388	mem_dump_obj(&z);
2389	kmem_cache_free(kcp, rhp);
2390	kmem_cache_destroy(kcp);
2391	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2392	if (WARN_ON_ONCE(!rhp))
2393		return;
2394	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2395	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2396	mem_dump_obj(rhp);
2397	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2398	mem_dump_obj(&rhp->func);
2399	kfree(rhp);
2400	rhp = vmalloc(4096);
2401	if (WARN_ON_ONCE(!rhp))
2402		return;
2403	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2404	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2405	mem_dump_obj(rhp);
2406	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2407	mem_dump_obj(&rhp->func);
2408	vfree(rhp);
2409}
2410
2411static void
2412rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2413{
2414	pr_alert("%s" TORTURE_FLAG
2415		 "--- %s: nreaders=%d nfakewriters=%d "
2416		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2417		 "shuffle_interval=%d stutter=%d irqreader=%d "
2418		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2419		 "test_boost=%d/%d test_boost_interval=%d "
2420		 "test_boost_duration=%d shutdown_secs=%d "
2421		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2422		 "stall_cpu_block=%d stall_cpu_repeat=%d "
2423		 "n_barrier_cbs=%d "
2424		 "onoff_interval=%d onoff_holdoff=%d "
2425		 "read_exit_delay=%d read_exit_burst=%d "
2426		 "reader_flavor=%x "
2427		 "nocbs_nthreads=%d nocbs_toggle=%d "
2428		 "test_nmis=%d\n",
2429		 torture_type, tag, nrealreaders, nfakewriters,
2430		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2431		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2432		 test_boost, cur_ops->can_boost,
2433		 test_boost_interval, test_boost_duration, shutdown_secs,
2434		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2435		 stall_cpu_block, stall_cpu_repeat,
2436		 n_barrier_cbs,
2437		 onoff_interval, onoff_holdoff,
2438		 read_exit_delay, read_exit_burst,
2439		 reader_flavor,
2440		 nocbs_nthreads, nocbs_toggle,
2441		 test_nmis);
2442}
2443
2444static int rcutorture_booster_cleanup(unsigned int cpu)
2445{
2446	struct task_struct *t;
2447
2448	if (boost_tasks[cpu] == NULL)
2449		return 0;
2450	mutex_lock(&boost_mutex);
2451	t = boost_tasks[cpu];
2452	boost_tasks[cpu] = NULL;
2453	rcu_torture_enable_rt_throttle();
2454	mutex_unlock(&boost_mutex);
2455
2456	/* This must be outside of the mutex, otherwise deadlock! */
2457	torture_stop_kthread(rcu_torture_boost, t);
2458	return 0;
2459}
2460
2461static int rcutorture_booster_init(unsigned int cpu)
2462{
2463	int retval;
2464
2465	if (boost_tasks[cpu] != NULL)
2466		return 0;  /* Already created, nothing more to do. */
2467
2468	// Testing RCU priority boosting requires that rcutorture do
2469	// some serious abuse.  Counter this by running ksoftirqd at
2470	// a higher priority.
2471	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2472		struct sched_param sp;
2473		struct task_struct *t;
2474
2475		t = per_cpu(ksoftirqd, cpu);
2476		WARN_ON_ONCE(!t);
2477		sp.sched_priority = 2;
2478		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2479#ifdef CONFIG_IRQ_FORCED_THREADING
2480		if (force_irqthreads()) {
2481			t = per_cpu(ktimerd, cpu);
2482			WARN_ON_ONCE(!t);
2483			sp.sched_priority = 2;
2484			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2485		}
2486#endif
2487	}
2488
2489	/* Don't allow time recalculation while creating a new task. */
2490	mutex_lock(&boost_mutex);
2491	rcu_torture_disable_rt_throttle();
2492	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2493	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2494					      cpu, "rcu_torture_boost_%u");
2495	if (IS_ERR(boost_tasks[cpu])) {
2496		retval = PTR_ERR(boost_tasks[cpu]);
2497		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2498		n_rcu_torture_boost_ktrerror++;
2499		boost_tasks[cpu] = NULL;
2500		mutex_unlock(&boost_mutex);
2501		return retval;
2502	}
2503	mutex_unlock(&boost_mutex);
2504	return 0;
2505}
2506
2507static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
2508{
2509	pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
2510	return NOTIFY_OK;
2511}
2512
2513static struct notifier_block rcu_torture_stall_block = {
2514	.notifier_call = rcu_torture_stall_nf,
2515};
2516
2517/*
2518 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
2519 * induces a CPU stall for the time specified by stall_cpu.  If a new
2520 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
2521 */
2522static void rcu_torture_stall_one(int rep, int irqsoff)
2523{
2524	int idx;
2525	unsigned long stop_at;
2526
2527	if (stall_cpu_holdoff > 0) {
2528		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2529		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2530		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2531	}
2532	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2533		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2534		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2535		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2536			if (kthread_should_stop())
2537				break;
2538			schedule_timeout_uninterruptible(HZ);
2539		}
2540	}
2541	if (!kthread_should_stop() && stall_cpu > 0) {
2542		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2543		stop_at = ktime_get_seconds() + stall_cpu;
2544		/* RCU CPU stall is expected behavior in the following code. */
2545		idx = cur_ops->readlock();
2546		if (irqsoff)
2547			local_irq_disable();
2548		else if (!stall_cpu_block)
2549			preempt_disable();
2550		pr_alert("%s start stall episode %d on CPU %d.\n",
2551			  __func__, rep + 1, raw_smp_processor_id());
2552		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
2553		       !kthread_should_stop())
2554			if (stall_cpu_block) {
2555#ifdef CONFIG_PREEMPTION
2556				preempt_schedule();
2557#else
2558				schedule_timeout_uninterruptible(HZ);
2559#endif
2560			} else if (stall_no_softlockup) {
2561				touch_softlockup_watchdog();
2562			}
2563		if (irqsoff)
2564			local_irq_enable();
2565		else if (!stall_cpu_block)
2566			preempt_enable();
2567		cur_ops->readunlock(idx);
2568	}
2569}
2570
2571/*
2572 * CPU-stall kthread.  Invokes rcu_torture_stall_one() once, and then as many
2573 * additional times as specified by the stall_cpu_repeat module parameter.
2574 * Note that stall_cpu_irqsoff is ignored on the second and subsequent
2575 * stalls.
2576 */
2577static int rcu_torture_stall(void *args)
2578{
2579	int i;
2580	int repeat = stall_cpu_repeat;
2581	int ret;
2582
2583	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2584	if (repeat < 0) {
2585		repeat = 0;
2586		WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
2587	}
2588	if (rcu_cpu_stall_notifiers) {
2589		ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
2590		if (ret)
2591			pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
2592				__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
2593	}
2594	for (i = 0; i <= repeat; i++) {
2595		if (kthread_should_stop())
2596			break;
2597		rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0);
2598	}
2599	pr_alert("%s end.\n", __func__);
2600	if (rcu_cpu_stall_notifiers && !ret) {
2601		ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
2602		if (ret)
2603			pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
2604	}
2605	torture_shutdown_absorb("rcu_torture_stall");
2606	while (!kthread_should_stop())
2607		schedule_timeout_interruptible(10 * HZ);
2608	return 0;
2609}
2610
2611/* Spawn CPU-stall kthread, if stall_cpu specified. */
2612static int __init rcu_torture_stall_init(void)
2613{
2614	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2615		return 0;
2616	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2617}
2618
2619/* State structure for forward-progress self-propagating RCU callback. */
2620struct fwd_cb_state {
2621	struct rcu_head rh;
2622	int stop;
2623};
2624
2625/*
2626 * Forward-progress self-propagating RCU callback function.  Because
2627 * callbacks run from softirq, this function is an implicit RCU read-side
2628 * critical section.
2629 */
2630static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2631{
2632	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2633
2634	if (READ_ONCE(fcsp->stop)) {
2635		WRITE_ONCE(fcsp->stop, 2);
2636		return;
2637	}
2638	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2639}
2640
2641/* State for continuous-flood RCU callbacks. */
2642struct rcu_fwd_cb {
2643	struct rcu_head rh;
2644	struct rcu_fwd_cb *rfc_next;
2645	struct rcu_fwd *rfc_rfp;
2646	int rfc_gps;
2647};
2648
2649#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2650#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2651#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2652#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2653#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
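// With MAX_FWD_CB_JIFFIES = 8 * HZ and FWD_CBS_HIST_DIV = 10 buckets
// per second, N_LAUNDERS_HIST works out to 2 * 8 * 10 = 160 buckets,
// covering twice the maximum callback-flood duration.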
2654
2655struct rcu_launder_hist {
2656	long n_launders;
2657	unsigned long launder_gp_seq;
2658};
2659
2660struct rcu_fwd {
2661	spinlock_t rcu_fwd_lock;
2662	struct rcu_fwd_cb *rcu_fwd_cb_head;
2663	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2664	long n_launders_cb;
2665	unsigned long rcu_fwd_startat;
2666	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2667	unsigned long rcu_launder_gp_seq_start;
2668	int rcu_fwd_id;
2669};
2670
2671static DEFINE_MUTEX(rcu_fwd_mutex);
2672static struct rcu_fwd *rcu_fwds;
2673static unsigned long rcu_fwd_seq;
2674static atomic_long_t rcu_fwd_max_cbs;
2675static bool rcu_fwd_emergency_stop;
2676
2677static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2678{
2679	unsigned long gps;
2680	unsigned long gps_old;
2681	int i;
2682	int j;
2683
2684	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2685		if (rfp->n_launders_hist[i].n_launders > 0)
2686			break;
2687	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2688		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2689	gps_old = rfp->rcu_launder_gp_seq_start;
2690	for (j = 0; j <= i; j++) {
2691		gps = rfp->n_launders_hist[j].launder_gp_seq;
2692		pr_cont(" %ds/%d: %ld:%ld",
2693			j + 1, FWD_CBS_HIST_DIV,
2694			rfp->n_launders_hist[j].n_launders,
2695			rcutorture_seq_diff(gps, gps_old));
2696		gps_old = gps;
2697	}
2698	pr_cont("\n");
2699}
2700
2701/* Callback function for continuous-flood RCU callbacks. */
2702static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2703{
2704	unsigned long flags;
2705	int i;
2706	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2707	struct rcu_fwd_cb **rfcpp;
2708	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2709
2710	rfcp->rfc_next = NULL;
2711	rfcp->rfc_gps++;
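	// Re-enqueue this callback at the tail of ->rcu_fwd_cb_head.
	// The list is tail-pointer based: update the tail pointer under
	// the lock, then use smp_store_release() to publish the new
	// element so that the lockless READ_ONCE() traversal in
	// rcu_torture_fwd_prog_cr() sees a fully initialized element.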
2712	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2713	rfcpp = rfp->rcu_fwd_cb_tail;
2714	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2715	smp_store_release(rfcpp, rfcp);
2716	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2717	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2718	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2719		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2720	rfp->n_launders_hist[i].n_launders++;
2721	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2722	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2723}
2724
2725// Give the scheduler a chance, even on nohz_full CPUs.
2726static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2727{
2728	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2729		// Real call_rcu() floods hit userspace, so emulate that.
2730		if (need_resched() || (iter & 0xfff))
2731			schedule();
2732		return;
2733	}
2734	// No userspace emulation: CB invocation throttles call_rcu()
2735	cond_resched();
2736}
2737
2738/*
2739 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2740 * test is over or because we hit an OOM event.
2741 */
2742static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2743{
2744	unsigned long flags;
2745	unsigned long freed = 0;
2746	struct rcu_fwd_cb *rfcp;
2747
2748	for (;;) {
2749		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2750		rfcp = rfp->rcu_fwd_cb_head;
2751		if (!rfcp) {
2752			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2753			break;
2754		}
2755		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2756		if (!rfp->rcu_fwd_cb_head)
2757			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2758		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2759		kfree(rfcp);
2760		freed++;
2761		rcu_torture_fwd_prog_cond_resched(freed);
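		// On nohz_full CPUs the tick may be stopped, so
		// explicitly report a momentary quiescent state to
		// keep this free loop from indefinitely extending
		// grace periods.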
2762		if (tick_nohz_full_enabled()) {
2763			local_irq_save(flags);
2764			rcu_momentary_eqs();
2765			local_irq_restore(flags);
2766		}
2767	}
2768	return freed;
2769}
2770
2771/* Carry out need_resched()/cond_resched() forward-progress testing. */
2772static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2773				    int *tested, int *tested_tries)
2774{
2775	unsigned long cver;
2776	unsigned long dur;
2777	struct fwd_cb_state fcs;
2778	unsigned long gps;
2779	int idx;
2780	int sd;
2781	int sd4;
2782	bool selfpropcb = false;
2783	unsigned long stopat;
2784	static DEFINE_TORTURE_RANDOM(trs);
2785
2786	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2787	if (!cur_ops->sync)
2788		return; // Cannot do need_resched() forward progress testing without ->sync.
2789	if (cur_ops->call && cur_ops->cb_barrier) {
2790		init_rcu_head_on_stack(&fcs.rh);
2791		selfpropcb = true;
2792	}
2793
2794	/* Tight loop containing cond_resched(). */
2795	atomic_inc(&rcu_fwd_cb_nodelay);
2796	cur_ops->sync(); /* Later readers see above write. */
2797	if  (selfpropcb) {
2798		WRITE_ONCE(fcs.stop, 0);
2799		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2800	}
2801	cver = READ_ONCE(rcu_torture_current_version);
2802	gps = cur_ops->get_gp_seq();
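	// Run the tight reader loop for a random duration between
	// 1/fwd_progress_div of the RCU CPU stall-warning timeout and
	// (almost) the full timeout: long enough to need forward
	// progress, but short enough to avoid stall splats.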
2803	sd = cur_ops->stall_dur() + 1;
2804	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2805	dur = sd4 + torture_random(&trs) % (sd - sd4);
2806	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2807	stopat = rfp->rcu_fwd_startat + dur;
2808	while (time_before(jiffies, stopat) &&
2809	       !shutdown_time_arrived() &&
2810	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2811		idx = cur_ops->readlock();
2812		udelay(10);
2813		cur_ops->readunlock(idx);
2814		if (!fwd_progress_need_resched || need_resched())
2815			cond_resched();
2816	}
2817	(*tested_tries)++;
2818	if (!time_before(jiffies, stopat) &&
2819	    !shutdown_time_arrived() &&
2820	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2821		(*tested)++;
2822		cver = READ_ONCE(rcu_torture_current_version) - cver;
2823		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2824		WARN_ON(!cver && gps < 2);
2825		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2826			 rfp->rcu_fwd_id, dur, cver, gps);
2827	}
2828	if (selfpropcb) {
2829		WRITE_ONCE(fcs.stop, 1);
2830		cur_ops->sync(); /* Wait for running CB to complete. */
2831		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2832		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2833	}
2834
2835	if (selfpropcb) {
2836		WARN_ON(READ_ONCE(fcs.stop) != 2);
2837		destroy_rcu_head_on_stack(&fcs.rh);
2838	}
2839	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2840	atomic_dec(&rcu_fwd_cb_nodelay);
2841}
2842
2843/* Carry out call_rcu() forward-progress testing. */
2844static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2845{
2846	unsigned long cver;
2847	unsigned long flags;
2848	unsigned long gps;
2849	int i;
2850	long n_launders;
2851	long n_launders_cb_snap;
2852	long n_launders_sa;
2853	long n_max_cbs;
2854	long n_max_gps;
2855	struct rcu_fwd_cb *rfcp;
2856	struct rcu_fwd_cb *rfcpn;
2857	unsigned long stopat;
2858	unsigned long stoppedat;
2859
2860	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2861	if (READ_ONCE(rcu_fwd_emergency_stop))
2862		return; /* Get out of the way quickly, no GP wait! */
2863	if (!cur_ops->call)
2864		return; /* Can't do call_rcu() fwd prog without ->call. */
2865
2866	/* Loop continuously posting RCU callbacks. */
2867	atomic_inc(&rcu_fwd_cb_nodelay);
2868	cur_ops->sync(); /* Later readers see above write. */
2869	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2870	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2871	n_launders = 0;
2872	n_launders = 0;
2872	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread runs.
2873	n_launders_sa = 0;
2874	n_max_cbs = 0;
2875	n_max_gps = 0;
2876	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2877		rfp->n_launders_hist[i].n_launders = 0;
2878	cver = READ_ONCE(rcu_torture_current_version);
2879	gps = cur_ops->get_gp_seq();
2880	rfp->rcu_launder_gp_seq_start = gps;
2881	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
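	// Flood loop: if the head callback has a successor, it has
	// already been invoked and re-enqueued by rcu_torture_fwd_cb_cr(),
	// so "launder" it by reposting it; otherwise, allocate a new
	// callback (subject to ->cbflood_max).  Stop early once at least
	// MIN_FWD_CBS_LAUNDERED callbacks have each been laundered
	// MIN_FWD_CB_LAUNDERS or more times.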
2882	while (time_before(jiffies, stopat) &&
2883	       !shutdown_time_arrived() &&
2884	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2885		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2886		rfcpn = NULL;
2887		if (rfcp)
2888			rfcpn = READ_ONCE(rfcp->rfc_next);
2889		if (rfcpn) {
2890			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2891			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2892				break;
2893			rfp->rcu_fwd_cb_head = rfcpn;
2894			n_launders++;
2895			n_launders_sa++;
2896		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2897			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2898			if (WARN_ON_ONCE(!rfcp)) {
2899				schedule_timeout_interruptible(1);
2900				continue;
2901			}
2902			n_max_cbs++;
2903			n_launders_sa = 0;
2904			rfcp->rfc_gps = 0;
2905			rfcp->rfc_rfp = rfp;
2906		} else {
2907			rfcp = NULL;
2908		}
2909		if (rfcp)
2910			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2911		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2912		if (tick_nohz_full_enabled()) {
2913			local_irq_save(flags);
2914			rcu_momentary_eqs();
2915			local_irq_restore(flags);
2916		}
2917	}
2918	stoppedat = jiffies;
2919	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2920	cver = READ_ONCE(rcu_torture_current_version) - cver;
2921	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2922	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2923	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2924	(void)rcu_torture_fwd_prog_cbfree(rfp);
2925
2926	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2927	    !shutdown_time_arrived()) {
2928		if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg)
2929			cur_ops->gp_kthread_dbg();
2930		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n",
2931			 __func__,
2932			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2933			 n_launders + n_max_cbs - n_launders_cb_snap,
2934			 n_launders, n_launders_sa,
2935			 n_max_gps, n_max_cbs, cver, gps, num_online_cpus());
2936		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2937		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2938		rcu_torture_fwd_cb_hist(rfp);
2939		mutex_unlock(&rcu_fwd_mutex);
2940	}
2941	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2942	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2943	atomic_dec(&rcu_fwd_cb_nodelay);
2944}
2945
2946
2947/*
2948 * OOM notifier, but this only prints diagnostic information for the
2949 * current forward-progress test.
2950 */
2951static int rcutorture_oom_notify(struct notifier_block *self,
2952				 unsigned long notused, void *nfreed)
2953{
2954	int i;
2955	long ncbs;
2956	struct rcu_fwd *rfp;
2957
2958	mutex_lock(&rcu_fwd_mutex);
2959	rfp = rcu_fwds;
2960	if (!rfp) {
2961		mutex_unlock(&rcu_fwd_mutex);
2962		return NOTIFY_OK;
2963	}
2964	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2965	     __func__);
2966	for (i = 0; i < fwd_progress; i++) {
2967		rcu_torture_fwd_cb_hist(&rfp[i]);
2968		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2969	}
2970	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2971	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
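	/*
	 * Callbacks still queued within RCU re-add themselves to the
	 * ->rcu_fwd_cb_head lists as they are invoked, so alternate
	 * freeing with cb_barrier() waits a few times to drain them all.
	 */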
2972	ncbs = 0;
2973	for (i = 0; i < fwd_progress; i++)
2974		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2975	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2976	cur_ops->cb_barrier();
2977	ncbs = 0;
2978	for (i = 0; i < fwd_progress; i++)
2979		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2980	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2981	cur_ops->cb_barrier();
2982	ncbs = 0;
2983	for (i = 0; i < fwd_progress; i++)
2984		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2985	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2986	smp_mb(); /* Frees before return to avoid redoing OOM. */
2987	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2988	pr_info("%s returning after OOM processing.\n", __func__);
2989	mutex_unlock(&rcu_fwd_mutex);
2990	return NOTIFY_OK;
2991}
2992
2993static struct notifier_block rcutorture_oom_nb = {
2994	.notifier_call = rcutorture_oom_notify
2995};
2996
2997/* Carry out grace-period forward-progress testing. */
2998static int rcu_torture_fwd_prog(void *args)
2999{
3000	bool firsttime = true;
3001	long max_cbs;
3002	int oldnice = task_nice(current);
3003	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
3004	struct rcu_fwd *rfp = args;
3005	int tested = 0;
3006	int tested_tries = 0;
3007
3008	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
3009	rcu_bind_current_to_nocb();
3010	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
3011		set_user_nice(current, MAX_NICE);
3012	do {
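		// Kthread 0 acts as leader: it clears the emergency-stop
		// flag and advances rcu_fwd_seq; the remaining kthreads
		// wait for the sequence number to change before starting
		// another round of forward-progress testing.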
3013		if (!rfp->rcu_fwd_id) {
3014			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
3015			WRITE_ONCE(rcu_fwd_emergency_stop, false);
3016			if (!firsttime) {
3017				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
3018				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
3019			}
3020			firsttime = false;
3021			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
3022		} else {
3023			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
3024				schedule_timeout_interruptible(HZ / 20);
3025			oldseq = READ_ONCE(rcu_fwd_seq);
3026		}
3027		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3028		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
3029			rcu_torture_fwd_prog_cr(rfp);
3030		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
3031		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
3032		     (rcu_inkernel_boot_has_ended() &&
3033		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
3034			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
3035
3036		/* Avoid slow periods, better to test when busy. */
3037		if (stutter_wait("rcu_torture_fwd_prog"))
3038			sched_set_normal(current, oldnice);
3039	} while (!torture_must_stop());
3040	/* Short runs might not contain a valid forward-progress attempt. */
3041	if (!rfp->rcu_fwd_id) {
3042		WARN_ON(!tested && tested_tries >= 5);
3043		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
3044	}
3045	torture_kthread_stopping("rcu_torture_fwd_prog");
3046	return 0;
3047}
3048
3049/* If forward-progress checking is requested and feasible, spawn the thread. */
3050static int __init rcu_torture_fwd_prog_init(void)
3051{
3052	int i;
3053	int ret = 0;
3054	struct rcu_fwd *rfp;
3055
3056	if (!fwd_progress)
3057		return 0; /* Not requested, so don't do it. */
3058	if (fwd_progress >= nr_cpu_ids) {
3059		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
3060		fwd_progress = nr_cpu_ids;
3061	} else if (fwd_progress < 0) {
3062		fwd_progress = nr_cpu_ids;
3063	}
3064	if ((!cur_ops->sync && !cur_ops->call) ||
3065	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
3066	    cur_ops == &rcu_busted_ops) {
3067		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
3068		fwd_progress = 0;
3069		return 0;
3070	}
3071	if (stall_cpu > 0) {
3072		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
3073		fwd_progress = 0;
3074		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
3075			return -EINVAL; /* In module, can fail back to user. */
3076		WARN_ON(1); /* Make sure rcutorture notices conflict. */
3077		return 0;
3078	}
3079	if (fwd_progress_holdoff <= 0)
3080		fwd_progress_holdoff = 1;
3081	if (fwd_progress_div <= 0)
3082		fwd_progress_div = 4;
3083	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
3084	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
3085	if (!rfp || !fwd_prog_tasks) {
3086		kfree(rfp);
3087		kfree(fwd_prog_tasks);
3088		fwd_prog_tasks = NULL;
3089		fwd_progress = 0;
3090		return -ENOMEM;
3091	}
3092	for (i = 0; i < fwd_progress; i++) {
3093		spin_lock_init(&rfp[i].rcu_fwd_lock);
3094		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
3095		rfp[i].rcu_fwd_id = i;
3096	}
3097	mutex_lock(&rcu_fwd_mutex);
3098	rcu_fwds = rfp;
3099	mutex_unlock(&rcu_fwd_mutex);
3100	register_oom_notifier(&rcutorture_oom_nb);
3101	for (i = 0; i < fwd_progress; i++) {
3102		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
3103		if (ret) {
3104			fwd_progress = i;
3105			return ret;
3106		}
3107	}
3108	return 0;
3109}
3110
3111static void rcu_torture_fwd_prog_cleanup(void)
3112{
3113	int i;
3114	struct rcu_fwd *rfp;
3115
3116	if (!rcu_fwds || !fwd_prog_tasks)
3117		return;
3118	for (i = 0; i < fwd_progress; i++)
3119		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
3120	unregister_oom_notifier(&rcutorture_oom_nb);
3121	mutex_lock(&rcu_fwd_mutex);
3122	rfp = rcu_fwds;
3123	rcu_fwds = NULL;
3124	mutex_unlock(&rcu_fwd_mutex);
3125	kfree(rfp);
3126	kfree(fwd_prog_tasks);
3127	fwd_prog_tasks = NULL;
3128}
3129
3130/* Callback function for RCU barrier testing. */
3131static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3132{
3133	atomic_inc(&barrier_cbs_invoked);
3134}
3135
3136/* IPI handler to get callback posted on desired CPU, if online. */
3137static int rcu_torture_barrier1cb(void *rcu_void)
3138{
3139	struct rcu_head *rhp = rcu_void;
3140
3141	cur_ops->call(rhp, rcu_torture_barrier_cbf);
3142	return 0;
3143}
3144
3145/* kthread function to register callbacks used to test RCU barriers. */
3146static int rcu_torture_barrier_cbs(void *arg)
3147{
3148	long myid = (long)arg;
3149	bool lastphase = false;
3150	bool newphase;
3151	struct rcu_head rcu;
3152
3153	init_rcu_head_on_stack(&rcu);
3154	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3155	set_user_nice(current, MAX_NICE);
3156	do {
3157		wait_event(barrier_cbs_wq[myid],
3158			   (newphase =
3159			    smp_load_acquire(&barrier_phase)) != lastphase ||
3160			   torture_must_stop());
3161		lastphase = newphase;
3162		if (torture_must_stop())
3163			break;
3164		/*
3165		 * The above smp_load_acquire() ensures barrier_phase load
3166		 * is ordered before the following ->call().
3167		 */
3168		if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
3169			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3170
3171		if (atomic_dec_and_test(&barrier_cbs_count))
3172			wake_up(&barrier_wq);
3173	} while (!torture_must_stop());
3174	if (cur_ops->cb_barrier != NULL)
3175		cur_ops->cb_barrier();
3176	destroy_rcu_head_on_stack(&rcu);
3177	torture_kthread_stopping("rcu_torture_barrier_cbs");
3178	return 0;
3179}
3180
3181/* kthread function to drive and coordinate RCU barrier testing. */
3182static int rcu_torture_barrier(void *arg)
3183{
3184	int i;
3185
3186	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
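	/*
	 * Each pass flips barrier_phase to wake the barrier_cbs kthreads,
	 * each of which posts one callback, preferably pinned to its
	 * assigned CPU.  Once all have checked in, cb_barrier() is
	 * invoked, after which every one of those callbacks must have
	 * run -- anything less is a bug.
	 */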
3187	do {
3188		atomic_set(&barrier_cbs_invoked, 0);
3189		atomic_set(&barrier_cbs_count, n_barrier_cbs);
3190		/* Ensure barrier_phase ordered after prior assignments. */
3191		smp_store_release(&barrier_phase, !barrier_phase);
3192		for (i = 0; i < n_barrier_cbs; i++)
3193			wake_up(&barrier_cbs_wq[i]);
3194		wait_event(barrier_wq,
3195			   atomic_read(&barrier_cbs_count) == 0 ||
3196			   torture_must_stop());
3197		if (torture_must_stop())
3198			break;
3199		n_barrier_attempts++;
3200		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3201		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
3202			n_rcu_torture_barrier_error++;
3203			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
3204			       atomic_read(&barrier_cbs_invoked),
3205			       n_barrier_cbs);
3206			WARN_ON(1);
3207			// Wait manually for the remaining callbacks
3208			i = 0;
3209			do {
3210				if (WARN_ON(i++ > HZ))
3211					i = INT_MIN;
3212				schedule_timeout_interruptible(1);
3213				cur_ops->cb_barrier();
3214			} while (atomic_read(&barrier_cbs_invoked) !=
3215				 n_barrier_cbs &&
3216				 !torture_must_stop());
3217			smp_mb(); // Can't trust ordering if broken.
3218			if (!torture_must_stop())
3219				pr_err("Recovered: barrier_cbs_invoked = %d\n",
3220				       atomic_read(&barrier_cbs_invoked));
3221		} else {
3222			n_barrier_successes++;
3223		}
3224		schedule_timeout_interruptible(HZ / 10);
3225	} while (!torture_must_stop());
3226	torture_kthread_stopping("rcu_torture_barrier");
3227	return 0;
3228}
3229
3230/* Initialize RCU barrier testing. */
3231static int rcu_torture_barrier_init(void)
3232{
3233	int i;
3234	int ret;
3235
3236	if (n_barrier_cbs <= 0)
3237		return 0;
3238	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3239		pr_alert("%s" TORTURE_FLAG
3240			 " Call or barrier ops missing for %s,\n",
3241			 torture_type, cur_ops->name);
3242		pr_alert("%s" TORTURE_FLAG
3243			 " RCU barrier testing omitted from run.\n",
3244			 torture_type);
3245		return 0;
3246	}
3247	atomic_set(&barrier_cbs_count, 0);
3248	atomic_set(&barrier_cbs_invoked, 0);
3249	barrier_cbs_tasks =
3250		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
3251			GFP_KERNEL);
3252	barrier_cbs_wq =
3253		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
3254	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
3255		return -ENOMEM;
3256	for (i = 0; i < n_barrier_cbs; i++) {
3257		init_waitqueue_head(&barrier_cbs_wq[i]);
3258		ret = torture_create_kthread(rcu_torture_barrier_cbs,
3259					     (void *)(long)i,
3260					     barrier_cbs_tasks[i]);
3261		if (ret)
3262			return ret;
3263	}
3264	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
3265}
3266
3267/* Clean up after RCU barrier testing. */
3268static void rcu_torture_barrier_cleanup(void)
3269{
3270	int i;
3271
3272	torture_stop_kthread(rcu_torture_barrier, barrier_task);
3273	if (barrier_cbs_tasks != NULL) {
3274		for (i = 0; i < n_barrier_cbs; i++)
3275			torture_stop_kthread(rcu_torture_barrier_cbs,
3276					     barrier_cbs_tasks[i]);
3277		kfree(barrier_cbs_tasks);
3278		barrier_cbs_tasks = NULL;
3279	}
3280	if (barrier_cbs_wq != NULL) {
3281		kfree(barrier_cbs_wq);
3282		barrier_cbs_wq = NULL;
3283	}
3284}
3285
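/*
 * Priority-boost testing requires either test_boost=2 (forced) or
 * test_boost=1 with an RCU flavor that supports boosting, plus polled
 * grace-period primitives and RCU GP kthreads running at a real-time
 * priority of at least 2 (see the rcutree.kthread_prio boot parameter).
 */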
3286static bool rcu_torture_can_boost(void)
3287{
3288	static int boost_warn_once;
3289	int prio;
3290
3291	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3292		return false;
3293	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3294		return false;
3295
3296	prio = rcu_get_gp_kthreads_prio();
3297	if (!prio)
3298		return false;
3299
3300	if (prio < 2) {
3301		if (boost_warn_once == 1)
3302			return false;
3303
3304		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
3305		boost_warn_once = 1;
3306		return false;
3307	}
3308
3309	return true;
3310}
3311
3312static bool read_exit_child_stop;
3313static bool read_exit_child_stopped;
3314static wait_queue_head_t read_exit_wq;
3315
3316// Child kthread which just does an rcutorture reader and exits.
3317static int rcu_torture_read_exit_child(void *trsp_in)
3318{
3319	struct torture_random_state *trsp = trsp_in;
3320
3321	set_user_nice(current, MAX_NICE);
3322	// Minimize time between reading and exiting.
3323	while (!kthread_should_stop())
3324		schedule_timeout_uninterruptible(HZ / 20);
3325	(void)rcu_torture_one_read(trsp, -1);
3326	return 0;
3327}
3328
3329// Parent kthread which creates and destroys read-exit child kthreads.
3330static int rcu_torture_read_exit(void *unused)
3331{
3332	bool errexit = false;
3333	int i;
3334	struct task_struct *tsp;
3335	DEFINE_TORTURE_RANDOM(trs);
3336
3337	// Allocate and initialize.
3338	set_user_nice(current, MAX_NICE);
3339	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
3340
3341	// Each pass through this loop does one read-exit episode.
3342	do {
3343		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
3344		for (i = 0; i < read_exit_burst; i++) {
3345			if (READ_ONCE(read_exit_child_stop))
3346				break;
3347			stutter_wait("rcu_torture_read_exit");
3348			// Spawn child.
3349			tsp = kthread_run(rcu_torture_read_exit_child,
3350					  &trs, "%s", "rcu_torture_read_exit_child");
3351			if (IS_ERR(tsp)) {
3352				TOROUT_ERRSTRING("out of memory");
3353				errexit = true;
3354				break;
3355			}
3356			cond_resched();
3357			kthread_stop(tsp);
3358			n_read_exits++;
3359		}
3360		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
3361		rcu_barrier(); // Wait for task_struct free, avoid OOM.
3362		i = 0;
3363		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
3364			schedule_timeout_uninterruptible(HZ);
3365	} while (!errexit && !READ_ONCE(read_exit_child_stop));
3366
3367	// Clean up and exit.
3368	smp_store_release(&read_exit_child_stopped, true); // After reaping.
3369	smp_mb(); // Store before wakeup.
3370	wake_up(&read_exit_wq);
3371	while (!torture_must_stop())
3372		schedule_timeout_uninterruptible(HZ / 20);
3373	torture_kthread_stopping("rcu_torture_read_exit");
3374	return 0;
3375}
3376
3377static int rcu_torture_read_exit_init(void)
3378{
3379	if (read_exit_burst <= 0)
3380		return 0;
3381	init_waitqueue_head(&read_exit_wq);
3382	read_exit_child_stop = false;
3383	read_exit_child_stopped = false;
3384	return torture_create_kthread(rcu_torture_read_exit, NULL,
3385				      read_exit_task);
3386}
3387
3388static void rcu_torture_read_exit_cleanup(void)
3389{
3390	if (!read_exit_task)
3391		return;
3392	WRITE_ONCE(read_exit_child_stop, true);
3393	smp_mb(); // Above write before wait.
3394	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3395	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
3396}
3397
3398static void rcutorture_test_nmis(int n)
3399{
3400#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3401	int cpu;
3402	int dumpcpu;
3403	int i;
3404
3405	for (i = 0; i < n; i++) {
3406		preempt_disable();
3407		cpu = smp_processor_id();
3408		dumpcpu = cpu + 1;
3409		if (dumpcpu >= nr_cpu_ids)
3410			dumpcpu = 0;
3411		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
3412		dump_cpu_task(dumpcpu);
3413		preempt_enable();
3414		schedule_timeout_uninterruptible(15 * HZ);
3415	}
3416#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3417	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3418#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3419}
3420
3421static enum cpuhp_state rcutor_hp;
3422
3423static void
3424rcu_torture_cleanup(void)
3425{
3426	int firsttime;
3427	int flags = 0;
3428	unsigned long gp_seq = 0;
3429	int i;
3430
3431	if (torture_cleanup_begin()) {
3432		if (cur_ops->cb_barrier != NULL) {
3433			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3434			cur_ops->cb_barrier();
3435		}
3436		if (cur_ops->gp_slow_unregister)
3437			cur_ops->gp_slow_unregister(NULL);
3438		return;
3439	}
3440	if (!cur_ops) {
3441		torture_cleanup_end();
3442		return;
3443	}
3444
3445	rcutorture_test_nmis(test_nmis);
3446
3447	if (cur_ops->gp_kthread_dbg)
3448		cur_ops->gp_kthread_dbg();
3449	rcu_torture_read_exit_cleanup();
3450	rcu_torture_barrier_cleanup();
3451	rcu_torture_fwd_prog_cleanup();
3452	torture_stop_kthread(rcu_torture_stall, stall_task);
3453	torture_stop_kthread(rcu_torture_writer, writer_task);
3454
3455	if (nocb_tasks) {
3456		for (i = 0; i < nrealnocbers; i++)
3457			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3458		kfree(nocb_tasks);
3459		nocb_tasks = NULL;
3460	}
3461
3462	if (reader_tasks) {
3463		for (i = 0; i < nrealreaders; i++)
3464			torture_stop_kthread(rcu_torture_reader,
3465					     reader_tasks[i]);
3466		kfree(reader_tasks);
3467		reader_tasks = NULL;
3468	}
3469	kfree(rcu_torture_reader_mbchk);
3470	rcu_torture_reader_mbchk = NULL;
3471
3472	if (fakewriter_tasks) {
3473		for (i = 0; i < nfakewriters; i++)
3474			torture_stop_kthread(rcu_torture_fakewriter,
3475					     fakewriter_tasks[i]);
3476		kfree(fakewriter_tasks);
3477		fakewriter_tasks = NULL;
3478	}
3479
3480	if (cur_ops->get_gp_data)
3481		cur_ops->get_gp_data(&flags, &gp_seq);
3482	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3483		 cur_ops->name, (long)gp_seq, flags,
3484		 rcutorture_seq_diff(gp_seq, start_gp_seq));
3485	torture_stop_kthread(rcu_torture_stats, stats_task);
3486	torture_stop_kthread(rcu_torture_fqs, fqs_task);
3487	if (rcu_torture_can_boost() && rcutor_hp >= 0)
3488		cpuhp_remove_state(rcutor_hp);
3489
3490	/*
3491	 * Wait for all RCU callbacks to fire, then do torture-type-specific
3492	 * cleanup operations.
3493	 */
3494	if (cur_ops->cb_barrier != NULL) {
3495		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3496		cur_ops->cb_barrier();
3497	}
3498	if (cur_ops->cleanup != NULL)
3499		cur_ops->cleanup();
3500
3501	rcu_torture_mem_dump_obj();
3502
3503	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
3504
3505	if (err_segs_recorded) {
3506		pr_alert("Failure/close-call rcutorture reader segments:\n");
3507		if (rt_read_nsegs == 0)
3508			pr_alert("\t: No segments recorded!!!\n");
3509		firsttime = 1;
3510		for (i = 0; i < rt_read_nsegs; i++) {
3511			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
3512			if (err_segs[i].rt_delay_jiffies != 0) {
3513				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
3514					err_segs[i].rt_delay_jiffies);
3515				firsttime = 0;
3516			}
3517			if (err_segs[i].rt_delay_ms != 0) {
3518				pr_cont("%s%ldms", firsttime ? "" : "+",
3519					err_segs[i].rt_delay_ms);
3520				firsttime = 0;
3521			}
3522			if (err_segs[i].rt_delay_us != 0) {
3523				pr_cont("%s%ldus", firsttime ? "" : "+",
3524					err_segs[i].rt_delay_us);
3525				firsttime = 0;
3526			}
3527			pr_cont("%s\n",
3528				err_segs[i].rt_preempted ? "preempted" : "");
3529
3530		}
3531	}
3532	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
3533		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
3534	else if (torture_onoff_failures())
3535		rcu_torture_print_module_parms(cur_ops,
3536					       "End of test: RCU_HOTPLUG");
3537	else
3538		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
3539	torture_cleanup_end();
3540	if (cur_ops->gp_slow_unregister)
3541		cur_ops->gp_slow_unregister(NULL);
3542}
3543
3544static void rcu_torture_leak_cb(struct rcu_head *rhp)
3545{
3546}
3547
3548static void rcu_torture_err_cb(struct rcu_head *rhp)
3549{
3550	/*
3551	 * This -might- happen due to race conditions, but it is unlikely.
3552	 * The scenario that leads to it is that the first of the pair of
3553	 * duplicate callbacks is queued, someone else starts a grace
3554	 * period that includes that callback, and the second of the pair
3555	 * must therefore wait for the next grace period.  Unlikely, but
3556	 * it can happen.  If it does, the debug-objects subsystem won't
3557	 * have splatted.
3558	 */
3559	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
3560}
3561
3562/*
3563 * Verify that double-free causes debug-objects to complain, but only
3564 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
3565 * cannot be carried out.
3566 */
3567static void rcu_test_debug_objects(void)
3568{
3569	struct rcu_head rh1;
3570	struct rcu_head rh2;
3571	int idx;
3572
3573	if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
3574		pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n",
3575					KBUILD_MODNAME, cur_ops->name);
3576		return;
3577	}
3578
3579	if (WARN_ON_ONCE(cur_ops->debug_objects &&
3580			(!cur_ops->call || !cur_ops->cb_barrier)))
3581		return;
3582
3583	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
3584
3585	init_rcu_head_on_stack(&rh1);
3586	init_rcu_head_on_stack(&rh2);
3587	pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);
3588
3589	/* Try to queue the rh2 pair of callbacks for the same grace period. */
3590	idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
3591	cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
3592	cur_ops->call(&rh2, rcu_torture_leak_cb);
3593	cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
3594	if (rhp) {
3595		cur_ops->call(rhp, rcu_torture_leak_cb);
3596		cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
3597	}
3598	cur_ops->readunlock(idx);
3599
3600	/* Wait for them all to get done so we can safely return. */
3601	cur_ops->cb_barrier();
3602	pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
3603	destroy_rcu_head_on_stack(&rh1);
3604	destroy_rcu_head_on_stack(&rh2);
3605	kfree(rhp);
3606}
3607
3608static void rcutorture_sync(void)
3609{
3610	static unsigned long n;
3611
3612	if (cur_ops->sync && !(++n & 0xfff))
3613		cur_ops->sync();
3614}
3615
3616static DEFINE_MUTEX(mut0);
3617static DEFINE_MUTEX(mut1);
3618static DEFINE_MUTEX(mut2);
3619static DEFINE_MUTEX(mut3);
3620static DEFINE_MUTEX(mut4);
3621static DEFINE_MUTEX(mut5);
3622static DEFINE_MUTEX(mut6);
3623static DEFINE_MUTEX(mut7);
3624static DEFINE_MUTEX(mut8);
3625static DEFINE_MUTEX(mut9);
3626
3627static DECLARE_RWSEM(rwsem0);
3628static DECLARE_RWSEM(rwsem1);
3629static DECLARE_RWSEM(rwsem2);
3630static DECLARE_RWSEM(rwsem3);
3631static DECLARE_RWSEM(rwsem4);
3632static DECLARE_RWSEM(rwsem5);
3633static DECLARE_RWSEM(rwsem6);
3634static DECLARE_RWSEM(rwsem7);
3635static DECLARE_RWSEM(rwsem8);
3636static DECLARE_RWSEM(rwsem9);
3637
3638DEFINE_STATIC_SRCU(srcu0);
3639DEFINE_STATIC_SRCU(srcu1);
3640DEFINE_STATIC_SRCU(srcu2);
3641DEFINE_STATIC_SRCU(srcu3);
3642DEFINE_STATIC_SRCU(srcu4);
3643DEFINE_STATIC_SRCU(srcu5);
3644DEFINE_STATIC_SRCU(srcu6);
3645DEFINE_STATIC_SRCU(srcu7);
3646DEFINE_STATIC_SRCU(srcu8);
3647DEFINE_STATIC_SRCU(srcu9);
3648
3649static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
3650			     int cyclelen, int deadlock)
3651{
3652	int j = i + 1;
3653
3654	if (j >= cyclelen)
3655		j = deadlock ? 0 : -1;
3656	if (j >= 0)
3657		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
3658	else
3659		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
3660	return j;
3661}
3662
3663// Test lockdep on SRCU-based deadlock scenarios.
3664static void rcu_torture_init_srcu_lockdep(void)
3665{
3666	int cyclelen;
3667	int deadlock;
3668	bool err = false;
3669	int i;
3670	int j;
3671	int idx;
3672	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
3673				 &mut5, &mut6, &mut7, &mut8, &mut9 };
3674	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
3675					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
3676	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
3677					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
3678	int testtype;
3679
3680	if (!test_srcu_lockdep)
3681		return;
3682
3683	deadlock = test_srcu_lockdep / 1000;
3684	testtype = (test_srcu_lockdep / 10) % 100;
3685	cyclelen = test_srcu_lockdep % 10;
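	// Example: test_srcu_lockdep=1013 decodes to deadlock=1,
	// testtype=01 (SRCU/mutex), and cyclelen=3.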
3686	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
3687	if (WARN_ONCE(deadlock != !!deadlock,
3688		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
3689		      __func__, test_srcu_lockdep, deadlock))
3690		err = true;
3691	if (WARN_ONCE(cyclelen <= 0,
3692		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
3693		      __func__, test_srcu_lockdep, cyclelen))
3694		err = true;
3695	if (err)
3696		goto err_out;
3697
3698	if (testtype == 0) {
3699		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
3700			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3701		if (deadlock && cyclelen == 1)
3702			pr_info("%s: Expect hang.\n", __func__);
3703		for (i = 0; i < cyclelen; i++) {
3704			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
3705					      "srcu_read_unlock", i, cyclelen, deadlock);
3706			idx = srcu_read_lock(srcus[i]);
3707			if (j >= 0)
3708				synchronize_srcu(srcus[j]);
3709			srcu_read_unlock(srcus[i], idx);
3710		}
3711		return;
3712	}
3713
3714	if (testtype == 1) {
3715		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
3716			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3717		for (i = 0; i < cyclelen; i++) {
3718			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
3719				__func__, i, i, i, i);
3720			idx = srcu_read_lock(srcus[i]);
3721			mutex_lock(muts[i]);
3722			mutex_unlock(muts[i]);
3723			srcu_read_unlock(srcus[i], idx);
3724
3725			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
3726					      "mutex_unlock", i, cyclelen, deadlock);
3727			mutex_lock(muts[i]);
3728			if (j >= 0)
3729				synchronize_srcu(srcus[j]);
3730			mutex_unlock(muts[i]);
3731		}
3732		return;
3733	}
3734
3735	if (testtype == 2) {
3736		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
3737			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3738		for (i = 0; i < cyclelen; i++) {
3739			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
3740				__func__, i, i, i, i);
3741			idx = srcu_read_lock(srcus[i]);
3742			down_read(rwsems[i]);
3743			up_read(rwsems[i]);
3744			srcu_read_unlock(srcus[i], idx);
3745
3746			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
3747					      "up_write", i, cyclelen, deadlock);
3748			down_write(rwsems[i]);
3749			if (j >= 0)
3750				synchronize_srcu(srcus[j]);
3751			up_write(rwsems[i]);
3752		}
3753		return;
3754	}
3755
3756#ifdef CONFIG_TASKS_TRACE_RCU
3757	if (testtype == 3) {
3758		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
3759			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3760		if (deadlock && cyclelen == 1)
3761			pr_info("%s: Expect hang.\n", __func__);
3762		for (i = 0; i < cyclelen; i++) {
3763			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
3764			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
3765						     : "synchronize_srcu";
3766			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";
3767
3768			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
3769			if (i == 0)
3770				rcu_read_lock_trace();
3771			else
3772				idx = srcu_read_lock(srcus[i]);
3773			if (j >= 0) {
3774				if (i == cyclelen - 1)
3775					synchronize_rcu_tasks_trace();
3776				else
3777					synchronize_srcu(srcus[j]);
3778			}
3779			if (i == 0)
3780				rcu_read_unlock_trace();
3781			else
3782				srcu_read_unlock(srcus[i], idx);
3783		}
3784		return;
3785	}
3786#endif // #ifdef CONFIG_TASKS_TRACE_RCU
3787
3788err_out:
3789	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
3790	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
3791	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
3792	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
3793	pr_info("%s: L: Cycle length.\n", __func__);
3794	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
3795		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
3796}
3797
3798static int __init
3799rcu_torture_init(void)
3800{
3801	long i;
3802	int cpu;
3803	int firsterr = 0;
3804	int flags = 0;
3805	unsigned long gp_seq = 0;
3806	static struct rcu_torture_ops *torture_ops[] = {
3807		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
3808		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
3809		&trivial_ops,
3810	};
3811
3812	if (!torture_init_begin(torture_type, verbose))
3813		return -EBUSY;
3814
3815	/* Process args and tell the world that the torturer is on the job. */
3816	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
3817		cur_ops = torture_ops[i];
3818		if (strcmp(torture_type, cur_ops->name) == 0)
3819			break;
3820	}
3821	if (i == ARRAY_SIZE(torture_ops)) {
3822		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3823			 torture_type);
3824		pr_alert("rcu-torture types:");
3825		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
3826			pr_cont(" %s", torture_ops[i]->name);
3827		pr_cont("\n");
3828		firsterr = -EINVAL;
3829		cur_ops = NULL;
3830		goto unwind;
3831	}
3832	if (cur_ops->fqs == NULL && fqs_duration != 0) {
3833		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3834		fqs_duration = 0;
3835	}
3836	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
3837				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
3838		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
3839			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
3840		nocbs_nthreads = 0;
3841	}
3842	if (cur_ops->init)
3843		cur_ops->init();
3844
3845	rcu_torture_init_srcu_lockdep();
3846
3847	if (nreaders >= 0) {
3848		nrealreaders = nreaders;
3849	} else {
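		// Negative nreaders sizes the reader pool relative to the
		// CPU count: with the usual default of -1, this yields
		// num_online_cpus() - 1 readers (floored at one below).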
3850		nrealreaders = num_online_cpus() - 2 - nreaders;
3851		if (nrealreaders <= 0)
3852			nrealreaders = 1;
3853	}
3854	rcu_torture_print_module_parms(cur_ops, "Start of test");
3855	if (cur_ops->get_gp_data)
3856		cur_ops->get_gp_data(&flags, &gp_seq);
3857	start_gp_seq = gp_seq;
3858	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
3859		 cur_ops->name, (long)gp_seq, flags);
3860
3861	/* Set up the freelist. */
3862
3863	INIT_LIST_HEAD(&rcu_torture_freelist);
3864	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3865		rcu_tortures[i].rtort_mbtest = 0;
3866		list_add_tail(&rcu_tortures[i].rtort_free,
3867			      &rcu_torture_freelist);
3868	}
3869
3870	/* Initialize the statistics so that each run gets its own numbers. */
3871
3872	rcu_torture_current = NULL;
3873	rcu_torture_current_version = 0;
3874	atomic_set(&n_rcu_torture_alloc, 0);
3875	atomic_set(&n_rcu_torture_alloc_fail, 0);
3876	atomic_set(&n_rcu_torture_free, 0);
3877	atomic_set(&n_rcu_torture_mberror, 0);
3878	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3879	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3880	atomic_set(&n_rcu_torture_error, 0);
3881	n_rcu_torture_barrier_error = 0;
3882	n_rcu_torture_boost_ktrerror = 0;
3883	n_rcu_torture_boost_failure = 0;
3884	n_rcu_torture_boosts = 0;
3885	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3886		atomic_set(&rcu_torture_wcount[i], 0);
3887	for_each_possible_cpu(cpu) {
3888		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3889			per_cpu(rcu_torture_count, cpu)[i] = 0;
3890			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3891		}
3892	}
3893	err_segs_recorded = 0;
3894	rt_read_nsegs = 0;
3895
3896	/* Start up the kthreads. */
3897
3898	rcu_torture_write_types();
3899	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3900					  writer_task);
3901	if (torture_init_error(firsterr))
3902		goto unwind;
3903	if (nfakewriters > 0) {
3904		fakewriter_tasks = kcalloc(nfakewriters,
3905					   sizeof(fakewriter_tasks[0]),
3906					   GFP_KERNEL);
3907		if (fakewriter_tasks == NULL) {
3908			TOROUT_ERRSTRING("out of memory");
3909			firsterr = -ENOMEM;
3910			goto unwind;
3911		}
3912	}
3913	for (i = 0; i < nfakewriters; i++) {
3914		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3915						  NULL, fakewriter_tasks[i]);
3916		if (torture_init_error(firsterr))
3917			goto unwind;
3918	}
3919	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3920			       GFP_KERNEL);
3921	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3922					   GFP_KERNEL);
3923	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3924		TOROUT_ERRSTRING("out of memory");
3925		firsterr = -ENOMEM;
3926		goto unwind;
3927	}
3928	for (i = 0; i < nrealreaders; i++) {
3929		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3930		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3931						  reader_tasks[i]);
3932		if (torture_init_error(firsterr))
3933			goto unwind;
3934	}
3935	nrealnocbers = nocbs_nthreads;
3936	if (WARN_ON(nrealnocbers < 0))
3937		nrealnocbers = 1;
3938	if (WARN_ON(nocbs_toggle < 0))
3939		nocbs_toggle = HZ;
3940	if (nrealnocbers > 0) {
3941		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3942		if (nocb_tasks == NULL) {
3943			TOROUT_ERRSTRING("out of memory");
3944			firsterr = -ENOMEM;
3945			goto unwind;
3946		}
3947	} else {
3948		nocb_tasks = NULL;
3949	}
3950	for (i = 0; i < nrealnocbers; i++) {
3951		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3952		if (torture_init_error(firsterr))
3953			goto unwind;
3954	}
3955	if (stat_interval > 0) {
3956		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3957						  stats_task);
3958		if (torture_init_error(firsterr))
3959			goto unwind;
3960	}
3961	if (test_no_idle_hz && shuffle_interval > 0) {
3962		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3963		if (torture_init_error(firsterr))
3964			goto unwind;
3965	}
3966	if (stutter < 0)
3967		stutter = 0;
3968	if (stutter) {
3969		int t;
3970
3971		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3972		firsterr = torture_stutter_init(stutter * HZ, t);
3973		if (torture_init_error(firsterr))
3974			goto unwind;
3975	}
3976	if (fqs_duration < 0)
3977		fqs_duration = 0;
3978	if (fqs_holdoff < 0)
3979		fqs_holdoff = 0;
3980	if (fqs_duration && fqs_holdoff) {
3981		/* Create the fqs thread */
3982		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3983						  fqs_task);
3984		if (torture_init_error(firsterr))
3985			goto unwind;
3986	}
3987	if (test_boost_interval < 1)
3988		test_boost_interval = 1;
3989	if (test_boost_duration < 2)
3990		test_boost_duration = 2;
3991	if (rcu_torture_can_boost()) {
3992
3993		boost_starttime = jiffies + test_boost_interval * HZ;
3994
3995		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3996					     rcutorture_booster_init,
3997					     rcutorture_booster_cleanup);
3998		rcutor_hp = firsterr;
3999		if (torture_init_error(firsterr))
4000			goto unwind;
4001	}
4002	shutdown_jiffies = jiffies + shutdown_secs * HZ;
4003	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
4004	if (torture_init_error(firsterr))
4005		goto unwind;
4006	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
4007				      rcutorture_sync);
4008	if (torture_init_error(firsterr))
4009		goto unwind;
4010	firsterr = rcu_torture_stall_init();
4011	if (torture_init_error(firsterr))
4012		goto unwind;
4013	firsterr = rcu_torture_fwd_prog_init();
4014	if (torture_init_error(firsterr))
4015		goto unwind;
4016	firsterr = rcu_torture_barrier_init();
4017	if (torture_init_error(firsterr))
4018		goto unwind;
4019	firsterr = rcu_torture_read_exit_init();
4020	if (torture_init_error(firsterr))
4021		goto unwind;
4022	if (object_debug)
4023		rcu_test_debug_objects();
4024	torture_init_end();
4025	if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
4026		cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);
4027	return 0;
4028
4029unwind:
4030	torture_init_end();
4031	rcu_torture_cleanup();
4032	if (shutdown_secs) {
4033		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
4034		kernel_power_off();
4035	}
4036	return firsterr;
4037}
4038
4039module_init(rcu_torture_init);
4040module_exit(rcu_torture_cleanup);