kernel/locking/locktorture.c, v6.2
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Module-based torture test facility for locking
   4 *
   5 * Copyright (C) IBM Corporation, 2014
   6 *
   7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8 *          Davidlohr Bueso <dave@stgolabs.net>
   9 *	Based on kernel/rcu/torture.c.
  10 */
  11
  12#define pr_fmt(fmt) fmt
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/kthread.h>
  17#include <linux/sched/rt.h>
  18#include <linux/spinlock.h>
  19#include <linux/mutex.h>
  20#include <linux/rwsem.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/sched.h>
  24#include <uapi/linux/sched/types.h>
  25#include <linux/rtmutex.h>
  26#include <linux/atomic.h>
  27#include <linux/moduleparam.h>
  28#include <linux/delay.h>
  29#include <linux/slab.h>
  30#include <linux/torture.h>
  31#include <linux/reboot.h>
  32
  33MODULE_LICENSE("GPL");
  34MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
  35
  36torture_param(int, nwriters_stress, -1,
  37	     "Number of write-locking stress-test threads");
  38torture_param(int, nreaders_stress, -1,
  39	     "Number of read-locking stress-test threads");
  40torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
  41torture_param(int, onoff_interval, 0,
  42	     "Time between CPU hotplugs (s), 0=disable");
  43torture_param(int, shuffle_interval, 3,
  44	     "Number of jiffies between shuffles, 0=disable");
  45torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
  46torture_param(int, stat_interval, 60,
  47	     "Number of seconds between stats printk()s");
  48torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
  49torture_param(int, verbose, 1,
  50	     "Enable verbose debugging printk()s");
  51
  52static char *torture_type = "spin_lock";
  53module_param(torture_type, charp, 0444);
  54MODULE_PARM_DESC(torture_type,
  55		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
  56
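For reference, torture_param() is a helper macro from include/linux/torture.h: it declares a static variable, exposes it as a read-only module parameter, and attaches the description. Roughly (paraphrased from the header; check it for the exact definition):

	#define torture_param(type, name, init, msg) \
		static type name = init; \
		module_param(name, type, 0444); \
		MODULE_PARM_DESC(name, msg)

So every torture_param() above appears under /sys/module/locktorture/parameters/ and can be set on the module command line.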
  57static struct task_struct *stats_task;
  58static struct task_struct **writer_tasks;
  59static struct task_struct **reader_tasks;
  60
  61static bool lock_is_write_held;
  62static atomic_t lock_is_read_held;
  63static unsigned long last_lock_release;
  64
  65struct lock_stress_stats {
  66	long n_lock_fail;
  67	long n_lock_acquired;
  68};
  69
  70/* Forward reference. */
  71static void lock_torture_cleanup(void);
  72
  73/*
  74 * Operations vector for selecting different types of tests.
  75 */
  76struct lock_torture_ops {
  77	void (*init)(void);
  78	void (*exit)(void);
  79	int (*writelock)(int tid);
  80	void (*write_delay)(struct torture_random_state *trsp);
  81	void (*task_boost)(struct torture_random_state *trsp);
  82	void (*writeunlock)(int tid);
  83	int (*readlock)(int tid);
  84	void (*read_delay)(struct torture_random_state *trsp);
  85	void (*readunlock)(int tid);
  86
  87	unsigned long flags; /* for irq spinlocks */
  88	const char *name;
  89};
  90
  91struct lock_torture_cxt {
  92	int nrealwriters_stress;
  93	int nrealreaders_stress;
  94	bool debug_lock;
  95	bool init_called;
  96	atomic_t n_lock_torture_errors;
  97	struct lock_torture_ops *cur_ops;
  98	struct lock_stress_stats *lwsa; /* writer statistics */
  99	struct lock_stress_stats *lrsa; /* reader statistics */
 100};
 101static struct lock_torture_cxt cxt = { 0, 0, false, false,
 102				       ATOMIC_INIT(0),
 103				       NULL, NULL};
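Every lock flavor in this file fills in one of these operations vectors. As a sketch of the extension pattern (all "mylock" names here are illustrative, not part of the file), a new lock type needs little more than lock/unlock callbacks, an ops structure, and an entry in the torture_ops[] array in lock_torture_init(); the delay callback can be borrowed from an existing flavor such as torture_spin_lock_write_delay() defined below:

	static DEFINE_SPINLOCK(torture_mylock);	/* hypothetical lock */

	static int torture_mylock_write_lock(int tid __maybe_unused)
	{
		spin_lock(&torture_mylock);
		return 0;
	}

	static void torture_mylock_write_unlock(int tid __maybe_unused)
	{
		spin_unlock(&torture_mylock);
	}

	static struct lock_torture_ops mylock_ops = {
		.writelock	= torture_mylock_write_lock,
		.write_delay	= torture_spin_lock_write_delay,
		.task_boost	= torture_boost_dummy,
		.writeunlock	= torture_mylock_write_unlock,
		.name		= "mylock",	/* selected via torture_type=mylock */
	};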
 104/*
 105 * Definitions for lock torture testing.
 106 */
 107
 108static int torture_lock_busted_write_lock(int tid __maybe_unused)
 109{
 110	return 0;  /* BUGGY, do not use in real life!!! */
 111}
 112
 113static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
 114{
 115	const unsigned long longdelay_ms = 100;
 116
 117	/* We want a long delay occasionally to force massive contention.  */
 118	if (!(torture_random(trsp) %
 119	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 120		mdelay(longdelay_ms);
 121	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 122		torture_preempt_schedule();  /* Allow test to be preempted. */
 123}
 124
 125static void torture_lock_busted_write_unlock(int tid __maybe_unused)
 126{
 127	  /* BUGGY, do not use in real life!!! */
 128}
 129
 130static void torture_boost_dummy(struct torture_random_state *trsp)
 131{
 132	/* Only rtmutexes care about priority */
 133}
 134
 135static struct lock_torture_ops lock_busted_ops = {
 136	.writelock	= torture_lock_busted_write_lock,
 137	.write_delay	= torture_lock_busted_write_delay,
 138	.task_boost     = torture_boost_dummy,
 139	.writeunlock	= torture_lock_busted_write_unlock,
 140	.readlock       = NULL,
 141	.read_delay     = NULL,
 142	.readunlock     = NULL,
 143	.name		= "lock_busted"
 144};
 145
 146static DEFINE_SPINLOCK(torture_spinlock);
 147
 148static int torture_spin_lock_write_lock(int tid __maybe_unused)
 149__acquires(torture_spinlock)
 150{
 151	spin_lock(&torture_spinlock);
 152	return 0;
 153}
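The __acquires()/__releases() markers on these functions are annotations for the sparse static checker and compile to nothing in normal builds; paraphrasing include/linux/compiler_types.h:

	#ifdef __CHECKER__
	# define __acquires(x)	__attribute__((context(x, 0, 1)))
	# define __releases(x)	__attribute__((context(x, 1, 0)))
	#else
	# define __acquires(x)
	# define __releases(x)
	#endif

They let sparse verify that every path through a function changes the lock context as advertised.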
 154
 155static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
 156{
 157	const unsigned long shortdelay_us = 2;
 158	const unsigned long longdelay_ms = 100;
 159
 160	/* We want a short delay mostly to emulate likely code, and
 161	 * we want a long delay occasionally to force massive contention.
 162	 */
 163	if (!(torture_random(trsp) %
 164	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 165		mdelay(longdelay_ms);
 166	if (!(torture_random(trsp) %
 167	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
 168		udelay(shortdelay_us);
 169	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 170		torture_preempt_schedule();  /* Allow test to be preempted. */
 171}
 172
 173static void torture_spin_lock_write_unlock(int tid __maybe_unused)
 174__releases(torture_spinlock)
 175{
 176	spin_unlock(&torture_spinlock);
 177}
 178
 179static struct lock_torture_ops spin_lock_ops = {
 180	.writelock	= torture_spin_lock_write_lock,
 181	.write_delay	= torture_spin_lock_write_delay,
 182	.task_boost     = torture_boost_dummy,
 183	.writeunlock	= torture_spin_lock_write_unlock,
 184	.readlock       = NULL,
 185	.read_delay     = NULL,
 186	.readunlock     = NULL,
 187	.name		= "spin_lock"
 188};
 189
 190static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
 191__acquires(torture_spinlock)
 192{
 193	unsigned long flags;
 194
 195	spin_lock_irqsave(&torture_spinlock, flags);
 196	cxt.cur_ops->flags = flags;
 197	return 0;
 198}
 199
 200static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
 201__releases(torture_spinlock)
 202{
 203	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
 204}
 205
 206static struct lock_torture_ops spin_lock_irq_ops = {
 207	.writelock	= torture_spin_lock_write_lock_irq,
 208	.write_delay	= torture_spin_lock_write_delay,
 209	.task_boost     = torture_boost_dummy,
 210	.writeunlock	= torture_lock_spin_write_unlock_irq,
 211	.readlock       = NULL,
 212	.read_delay     = NULL,
 213	.readunlock     = NULL,
 214	.name		= "spin_lock_irq"
 215};
 216
 217static DEFINE_RWLOCK(torture_rwlock);
 218
 219static int torture_rwlock_write_lock(int tid __maybe_unused)
 220__acquires(torture_rwlock)
 221{
 222	write_lock(&torture_rwlock);
 223	return 0;
 224}
 225
 226static void torture_rwlock_write_delay(struct torture_random_state *trsp)
 227{
 228	const unsigned long shortdelay_us = 2;
 229	const unsigned long longdelay_ms = 100;
 230
 231	/* We want a short delay mostly to emulate likely code, and
 232	 * we want a long delay occasionally to force massive contention.
 233	 */
 234	if (!(torture_random(trsp) %
 235	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 236		mdelay(longdelay_ms);
 237	else
 238		udelay(shortdelay_us);
 239}
 240
 241static void torture_rwlock_write_unlock(int tid __maybe_unused)
 242__releases(torture_rwlock)
 243{
 244	write_unlock(&torture_rwlock);
 245}
 246
 247static int torture_rwlock_read_lock(int tid __maybe_unused)
 248__acquires(torture_rwlock)
 249{
 250	read_lock(&torture_rwlock);
 251	return 0;
 252}
 253
 254static void torture_rwlock_read_delay(struct torture_random_state *trsp)
 255{
 256	const unsigned long shortdelay_us = 10;
 257	const unsigned long longdelay_ms = 100;
 258
 259	/* We want a short delay mostly to emulate likely code, and
 260	 * we want a long delay occasionally to force massive contention.
 261	 */
 262	if (!(torture_random(trsp) %
 263	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
 264		mdelay(longdelay_ms);
 265	else
 266		udelay(shortdelay_us);
 267}
 268
 269static void torture_rwlock_read_unlock(int tid __maybe_unused)
 270__releases(torture_rwlock)
 271{
 272	read_unlock(&torture_rwlock);
 273}
 274
 275static struct lock_torture_ops rw_lock_ops = {
 276	.writelock	= torture_rwlock_write_lock,
 277	.write_delay	= torture_rwlock_write_delay,
 278	.task_boost     = torture_boost_dummy,
 279	.writeunlock	= torture_rwlock_write_unlock,
 280	.readlock       = torture_rwlock_read_lock,
 281	.read_delay     = torture_rwlock_read_delay,
 282	.readunlock     = torture_rwlock_read_unlock,
 283	.name		= "rw_lock"
 284};
 285
 286static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
 287__acquires(torture_rwlock)
 288{
 289	unsigned long flags;
 290
 291	write_lock_irqsave(&torture_rwlock, flags);
 292	cxt.cur_ops->flags = flags;
 293	return 0;
 294}
 295
 296static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
 297__releases(torture_rwlock)
 298{
 299	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
 300}
 301
 302static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
 303__acquires(torture_rwlock)
 304{
 305	unsigned long flags;
 306
 307	read_lock_irqsave(&torture_rwlock, flags);
 308	cxt.cur_ops->flags = flags;
 309	return 0;
 310}
 311
 312static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
 313__releases(torture_rwlock)
 314{
 315	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
 316}
 317
 318static struct lock_torture_ops rw_lock_irq_ops = {
 319	.writelock	= torture_rwlock_write_lock_irq,
 320	.write_delay	= torture_rwlock_write_delay,
 321	.task_boost     = torture_boost_dummy,
 322	.writeunlock	= torture_rwlock_write_unlock_irq,
 323	.readlock       = torture_rwlock_read_lock_irq,
 324	.read_delay     = torture_rwlock_read_delay,
 325	.readunlock     = torture_rwlock_read_unlock_irq,
 326	.name		= "rw_lock_irq"
 327};
 328
 329static DEFINE_MUTEX(torture_mutex);
 330
 331static int torture_mutex_lock(int tid __maybe_unused)
 332__acquires(torture_mutex)
 333{
 334	mutex_lock(&torture_mutex);
 335	return 0;
 336}
 337
 338static void torture_mutex_delay(struct torture_random_state *trsp)
 339{
 340	const unsigned long longdelay_ms = 100;
 341
 342	/* We want a long delay occasionally to force massive contention.  */
 343	if (!(torture_random(trsp) %
 344	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 345		mdelay(longdelay_ms * 5);
 346	else
 347		mdelay(longdelay_ms / 5);
 348	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 349		torture_preempt_schedule();  /* Allow test to be preempted. */
 350}
 351
 352static void torture_mutex_unlock(int tid __maybe_unused)
 353__releases(torture_mutex)
 354{
 355	mutex_unlock(&torture_mutex);
 356}
 357
 358static struct lock_torture_ops mutex_lock_ops = {
 359	.writelock	= torture_mutex_lock,
 360	.write_delay	= torture_mutex_delay,
 361	.task_boost     = torture_boost_dummy,
 362	.writeunlock	= torture_mutex_unlock,
 363	.readlock       = NULL,
 364	.read_delay     = NULL,
 365	.readunlock     = NULL,
 366	.name		= "mutex_lock"
 367};
 368
 369#include <linux/ww_mutex.h>
 370/*
 371 * The torture ww_mutexes should belong to the same lock class as
 372 * torture_ww_class to avoid lockdep problem. The ww_mutex_init()
 373 * function is called for initialization to ensure that.
 374 */
 375static DEFINE_WD_CLASS(torture_ww_class);
 376static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
 377static struct ww_acquire_ctx *ww_acquire_ctxs;
 378
 379static void torture_ww_mutex_init(void)
 380{
 381	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
 382	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
 383	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
 384
 385	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
 386					sizeof(*ww_acquire_ctxs),
 387					GFP_KERNEL);
 388	if (!ww_acquire_ctxs)
 389		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
 390}
 391
 392static void torture_ww_mutex_exit(void)
 393{
 394	kfree(ww_acquire_ctxs);
 395}
 396
 397static int torture_ww_mutex_lock(int tid)
 398__acquires(torture_ww_mutex_0)
 399__acquires(torture_ww_mutex_1)
 400__acquires(torture_ww_mutex_2)
 401{
 402	LIST_HEAD(list);
 403	struct reorder_lock {
 404		struct list_head link;
 405		struct ww_mutex *lock;
 406	} locks[3], *ll, *ln;
 407	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
 408
 409	locks[0].lock = &torture_ww_mutex_0;
 410	list_add(&locks[0].link, &list);
 411
 412	locks[1].lock = &torture_ww_mutex_1;
 413	list_add(&locks[1].link, &list);
 414
 415	locks[2].lock = &torture_ww_mutex_2;
 416	list_add(&locks[2].link, &list);
 417
 418	ww_acquire_init(ctx, &torture_ww_class);
 419
 420	list_for_each_entry(ll, &list, link) {
 421		int err;
 422
 423		err = ww_mutex_lock(ll->lock, ctx);
 424		if (!err)
 425			continue;
 426
 427		ln = ll;
 428		list_for_each_entry_continue_reverse(ln, &list, link)
 429			ww_mutex_unlock(ln->lock);
 430
 431		if (err != -EDEADLK)
 432			return err;
 433
 434		ww_mutex_lock_slow(ll->lock, ctx);
 435		list_move(&ll->link, &list);
 436	}
 437
 438	return 0;
 439}
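This is the canonical wait/wound backoff dance: on -EDEADLK, release everything already held, sleep on the contended mutex with ww_mutex_lock_slow(), then retry the remaining locks; the list_move() keeps the contended lock at the head so it is taken first on the retry. A minimal two-lock rendition of the same pattern, for orientation (a sketch with hypothetical mutexes a and b in torture_ww_class; a full version loops, as the list-based code above does):

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &torture_ww_class);
	ww_mutex_lock(&a, &ctx);		/* first lock cannot deadlock */
	err = ww_mutex_lock(&b, &ctx);
	if (err == -EDEADLK) {
		ww_mutex_unlock(&a);
		ww_mutex_lock_slow(&b, &ctx);	/* sleep until b is ours */
		ww_mutex_lock(&a, &ctx);	/* retry; may -EDEADLK again */
	}
	/* ... critical section, then unlock both and ww_acquire_fini(&ctx) */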
 440
 441static void torture_ww_mutex_unlock(int tid)
 442__releases(torture_ww_mutex_0)
 443__releases(torture_ww_mutex_1)
 444__releases(torture_ww_mutex_2)
 445{
 446	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
 447
 448	ww_mutex_unlock(&torture_ww_mutex_0);
 449	ww_mutex_unlock(&torture_ww_mutex_1);
 450	ww_mutex_unlock(&torture_ww_mutex_2);
 451	ww_acquire_fini(ctx);
 452}
 453
 454static struct lock_torture_ops ww_mutex_lock_ops = {
 455	.init		= torture_ww_mutex_init,
 456	.exit		= torture_ww_mutex_exit,
 457	.writelock	= torture_ww_mutex_lock,
 458	.write_delay	= torture_mutex_delay,
 459	.task_boost     = torture_boost_dummy,
 460	.writeunlock	= torture_ww_mutex_unlock,
 461	.readlock       = NULL,
 462	.read_delay     = NULL,
 463	.readunlock     = NULL,
 464	.name		= "ww_mutex_lock"
 465};
 466
 467#ifdef CONFIG_RT_MUTEXES
 468static DEFINE_RT_MUTEX(torture_rtmutex);
 469
 470static int torture_rtmutex_lock(int tid __maybe_unused)
 471__acquires(torture_rtmutex)
 472{
 473	rt_mutex_lock(&torture_rtmutex);
 474	return 0;
 475}
 476
 477static void torture_rtmutex_boost(struct torture_random_state *trsp)
 478{
 479	const unsigned int factor = 50000; /* yes, quite arbitrary */
 480
 481	if (!rt_task(current)) {
 482		/*
 483		 * Boost priority once every ~50k operations. When the
  484		 * task tries to take the lock, the rtmutex will account for
  485		 * the new priority, and do any corresponding pi-dance.
 486		 */
 487		if (trsp && !(torture_random(trsp) %
 488			      (cxt.nrealwriters_stress * factor))) {
 489			sched_set_fifo(current);
 490		} else /* common case, do nothing */
 491			return;
 492	} else {
 493		/*
 494		 * The task will remain boosted for another ~500k operations,
 495		 * then restored back to its original prio, and so forth.
 496		 *
 497		 * When @trsp is nil, we want to force-reset the task for
 498		 * stopping the kthread.
 499		 */
 500		if (!trsp || !(torture_random(trsp) %
 501			       (cxt.nrealwriters_stress * factor * 2))) {
 502			sched_set_normal(current, 0);
 503		} else /* common case, do nothing */
 504			return;
 505	}
 506}
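Put differently: a non-RT writer promotes itself to SCHED_FIFO with probability 1/(nrealwriters_stress * 50000) per lock operation, and a boosted writer drops back to SCHED_NORMAL with half that probability, so boosted stretches last on average about twice as long as unboosted ones. The NULL-trsp case is the force-reset used when the writer kthreads are being stopped.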
 507
 508static void torture_rtmutex_delay(struct torture_random_state *trsp)
 509{
 510	const unsigned long shortdelay_us = 2;
 511	const unsigned long longdelay_ms = 100;
 512
 513	/*
 514	 * We want a short delay mostly to emulate likely code, and
 515	 * we want a long delay occasionally to force massive contention.
 516	 */
 517	if (!(torture_random(trsp) %
 518	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 519		mdelay(longdelay_ms);
 520	if (!(torture_random(trsp) %
 521	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
 522		udelay(shortdelay_us);
 523	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 524		torture_preempt_schedule();  /* Allow test to be preempted. */
 525}
 526
 527static void torture_rtmutex_unlock(int tid __maybe_unused)
 528__releases(torture_rtmutex)
 529{
 530	rt_mutex_unlock(&torture_rtmutex);
 531}
 532
 533static struct lock_torture_ops rtmutex_lock_ops = {
 534	.writelock	= torture_rtmutex_lock,
 535	.write_delay	= torture_rtmutex_delay,
 536	.task_boost     = torture_rtmutex_boost,
 537	.writeunlock	= torture_rtmutex_unlock,
 538	.readlock       = NULL,
 539	.read_delay     = NULL,
 540	.readunlock     = NULL,
 541	.name		= "rtmutex_lock"
 542};
 543#endif
 544
 545static DECLARE_RWSEM(torture_rwsem);
 546static int torture_rwsem_down_write(int tid __maybe_unused)
 547__acquires(torture_rwsem)
 548{
 549	down_write(&torture_rwsem);
 550	return 0;
 551}
 552
 553static void torture_rwsem_write_delay(struct torture_random_state *trsp)
 554{
 555	const unsigned long longdelay_ms = 100;
 556
 557	/* We want a long delay occasionally to force massive contention.  */
 558	if (!(torture_random(trsp) %
 559	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 560		mdelay(longdelay_ms * 10);
 561	else
 562		mdelay(longdelay_ms / 10);
 563	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 564		torture_preempt_schedule();  /* Allow test to be preempted. */
 565}
 566
 567static void torture_rwsem_up_write(int tid __maybe_unused)
 568__releases(torture_rwsem)
 569{
 570	up_write(&torture_rwsem);
 571}
 572
 573static int torture_rwsem_down_read(int tid __maybe_unused)
 574__acquires(torture_rwsem)
 575{
 576	down_read(&torture_rwsem);
 577	return 0;
 578}
 579
 580static void torture_rwsem_read_delay(struct torture_random_state *trsp)
 581{
 582	const unsigned long longdelay_ms = 100;
 583
 584	/* We want a long delay occasionally to force massive contention.  */
 585	if (!(torture_random(trsp) %
 586	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
 587		mdelay(longdelay_ms * 2);
 588	else
 589		mdelay(longdelay_ms / 2);
 590	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
 591		torture_preempt_schedule();  /* Allow test to be preempted. */
 592}
 593
 594static void torture_rwsem_up_read(int tid __maybe_unused)
 595__releases(torture_rwsem)
 596{
 597	up_read(&torture_rwsem);
 598}
 599
 600static struct lock_torture_ops rwsem_lock_ops = {
 601	.writelock	= torture_rwsem_down_write,
 602	.write_delay	= torture_rwsem_write_delay,
 603	.task_boost     = torture_boost_dummy,
 604	.writeunlock	= torture_rwsem_up_write,
 605	.readlock       = torture_rwsem_down_read,
 606	.read_delay     = torture_rwsem_read_delay,
 607	.readunlock     = torture_rwsem_up_read,
 608	.name		= "rwsem_lock"
 609};
 610
 611#include <linux/percpu-rwsem.h>
 612static struct percpu_rw_semaphore pcpu_rwsem;
 613
 614static void torture_percpu_rwsem_init(void)
 615{
 616	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
 617}
 618
 619static void torture_percpu_rwsem_exit(void)
 620{
 621	percpu_free_rwsem(&pcpu_rwsem);
 622}
 623
 624static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
 625__acquires(pcpu_rwsem)
 626{
 627	percpu_down_write(&pcpu_rwsem);
 628	return 0;
 629}
 630
 631static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
 632__releases(pcpu_rwsem)
 633{
 634	percpu_up_write(&pcpu_rwsem);
 635}
 636
 637static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
 638__acquires(pcpu_rwsem)
 639{
 640	percpu_down_read(&pcpu_rwsem);
 641	return 0;
 642}
 643
 644static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
 645__releases(pcpu_rwsem)
 646{
 647	percpu_up_read(&pcpu_rwsem);
 648}
 649
 650static struct lock_torture_ops percpu_rwsem_lock_ops = {
 651	.init		= torture_percpu_rwsem_init,
 652	.exit		= torture_percpu_rwsem_exit,
 653	.writelock	= torture_percpu_rwsem_down_write,
 654	.write_delay	= torture_rwsem_write_delay,
 655	.task_boost     = torture_boost_dummy,
 656	.writeunlock	= torture_percpu_rwsem_up_write,
 657	.readlock       = torture_percpu_rwsem_down_read,
 658	.read_delay     = torture_rwsem_read_delay,
 659	.readunlock     = torture_percpu_rwsem_up_read,
 660	.name		= "percpu_rwsem_lock"
 661};
 662
 663/*
 664 * Lock torture writer kthread.  Repeatedly acquires and releases
 665 * the lock, checking for duplicate acquisitions.
 666 */
 667static int lock_torture_writer(void *arg)
 668{
 669	struct lock_stress_stats *lwsp = arg;
 670	int tid = lwsp - cxt.lwsa;
 671	DEFINE_TORTURE_RANDOM(rand);
 672
 673	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
 674	set_user_nice(current, MAX_NICE);
 675
 676	do {
 677		if ((torture_random(&rand) & 0xfffff) == 0)
 678			schedule_timeout_uninterruptible(1);
 679
 680		cxt.cur_ops->task_boost(&rand);
 681		cxt.cur_ops->writelock(tid);
 682		if (WARN_ON_ONCE(lock_is_write_held))
 683			lwsp->n_lock_fail++;
 684		lock_is_write_held = true;
 685		if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
 686			lwsp->n_lock_fail++; /* rare, but... */
 687
 688		lwsp->n_lock_acquired++;
 689		cxt.cur_ops->write_delay(&rand);
 690		lock_is_write_held = false;
 691		WRITE_ONCE(last_lock_release, jiffies);
 692		cxt.cur_ops->writeunlock(tid);
 693
 694		stutter_wait("lock_torture_writer");
 695	} while (!torture_must_stop());
 696
 697	cxt.cur_ops->task_boost(NULL); /* reset prio */
 698	torture_kthread_stopping("lock_torture_writer");
 699	return 0;
 700}
 701
 702/*
 703 * Lock torture reader kthread.  Repeatedly acquires and releases
 704 * the reader lock.
 705 */
 706static int lock_torture_reader(void *arg)
 707{
 708	struct lock_stress_stats *lrsp = arg;
 709	int tid = lrsp - cxt.lrsa;
 710	DEFINE_TORTURE_RANDOM(rand);
 711
 712	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
 713	set_user_nice(current, MAX_NICE);
 714
 715	do {
 716		if ((torture_random(&rand) & 0xfffff) == 0)
 717			schedule_timeout_uninterruptible(1);
 718
 719		cxt.cur_ops->readlock(tid);
 720		atomic_inc(&lock_is_read_held);
 721		if (WARN_ON_ONCE(lock_is_write_held))
 722			lrsp->n_lock_fail++; /* rare, but... */
 723
 724		lrsp->n_lock_acquired++;
 725		cxt.cur_ops->read_delay(&rand);
 726		atomic_dec(&lock_is_read_held);
 727		cxt.cur_ops->readunlock(tid);
 728
 729		stutter_wait("lock_torture_reader");
 730	} while (!torture_must_stop());
 731	torture_kthread_stopping("lock_torture_reader");
 732	return 0;
 733}
 734
 735/*
  736 * Create a lock-torture-statistics message in the specified buffer.
 737 */
 738static void __torture_print_stats(char *page,
 739				  struct lock_stress_stats *statp, bool write)
 740{
 741	long cur;
 742	bool fail = false;
 743	int i, n_stress;
 744	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
 745	long long sum = 0;
 746
 747	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
 748	for (i = 0; i < n_stress; i++) {
 749		if (data_race(statp[i].n_lock_fail))
 750			fail = true;
 751		cur = data_race(statp[i].n_lock_acquired);
 752		sum += cur;
 753		if (max < cur)
 754			max = cur;
 755		if (min > cur)
 756			min = cur;
 757	}
 758	page += sprintf(page,
 759			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
 760			write ? "Writes" : "Reads ",
 761			sum, max, min,
 762			!onoff_interval && max / 2 > min ? "???" : "",
 763			fail, fail ? "!!!" : "");
 764	if (fail)
 765		atomic_inc(&cxt.n_lock_torture_errors);
 766}
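The "???" marker flags suspicious imbalance: unless CPU hotplug was in play, the busiest thread should not have acquired the lock more than twice as often as the least busy one. For example, per-thread acquisition counts of 1000, 900, 800 and 300 give max/2 = 500 > min = 300, so the line would be tagged.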
 767
 768/*
 769 * Print torture statistics.  Caller must ensure that there is only one
 770 * call to this function at a given time!!!  This is normally accomplished
 771 * by relying on the module system to only have one copy of the module
 772 * loaded, and then by giving the lock_torture_stats kthread full control
 773 * (or the init/cleanup functions when lock_torture_stats thread is not
 774 * running).
 775 */
 776static void lock_torture_stats_print(void)
 777{
 778	int size = cxt.nrealwriters_stress * 200 + 8192;
 779	char *buf;
 780
 781	if (cxt.cur_ops->readlock)
 782		size += cxt.nrealreaders_stress * 200 + 8192;
 783
 784	buf = kmalloc(size, GFP_KERNEL);
 785	if (!buf) {
 786		pr_err("lock_torture_stats_print: Out of memory, need: %d",
 787		       size);
 788		return;
 789	}
 790
 791	__torture_print_stats(buf, cxt.lwsa, true);
 792	pr_alert("%s", buf);
 793	kfree(buf);
 794
 795	if (cxt.cur_ops->readlock) {
 796		buf = kmalloc(size, GFP_KERNEL);
 797		if (!buf) {
 798			pr_err("lock_torture_stats_print: Out of memory, need: %d",
 799			       size);
 800			return;
 801		}
 802
 803		__torture_print_stats(buf, cxt.lrsa, false);
 804		pr_alert("%s", buf);
 805		kfree(buf);
 806	}
 807}
 808
 809/*
 810 * Periodically prints torture statistics, if periodic statistics printing
 811 * was specified via the stat_interval module parameter.
 812 *
 813 * No need to worry about fullstop here, since this one doesn't reference
 814 * volatile state or register callbacks.
 815 */
 816static int lock_torture_stats(void *arg)
 817{
 818	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
 819	do {
 820		schedule_timeout_interruptible(stat_interval * HZ);
 821		lock_torture_stats_print();
 822		torture_shutdown_absorb("lock_torture_stats");
 823	} while (!torture_must_stop());
 824	torture_kthread_stopping("lock_torture_stats");
 825	return 0;
 826}
 827
 828static inline void
 829lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
 830				const char *tag)
 831{
 832	pr_alert("%s" TORTURE_FLAG
 833		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
 834		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
 835		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
 836		 verbose, shuffle_interval, stutter, shutdown_secs,
 837		 onoff_interval, onoff_holdoff);
 838}
 839
 840static void lock_torture_cleanup(void)
 841{
 842	int i;
 843
 844	if (torture_cleanup_begin())
 845		return;
 846
 847	/*
 848	 * Indicates early cleanup, meaning that the test has not run,
 849	 * such as when passing bogus args when loading the module.
  850	 * However, cxt->cur_ops.init() may have been invoked, so besides
  851	 * performing the underlying torture-specific cleanups, cur_ops.exit()
 852	 * will be invoked if needed.
 853	 */
 854	if (!cxt.lwsa && !cxt.lrsa)
 855		goto end;
 856
 857	if (writer_tasks) {
 858		for (i = 0; i < cxt.nrealwriters_stress; i++)
 859			torture_stop_kthread(lock_torture_writer,
 860					     writer_tasks[i]);
 861		kfree(writer_tasks);
 862		writer_tasks = NULL;
 863	}
 864
 865	if (reader_tasks) {
 866		for (i = 0; i < cxt.nrealreaders_stress; i++)
 867			torture_stop_kthread(lock_torture_reader,
 868					     reader_tasks[i]);
 869		kfree(reader_tasks);
 870		reader_tasks = NULL;
 871	}
 872
 873	torture_stop_kthread(lock_torture_stats, stats_task);
 874	lock_torture_stats_print();  /* -After- the stats thread is stopped! */
 875
 876	if (atomic_read(&cxt.n_lock_torture_errors))
 877		lock_torture_print_module_parms(cxt.cur_ops,
 878						"End of test: FAILURE");
 879	else if (torture_onoff_failures())
 880		lock_torture_print_module_parms(cxt.cur_ops,
 881						"End of test: LOCK_HOTPLUG");
 882	else
 883		lock_torture_print_module_parms(cxt.cur_ops,
 884						"End of test: SUCCESS");
 885
 886	kfree(cxt.lwsa);
 887	cxt.lwsa = NULL;
 888	kfree(cxt.lrsa);
 889	cxt.lrsa = NULL;
 890
 891end:
 892	if (cxt.init_called) {
 893		if (cxt.cur_ops->exit)
 894			cxt.cur_ops->exit();
 895		cxt.init_called = false;
 896	}
 897	torture_cleanup_end();
 898}
 899
 900static int __init lock_torture_init(void)
 901{
 902	int i, j;
 903	int firsterr = 0;
 904	static struct lock_torture_ops *torture_ops[] = {
 905		&lock_busted_ops,
 906		&spin_lock_ops, &spin_lock_irq_ops,
 907		&rw_lock_ops, &rw_lock_irq_ops,
 908		&mutex_lock_ops,
 909		&ww_mutex_lock_ops,
 910#ifdef CONFIG_RT_MUTEXES
 911		&rtmutex_lock_ops,
 912#endif
 913		&rwsem_lock_ops,
 914		&percpu_rwsem_lock_ops,
 915	};
 916
 917	if (!torture_init_begin(torture_type, verbose))
 918		return -EBUSY;
 919
 920	/* Process args and tell the world that the torturer is on the job. */
 921	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
 922		cxt.cur_ops = torture_ops[i];
 923		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
 924			break;
 925	}
 926	if (i == ARRAY_SIZE(torture_ops)) {
 927		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
 928			 torture_type);
 929		pr_alert("lock-torture types:");
 930		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
 931			pr_alert(" %s", torture_ops[i]->name);
 932		pr_alert("\n");
 933		firsterr = -EINVAL;
 934		goto unwind;
 935	}
 936
 937	if (nwriters_stress == 0 &&
 938	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
 939		pr_alert("lock-torture: must run at least one locking thread\n");
 940		firsterr = -EINVAL;
 941		goto unwind;
 942	}
 943
 944	if (nwriters_stress >= 0)
 945		cxt.nrealwriters_stress = nwriters_stress;
 946	else
 947		cxt.nrealwriters_stress = 2 * num_online_cpus();
 948
 949	if (cxt.cur_ops->init) {
 950		cxt.cur_ops->init();
 951		cxt.init_called = true;
 952	}
 953
 954#ifdef CONFIG_DEBUG_MUTEXES
 955	if (str_has_prefix(torture_type, "mutex"))
 956		cxt.debug_lock = true;
 957#endif
 958#ifdef CONFIG_DEBUG_RT_MUTEXES
 959	if (str_has_prefix(torture_type, "rtmutex"))
 960		cxt.debug_lock = true;
 961#endif
 962#ifdef CONFIG_DEBUG_SPINLOCK
 963	if ((str_has_prefix(torture_type, "spin")) ||
 964	    (str_has_prefix(torture_type, "rw_lock")))
 965		cxt.debug_lock = true;
 966#endif
 967
 968	/* Initialize the statistics so that each run gets its own numbers. */
 969	if (nwriters_stress) {
 970		lock_is_write_held = false;
 971		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
 972					 sizeof(*cxt.lwsa),
 973					 GFP_KERNEL);
 974		if (cxt.lwsa == NULL) {
 975			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
 976			firsterr = -ENOMEM;
 977			goto unwind;
 978		}
 979
 980		for (i = 0; i < cxt.nrealwriters_stress; i++) {
 981			cxt.lwsa[i].n_lock_fail = 0;
 982			cxt.lwsa[i].n_lock_acquired = 0;
 983		}
 984	}
 985
 986	if (cxt.cur_ops->readlock) {
 987		if (nreaders_stress >= 0)
 988			cxt.nrealreaders_stress = nreaders_stress;
 989		else {
 990			/*
 991			 * By default distribute evenly the number of
 992			 * readers and writers. We still run the same number
 993			 * of threads as the writer-only locks default.
 994			 */
 995			if (nwriters_stress < 0) /* user doesn't care */
 996				cxt.nrealwriters_stress = num_online_cpus();
 997			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
 998		}
 999
1000		if (nreaders_stress) {
1001			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
1002						 sizeof(*cxt.lrsa),
1003						 GFP_KERNEL);
1004			if (cxt.lrsa == NULL) {
1005				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
1006				firsterr = -ENOMEM;
1007				kfree(cxt.lwsa);
1008				cxt.lwsa = NULL;
1009				goto unwind;
1010			}
1011
1012			for (i = 0; i < cxt.nrealreaders_stress; i++) {
1013				cxt.lrsa[i].n_lock_fail = 0;
1014				cxt.lrsa[i].n_lock_acquired = 0;
1015			}
1016		}
1017	}
1018
1019	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
1020
1021	/* Prepare torture context. */
1022	if (onoff_interval > 0) {
1023		firsterr = torture_onoff_init(onoff_holdoff * HZ,
1024					      onoff_interval * HZ, NULL);
1025		if (torture_init_error(firsterr))
1026			goto unwind;
1027	}
1028	if (shuffle_interval > 0) {
1029		firsterr = torture_shuffle_init(shuffle_interval);
1030		if (torture_init_error(firsterr))
1031			goto unwind;
1032	}
1033	if (shutdown_secs > 0) {
1034		firsterr = torture_shutdown_init(shutdown_secs,
1035						 lock_torture_cleanup);
1036		if (torture_init_error(firsterr))
1037			goto unwind;
1038	}
1039	if (stutter > 0) {
1040		firsterr = torture_stutter_init(stutter, stutter);
1041		if (torture_init_error(firsterr))
1042			goto unwind;
1043	}
1044
1045	if (nwriters_stress) {
1046		writer_tasks = kcalloc(cxt.nrealwriters_stress,
1047				       sizeof(writer_tasks[0]),
1048				       GFP_KERNEL);
1049		if (writer_tasks == NULL) {
1050			TOROUT_ERRSTRING("writer_tasks: Out of memory");
1051			firsterr = -ENOMEM;
1052			goto unwind;
1053		}
1054	}
1055
1056	if (cxt.cur_ops->readlock) {
1057		reader_tasks = kcalloc(cxt.nrealreaders_stress,
1058				       sizeof(reader_tasks[0]),
1059				       GFP_KERNEL);
1060		if (reader_tasks == NULL) {
1061			TOROUT_ERRSTRING("reader_tasks: Out of memory");
1062			kfree(writer_tasks);
1063			writer_tasks = NULL;
1064			firsterr = -ENOMEM;
1065			goto unwind;
1066		}
1067	}
1068
1069	/*
1070	 * Create the kthreads and start torturing (oh, those poor little locks).
1071	 *
1072	 * TODO: Note that we interleave writers with readers, giving writers a
 1073	 * slight advantage by creating their kthreads first. This can be modified
1074	 * for very specific needs, or even let the user choose the policy, if
1075	 * ever wanted.
1076	 */
1077	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
1078		    j < cxt.nrealreaders_stress; i++, j++) {
1079		if (i >= cxt.nrealwriters_stress)
1080			goto create_reader;
1081
1082		/* Create writer. */
1083		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
1084						  writer_tasks[i]);
1085		if (torture_init_error(firsterr))
1086			goto unwind;
1087
1088	create_reader:
1089		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
1090			continue;
1091		/* Create reader. */
1092		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
1093						  reader_tasks[j]);
1094		if (torture_init_error(firsterr))
1095			goto unwind;
1096	}
1097	if (stat_interval > 0) {
1098		firsterr = torture_create_kthread(lock_torture_stats, NULL,
1099						  stats_task);
1100		if (torture_init_error(firsterr))
1101			goto unwind;
1102	}
1103	torture_init_end();
1104	return 0;
1105
1106unwind:
1107	torture_init_end();
1108	lock_torture_cleanup();
1109	if (shutdown_secs) {
1110		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
1111		kernel_power_off();
1112	}
1113	return firsterr;
1114}
1115
1116module_init(lock_torture_init);
1117module_exit(lock_torture_cleanup);
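Typical usage is to build with CONFIG_LOCK_TORTURE_TEST=m and load the module with the parameters declared above, along the lines of modprobe locktorture torture_type=rwsem_lock nwriters_stress=4 nreaders_stress=8 stat_interval=30 (an illustrative invocation); statistics and the final SUCCESS/FAILURE verdict appear in the kernel log. The rcutorture scripting under tools/testing/selftests/rcutorture can also drive these scenarios.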
kernel/locking/locktorture.c, v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Module-based torture test facility for locking
   4 *
   5 * Copyright (C) IBM Corporation, 2014
   6 *
   7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
   8 *          Davidlohr Bueso <dave@stgolabs.net>
   9 *	Based on kernel/rcu/torture.c.
  10 */
  11
  12#define pr_fmt(fmt) fmt
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/kthread.h>
  17#include <linux/sched/rt.h>
  18#include <linux/spinlock.h>
  19#include <linux/mutex.h>
  20#include <linux/rwsem.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/sched.h>
  24#include <uapi/linux/sched/types.h>
  25#include <linux/rtmutex.h>
  26#include <linux/atomic.h>
  27#include <linux/moduleparam.h>
  28#include <linux/delay.h>
  29#include <linux/slab.h>
  30#include <linux/torture.h>
  31#include <linux/reboot.h>
  32
  33MODULE_DESCRIPTION("torture test facility for locking");
  34MODULE_LICENSE("GPL");
  35MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
  36
  37torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
  38torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
  39torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
  40torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
  41torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
  42torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
  43torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
  44torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
  45torture_param(int, rt_boost, 2,
  46		   "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
  47torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
  48torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
  49torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
  50torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
  51torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
  52torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
  53torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
  54/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
  55#define MAX_NESTED_LOCKS 8
  56
  57static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
  58module_param(torture_type, charp, 0444);
  59MODULE_PARM_DESC(torture_type,
  60		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
  61
  62static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
  63static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.
  64
  65// Parse a cpumask kernel parameter.  If there are more users later on,
   66// this might need to go to a more central location.
  67static int param_set_cpumask(const char *val, const struct kernel_param *kp)
  68{
  69	cpumask_var_t *cm_bind = kp->arg;
  70	int ret;
  71	char *s;
  72
  73	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
  74		s = "Out of memory";
  75		ret = -ENOMEM;
  76		goto out_err;
  77	}
  78	ret = cpulist_parse(val, *cm_bind);
  79	if (!ret)
  80		return ret;
  81	s = "Bad CPU range";
  82out_err:
  83	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
  84	cpumask_setall(*cm_bind);
  85	return ret;
  86}
  87
  88// Output a cpumask kernel parameter.
  89static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
  90{
  91	cpumask_var_t *cm_bind = kp->arg;
  92
  93	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
  94}
  95
  96static bool cpumask_nonempty(cpumask_var_t mask)
  97{
  98	return cpumask_available(mask) && !cpumask_empty(mask);
  99}
 100
 101static const struct kernel_param_ops lt_bind_ops = {
 102	.set = param_set_cpumask,
 103	.get = param_get_cpumask,
 104};
 105
 106module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
 107module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
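Both parameters accept standard cpulist syntax, so for example bind_writers=0-3,8 (illustrative) confines all writer kthreads to CPUs 0 through 3 and 8; as the error path above shows, an unparsable value logs a warning and falls back to allowing all CPUs.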
 108
 109long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
 110
 111static struct task_struct *stats_task;
 112static struct task_struct **writer_tasks;
 113static struct task_struct **reader_tasks;
 114
 115static bool lock_is_write_held;
 116static atomic_t lock_is_read_held;
 117static unsigned long last_lock_release;
 118
 119struct lock_stress_stats {
 120	long n_lock_fail;
 121	long n_lock_acquired;
 122};
 123
 124struct call_rcu_chain {
 125	struct rcu_head crc_rh;
 126	bool crc_stop;
 127};
 128struct call_rcu_chain *call_rcu_chain_list;
 129
 130/* Forward reference. */
 131static void lock_torture_cleanup(void);
 132
 133/*
 134 * Operations vector for selecting different types of tests.
 135 */
 136struct lock_torture_ops {
 137	void (*init)(void);
 138	void (*exit)(void);
 139	int (*nested_lock)(int tid, u32 lockset);
 140	int (*writelock)(int tid);
 141	void (*write_delay)(struct torture_random_state *trsp);
 142	void (*task_boost)(struct torture_random_state *trsp);
 143	void (*writeunlock)(int tid);
 144	void (*nested_unlock)(int tid, u32 lockset);
 145	int (*readlock)(int tid);
 146	void (*read_delay)(struct torture_random_state *trsp);
 147	void (*readunlock)(int tid);
 148
 149	unsigned long flags; /* for irq spinlocks */
 150	const char *name;
 151};
 152
 153struct lock_torture_cxt {
 154	int nrealwriters_stress;
 155	int nrealreaders_stress;
 156	bool debug_lock;
 157	bool init_called;
 158	atomic_t n_lock_torture_errors;
 159	struct lock_torture_ops *cur_ops;
 160	struct lock_stress_stats *lwsa; /* writer statistics */
 161	struct lock_stress_stats *lrsa; /* reader statistics */
 162};
 163static struct lock_torture_cxt cxt = { 0, 0, false, false,
 164				       ATOMIC_INIT(0),
 165				       NULL, NULL};
 166/*
 167 * Definitions for lock torture testing.
 168 */
 169
 170static int torture_lock_busted_write_lock(int tid __maybe_unused)
 171{
 172	return 0;  /* BUGGY, do not use in real life!!! */
 173}
 174
 175static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
 176{
 177	/* We want a long delay occasionally to force massive contention.  */
 178	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
 179		mdelay(long_hold);
 180	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 181		torture_preempt_schedule();  /* Allow test to be preempted. */
 182}
 183
 184static void torture_lock_busted_write_unlock(int tid __maybe_unused)
 185{
 186	  /* BUGGY, do not use in real life!!! */
 187}
 188
 189static void __torture_rt_boost(struct torture_random_state *trsp)
 190{
 191	const unsigned int factor = rt_boost_factor;
 192
 193	if (!rt_task(current)) {
 194		/*
 195		 * Boost priority once every rt_boost_factor operations. When
  196		 * the task tries to take the lock, the rtmutex will account for
  197		 * the new priority, and do any corresponding pi-dance.
 198		 */
 199		if (trsp && !(torture_random(trsp) %
 200			      (cxt.nrealwriters_stress * factor))) {
 201			sched_set_fifo(current);
 202		} else /* common case, do nothing */
 203			return;
 204	} else {
 205		/*
 206		 * The task will remain boosted for another 10 * rt_boost_factor
 207		 * operations, then restored back to its original prio, and so
 208		 * forth.
 209		 *
 210		 * When @trsp is nil, we want to force-reset the task for
 211		 * stopping the kthread.
 212		 */
 213		if (!trsp || !(torture_random(trsp) %
 214			       (cxt.nrealwriters_stress * factor * 2))) {
 215			sched_set_normal(current, 0);
 216		} else /* common case, do nothing */
 217			return;
 218	}
 219}
 220
 221static void torture_rt_boost(struct torture_random_state *trsp)
 222{
 223	if (rt_boost != 2)
 224		return;
 225
 226	__torture_rt_boost(trsp);
 227}
 228
 229static struct lock_torture_ops lock_busted_ops = {
 230	.writelock	= torture_lock_busted_write_lock,
 231	.write_delay	= torture_lock_busted_write_delay,
 232	.task_boost     = torture_rt_boost,
 233	.writeunlock	= torture_lock_busted_write_unlock,
 234	.readlock       = NULL,
 235	.read_delay     = NULL,
 236	.readunlock     = NULL,
 237	.name		= "lock_busted"
 238};
 239
 240static DEFINE_SPINLOCK(torture_spinlock);
 241
 242static int torture_spin_lock_write_lock(int tid __maybe_unused)
 243__acquires(torture_spinlock)
 244{
 245	spin_lock(&torture_spinlock);
 246	return 0;
 247}
 248
 249static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
 250{
 251	const unsigned long shortdelay_us = 2;
 252	unsigned long j;
 253
 254	/* We want a short delay mostly to emulate likely code, and
 255	 * we want a long delay occasionally to force massive contention.
 256	 */
 257	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
 258		j = jiffies;
 259		mdelay(long_hold);
 260		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
 261	}
 262	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
 263		udelay(shortdelay_us);
 264	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 265		torture_preempt_schedule();  /* Allow test to be preempted. */
 266}
 267
 268static void torture_spin_lock_write_unlock(int tid __maybe_unused)
 269__releases(torture_spinlock)
 270{
 271	spin_unlock(&torture_spinlock);
 272}
 273
 274static struct lock_torture_ops spin_lock_ops = {
 275	.writelock	= torture_spin_lock_write_lock,
 276	.write_delay	= torture_spin_lock_write_delay,
 277	.task_boost     = torture_rt_boost,
 278	.writeunlock	= torture_spin_lock_write_unlock,
 279	.readlock       = NULL,
 280	.read_delay     = NULL,
 281	.readunlock     = NULL,
 282	.name		= "spin_lock"
 283};
 284
 285static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
 286__acquires(torture_spinlock)
 287{
 288	unsigned long flags;
 289
 290	spin_lock_irqsave(&torture_spinlock, flags);
 291	cxt.cur_ops->flags = flags;
 292	return 0;
 293}
 294
 295static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
 296__releases(torture_spinlock)
 297{
 298	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
 299}
 300
 301static struct lock_torture_ops spin_lock_irq_ops = {
 302	.writelock	= torture_spin_lock_write_lock_irq,
 303	.write_delay	= torture_spin_lock_write_delay,
 304	.task_boost     = torture_rt_boost,
 305	.writeunlock	= torture_lock_spin_write_unlock_irq,
 306	.readlock       = NULL,
 307	.read_delay     = NULL,
 308	.readunlock     = NULL,
 309	.name		= "spin_lock_irq"
 310};
 311
 312static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);
 313
 314static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
 315__acquires(torture_raw_spinlock)
 316{
 317	raw_spin_lock(&torture_raw_spinlock);
 318	return 0;
 319}
 320
 321static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
 322__releases(torture_raw_spinlock)
 323{
 324	raw_spin_unlock(&torture_raw_spinlock);
 325}
 326
 327static struct lock_torture_ops raw_spin_lock_ops = {
 328	.writelock	= torture_raw_spin_lock_write_lock,
 329	.write_delay	= torture_spin_lock_write_delay,
 330	.task_boost	= torture_rt_boost,
 331	.writeunlock	= torture_raw_spin_lock_write_unlock,
 332	.readlock	= NULL,
 333	.read_delay	= NULL,
 334	.readunlock	= NULL,
 335	.name		= "raw_spin_lock"
 336};
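A raw_spinlock_t remains a true busy-waiting spinlock even under CONFIG_PREEMPT_RT, where ordinary spinlock_t is substituted by a sleeping rt_mutex-based lock; that is why the torture_type default near the top of this version switches to raw_spin_lock on PREEMPT_RT builds.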
 337
 338static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
 339__acquires(torture_raw_spinlock)
 340{
 341	unsigned long flags;
 342
 343	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
 344	cxt.cur_ops->flags = flags;
 345	return 0;
 346}
 347
 348static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
 349__releases(torture_raw_spinlock)
 350{
 351	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
 352}
 353
 354static struct lock_torture_ops raw_spin_lock_irq_ops = {
 355	.writelock	= torture_raw_spin_lock_write_lock_irq,
 356	.write_delay	= torture_spin_lock_write_delay,
 357	.task_boost	= torture_rt_boost,
 358	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
 359	.readlock	= NULL,
 360	.read_delay	= NULL,
 361	.readunlock	= NULL,
 362	.name		= "raw_spin_lock_irq"
 363};
 364
 365static DEFINE_RWLOCK(torture_rwlock);
 366
 367static int torture_rwlock_write_lock(int tid __maybe_unused)
 368__acquires(torture_rwlock)
 369{
 370	write_lock(&torture_rwlock);
 371	return 0;
 372}
 373
 374static void torture_rwlock_write_delay(struct torture_random_state *trsp)
 375{
 376	const unsigned long shortdelay_us = 2;
 377
 378	/* We want a short delay mostly to emulate likely code, and
 379	 * we want a long delay occasionally to force massive contention.
 380	 */
 381	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
 382		mdelay(long_hold);
 383	else
 384		udelay(shortdelay_us);
 385}
 386
 387static void torture_rwlock_write_unlock(int tid __maybe_unused)
 388__releases(torture_rwlock)
 389{
 390	write_unlock(&torture_rwlock);
 391}
 392
 393static int torture_rwlock_read_lock(int tid __maybe_unused)
 394__acquires(torture_rwlock)
 395{
 396	read_lock(&torture_rwlock);
 397	return 0;
 398}
 399
 400static void torture_rwlock_read_delay(struct torture_random_state *trsp)
 401{
 402	const unsigned long shortdelay_us = 10;
 403
 404	/* We want a short delay mostly to emulate likely code, and
 405	 * we want a long delay occasionally to force massive contention.
 406	 */
 407	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
 408		mdelay(long_hold);
 409	else
 410		udelay(shortdelay_us);
 411}
 412
 413static void torture_rwlock_read_unlock(int tid __maybe_unused)
 414__releases(torture_rwlock)
 415{
 416	read_unlock(&torture_rwlock);
 417}
 418
 419static struct lock_torture_ops rw_lock_ops = {
 420	.writelock	= torture_rwlock_write_lock,
 421	.write_delay	= torture_rwlock_write_delay,
 422	.task_boost     = torture_rt_boost,
 423	.writeunlock	= torture_rwlock_write_unlock,
 424	.readlock       = torture_rwlock_read_lock,
 425	.read_delay     = torture_rwlock_read_delay,
 426	.readunlock     = torture_rwlock_read_unlock,
 427	.name		= "rw_lock"
 428};
 429
 430static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
 431__acquires(torture_rwlock)
 432{
 433	unsigned long flags;
 434
 435	write_lock_irqsave(&torture_rwlock, flags);
 436	cxt.cur_ops->flags = flags;
 437	return 0;
 438}
 439
 440static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
 441__releases(torture_rwlock)
 442{
 443	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
 444}
 445
 446static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
 447__acquires(torture_rwlock)
 448{
 449	unsigned long flags;
 450
 451	read_lock_irqsave(&torture_rwlock, flags);
 452	cxt.cur_ops->flags = flags;
 453	return 0;
 454}
 455
 456static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
 457__releases(torture_rwlock)
 458{
 459	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
 460}
 461
 462static struct lock_torture_ops rw_lock_irq_ops = {
 463	.writelock	= torture_rwlock_write_lock_irq,
 464	.write_delay	= torture_rwlock_write_delay,
 465	.task_boost     = torture_rt_boost,
 466	.writeunlock	= torture_rwlock_write_unlock_irq,
 467	.readlock       = torture_rwlock_read_lock_irq,
 468	.read_delay     = torture_rwlock_read_delay,
 469	.readunlock     = torture_rwlock_read_unlock_irq,
 470	.name		= "rw_lock_irq"
 471};
 472
 473static DEFINE_MUTEX(torture_mutex);
 474static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
 475static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];
 476
 477static void torture_mutex_init(void)
 478{
 479	int i;
 480
 481	for (i = 0; i < MAX_NESTED_LOCKS; i++)
 482		__mutex_init(&torture_nested_mutexes[i], __func__,
 483			     &nested_mutex_keys[i]);
 484}
 485
 486static int torture_mutex_nested_lock(int tid __maybe_unused,
 487				     u32 lockset)
 488{
 489	int i;
 490
 491	for (i = 0; i < nested_locks; i++)
 492		if (lockset & (1 << i))
 493			mutex_lock(&torture_nested_mutexes[i]);
 494	return 0;
 495}
 496
 497static int torture_mutex_lock(int tid __maybe_unused)
 498__acquires(torture_mutex)
 499{
 500	mutex_lock(&torture_mutex);
 501	return 0;
 502}
 503
 504static void torture_mutex_delay(struct torture_random_state *trsp)
 505{
 506	/* We want a long delay occasionally to force massive contention.  */
 507	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
 508		mdelay(long_hold * 5);
 509	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 510		torture_preempt_schedule();  /* Allow test to be preempted. */
 511}
 512
 513static void torture_mutex_unlock(int tid __maybe_unused)
 514__releases(torture_mutex)
 515{
 516	mutex_unlock(&torture_mutex);
 517}
 518
 519static void torture_mutex_nested_unlock(int tid __maybe_unused,
 520					u32 lockset)
 521{
 522	int i;
 523
 524	for (i = nested_locks - 1; i >= 0; i--)
 525		if (lockset & (1 << i))
 526			mutex_unlock(&torture_nested_mutexes[i]);
 527}
 528
 529static struct lock_torture_ops mutex_lock_ops = {
 530	.init		= torture_mutex_init,
 531	.nested_lock	= torture_mutex_nested_lock,
 532	.writelock	= torture_mutex_lock,
 533	.write_delay	= torture_mutex_delay,
 534	.task_boost     = torture_rt_boost,
 535	.writeunlock	= torture_mutex_unlock,
 536	.nested_unlock	= torture_mutex_nested_unlock,
 537	.readlock       = NULL,
 538	.read_delay     = NULL,
 539	.readunlock     = NULL,
 540	.name		= "mutex_lock"
 541};
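The nested_lock/nested_unlock pair works off a random per-iteration bitmask supplied by the writer kthread: with nested_locks=4 and a lockset of 0b0101 (illustrative), torture_mutex_nested_lock() takes nested mutexes 0 and 2 in ascending index order before the main writelock, and torture_mutex_nested_unlock() releases them in descending order, so the ordering stays consistent and lockdep sees real nesting depth without deadlock risk.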
 542
 543#include <linux/ww_mutex.h>
 544/*
 545 * The torture ww_mutexes should belong to the same lock class as
 546 * torture_ww_class to avoid lockdep problem. The ww_mutex_init()
 547 * function is called for initialization to ensure that.
 548 */
 549static DEFINE_WD_CLASS(torture_ww_class);
 550static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
 551static struct ww_acquire_ctx *ww_acquire_ctxs;
 552
 553static void torture_ww_mutex_init(void)
 554{
 555	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
 556	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
 557	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
 558
 559	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
 560					sizeof(*ww_acquire_ctxs),
 561					GFP_KERNEL);
 562	if (!ww_acquire_ctxs)
 563		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
 564}
 565
 566static void torture_ww_mutex_exit(void)
 567{
 568	kfree(ww_acquire_ctxs);
 569}
 570
 571static int torture_ww_mutex_lock(int tid)
 572__acquires(torture_ww_mutex_0)
 573__acquires(torture_ww_mutex_1)
 574__acquires(torture_ww_mutex_2)
 575{
 576	LIST_HEAD(list);
 577	struct reorder_lock {
 578		struct list_head link;
 579		struct ww_mutex *lock;
 580	} locks[3], *ll, *ln;
 581	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
 582
 583	locks[0].lock = &torture_ww_mutex_0;
 584	list_add(&locks[0].link, &list);
 585
 586	locks[1].lock = &torture_ww_mutex_1;
 587	list_add(&locks[1].link, &list);
 588
 589	locks[2].lock = &torture_ww_mutex_2;
 590	list_add(&locks[2].link, &list);
 591
 592	ww_acquire_init(ctx, &torture_ww_class);
 593
 594	list_for_each_entry(ll, &list, link) {
 595		int err;
 596
 597		err = ww_mutex_lock(ll->lock, ctx);
 598		if (!err)
 599			continue;
 600
 601		ln = ll;
 602		list_for_each_entry_continue_reverse(ln, &list, link)
 603			ww_mutex_unlock(ln->lock);
 604
 605		if (err != -EDEADLK)
 606			return err;
 607
 608		ww_mutex_lock_slow(ll->lock, ctx);
 609		list_move(&ll->link, &list);
 610	}
 611
 612	return 0;
 613}
 614
 615static void torture_ww_mutex_unlock(int tid)
 616__releases(torture_ww_mutex_0)
 617__releases(torture_ww_mutex_1)
 618__releases(torture_ww_mutex_2)
 619{
 620	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
 621
 622	ww_mutex_unlock(&torture_ww_mutex_0);
 623	ww_mutex_unlock(&torture_ww_mutex_1);
 624	ww_mutex_unlock(&torture_ww_mutex_2);
 625	ww_acquire_fini(ctx);
 626}
 627
 628static struct lock_torture_ops ww_mutex_lock_ops = {
 629	.init		= torture_ww_mutex_init,
 630	.exit		= torture_ww_mutex_exit,
 631	.writelock	= torture_ww_mutex_lock,
 632	.write_delay	= torture_mutex_delay,
 633	.task_boost     = torture_rt_boost,
 634	.writeunlock	= torture_ww_mutex_unlock,
 635	.readlock       = NULL,
 636	.read_delay     = NULL,
 637	.readunlock     = NULL,
 638	.name		= "ww_mutex_lock"
 639};
 640
 641#ifdef CONFIG_RT_MUTEXES
 642static DEFINE_RT_MUTEX(torture_rtmutex);
 643static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
 644static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];
 645
 646static void torture_rtmutex_init(void)
 647{
 648	int i;
 649
 650	for (i = 0; i < MAX_NESTED_LOCKS; i++)
 651		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
 652				&nested_rtmutex_keys[i]);
 653}
 654
 655static int torture_rtmutex_nested_lock(int tid __maybe_unused,
 656				       u32 lockset)
 657{
 658	int i;
 659
 660	for (i = 0; i < nested_locks; i++)
 661		if (lockset & (1 << i))
 662			rt_mutex_lock(&torture_nested_rtmutexes[i]);
 663	return 0;
 664}
 665
 666static int torture_rtmutex_lock(int tid __maybe_unused)
 667__acquires(torture_rtmutex)
 668{
 669	rt_mutex_lock(&torture_rtmutex);
 670	return 0;
 671}
 672
 673static void torture_rtmutex_delay(struct torture_random_state *trsp)
 674{
 675	const unsigned long shortdelay_us = 2;
 676
 677	/*
 678	 * We want a short delay mostly to emulate likely code, and
 679	 * we want a long delay occasionally to force massive contention.
 680	 */
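     	/*
     	 * Illustrative arithmetic, assuming 8 writers and long_hold=100:
     	 * the mdelay() fires roughly once per 8*2000*100 = 1.6M calls,
     	 * the udelay() roughly once per 8*200*2 = 3200 calls.
     	 */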
 681	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
 682		mdelay(long_hold);
 683	if (!(torture_random(trsp) %
 684	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
 685		udelay(shortdelay_us);
 686	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 687		torture_preempt_schedule();  /* Allow test to be preempted. */
 688}
 689
 690static void torture_rtmutex_unlock(int tid __maybe_unused)
 691__releases(torture_rtmutex)
 692{
 693	rt_mutex_unlock(&torture_rtmutex);
 694}
 695
 696static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
 697{
 698	if (!rt_boost)
 699		return;
 700
 701	__torture_rt_boost(trsp);
 702}
 703
 704static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
 705					  u32 lockset)
 706{
 707	int i;
 708
 709	for (i = nested_locks - 1; i >= 0; i--)
 710		if (lockset & (1 << i))
 711			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
 712}
 713
 714static struct lock_torture_ops rtmutex_lock_ops = {
 715	.init		= torture_rtmutex_init,
 716	.nested_lock	= torture_rtmutex_nested_lock,
 717	.writelock	= torture_rtmutex_lock,
 718	.write_delay	= torture_rtmutex_delay,
 719	.task_boost     = torture_rt_boost_rtmutex,
 720	.writeunlock	= torture_rtmutex_unlock,
 721	.nested_unlock	= torture_rtmutex_nested_unlock,
 722	.readlock       = NULL,
 723	.read_delay     = NULL,
 724	.readunlock     = NULL,
 725	.name		= "rtmutex_lock"
 726};
 727#endif
 728
 729static DECLARE_RWSEM(torture_rwsem);
 730static int torture_rwsem_down_write(int tid __maybe_unused)
 731__acquires(torture_rwsem)
 732{
 733	down_write(&torture_rwsem);
 734	return 0;
 735}
 736
 737static void torture_rwsem_write_delay(struct torture_random_state *trsp)
 738{
 739	/* We want a long delay occasionally to force massive contention.  */
 740	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
 741		mdelay(long_hold * 10);
 742	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
 743		torture_preempt_schedule();  /* Allow test to be preempted. */
 744}
 745
 746static void torture_rwsem_up_write(int tid __maybe_unused)
 747__releases(torture_rwsem)
 748{
 749	up_write(&torture_rwsem);
 750}
 751
 752static int torture_rwsem_down_read(int tid __maybe_unused)
 753__acquires(torture_rwsem)
 754{
 755	down_read(&torture_rwsem);
 756	return 0;
 757}
 758
 759static void torture_rwsem_read_delay(struct torture_random_state *trsp)
 760{
  761	/* Occasionally force a very long hold for massive contention; otherwise hold for long_hold/2. */
 762	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
 763		mdelay(long_hold * 2);
 764	else
 765		mdelay(long_hold / 2);
 766	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
 767		torture_preempt_schedule();  /* Allow test to be preempted. */
 768}
 769
 770static void torture_rwsem_up_read(int tid __maybe_unused)
 771__releases(torture_rwsem)
 772{
 773	up_read(&torture_rwsem);
 774}
 775
 776static struct lock_torture_ops rwsem_lock_ops = {
 777	.writelock	= torture_rwsem_down_write,
 778	.write_delay	= torture_rwsem_write_delay,
 779	.task_boost     = torture_rt_boost,
 780	.writeunlock	= torture_rwsem_up_write,
 781	.readlock       = torture_rwsem_down_read,
 782	.read_delay     = torture_rwsem_read_delay,
 783	.readunlock     = torture_rwsem_up_read,
 784	.name		= "rwsem_lock"
 785};
 786
 787#include <linux/percpu-rwsem.h>
 788static struct percpu_rw_semaphore pcpu_rwsem;
 789
 790static void torture_percpu_rwsem_init(void)
 791{
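     	/* percpu_init_rwsem() allocates per-CPU state, so it can fail. */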
 792	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
 793}
 794
 795static void torture_percpu_rwsem_exit(void)
 796{
 797	percpu_free_rwsem(&pcpu_rwsem);
 798}
 799
 800static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
 801__acquires(pcpu_rwsem)
 802{
 803	percpu_down_write(&pcpu_rwsem);
 804	return 0;
 805}
 806
 807static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
 808__releases(pcpu_rwsem)
 809{
 810	percpu_up_write(&pcpu_rwsem);
 811}
 812
 813static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
 814__acquires(pcpu_rwsem)
 815{
 816	percpu_down_read(&pcpu_rwsem);
 817	return 0;
 818}
 819
 820static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
 821__releases(pcpu_rwsem)
 822{
 823	percpu_up_read(&pcpu_rwsem);
 824}
 825
 826static struct lock_torture_ops percpu_rwsem_lock_ops = {
 827	.init		= torture_percpu_rwsem_init,
 828	.exit		= torture_percpu_rwsem_exit,
 829	.writelock	= torture_percpu_rwsem_down_write,
 830	.write_delay	= torture_rwsem_write_delay,
 831	.task_boost     = torture_rt_boost,
 832	.writeunlock	= torture_percpu_rwsem_up_write,
 833	.readlock       = torture_percpu_rwsem_down_read,
 834	.read_delay     = torture_rwsem_read_delay,
 835	.readunlock     = torture_percpu_rwsem_up_read,
 836	.name		= "percpu_rwsem_lock"
 837};
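     /*
      * Sketch only (hypothetical "foo" names, not built): a new write-only
      * lock type needs an ops vector like the one below plus an entry in
      * torture_ops[] in lock_torture_init().  NULL read-side hooks mean
      * that no reader kthreads are created.
      *
      *	static struct lock_torture_ops foo_lock_ops = {
      *		.writelock	= torture_foo_lock,
      *		.write_delay	= torture_mutex_delay,
      *		.task_boost	= torture_rt_boost,
      *		.writeunlock	= torture_foo_unlock,
      *		.name		= "foo_lock"
      *	};
      */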
 838
 839/*
 840 * Lock torture writer kthread.  Repeatedly acquires and releases
 841 * the lock, checking for duplicate acquisitions.
 842 */
 843static int lock_torture_writer(void *arg)
 844{
 845	unsigned long j;
 846	unsigned long j1;
 847	u32 lockset_mask;
 848	struct lock_stress_stats *lwsp = arg;
 849	DEFINE_TORTURE_RANDOM(rand);
 850	bool skip_main_lock;
 851	int tid = lwsp - cxt.lwsa;
 852
 853	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
 854	if (!rt_task(current))
 855		set_user_nice(current, MAX_NICE);
 856
 857	do {
 858		if ((torture_random(&rand) & 0xfffff) == 0)
 859			schedule_timeout_uninterruptible(1);
 860
 861		lockset_mask = torture_random(&rand);
 862		/*
 863		 * When using nested_locks, we want to occasionally
 864		 * skip the main lock so we can avoid always serializing
 865		 * the lock chains on that central lock. By skipping the
 866		 * main lock occasionally, we can create different
 867		 * contention patterns (allowing for multiple disjoint
 868		 * blocked trees)
 869		 */
 870		skip_main_lock = (nested_locks &&
 871				 !(torture_random(&rand) % 100));
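     		/* That is, skip the main lock on roughly 1% of iterations. */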
 872
 873		cxt.cur_ops->task_boost(&rand);
 874		if (cxt.cur_ops->nested_lock)
 875			cxt.cur_ops->nested_lock(tid, lockset_mask);
 876
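     		/*
     		 * Optionally (acq_writer_lim > 0) time the main-lock
     		 * acquisition below and warn if it took more than
     		 * acq_writer_lim jiffies.
     		 */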
 877		if (!skip_main_lock) {
 878			if (acq_writer_lim > 0)
 879				j = jiffies;
 880			cxt.cur_ops->writelock(tid);
 881			if (WARN_ON_ONCE(lock_is_write_held))
 882				lwsp->n_lock_fail++;
 883			lock_is_write_held = true;
 884			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
 885				lwsp->n_lock_fail++; /* rare, but... */
 886			if (acq_writer_lim > 0) {
 887				j1 = jiffies;
 888				WARN_ONCE(time_after(j1, j + acq_writer_lim),
 889					  "%s: Lock acquisition took %lu jiffies.\n",
 890					  __func__, j1 - j);
 891			}
 892			lwsp->n_lock_acquired++;
 893
 894			cxt.cur_ops->write_delay(&rand);
 895
 896			lock_is_write_held = false;
 897			WRITE_ONCE(last_lock_release, jiffies);
 898			cxt.cur_ops->writeunlock(tid);
 899		}
 900		if (cxt.cur_ops->nested_unlock)
 901			cxt.cur_ops->nested_unlock(tid, lockset_mask);
 902
 903		stutter_wait("lock_torture_writer");
 904	} while (!torture_must_stop());
 905
 906	cxt.cur_ops->task_boost(NULL); /* reset prio */
 907	torture_kthread_stopping("lock_torture_writer");
 908	return 0;
 909}
 910
 911/*
 912 * Lock torture reader kthread.  Repeatedly acquires and releases
 913 * the reader lock.
 914 */
 915static int lock_torture_reader(void *arg)
 916{
 917	struct lock_stress_stats *lrsp = arg;
 918	int tid = lrsp - cxt.lrsa;
 919	DEFINE_TORTURE_RANDOM(rand);
 920
 921	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
 922	set_user_nice(current, MAX_NICE);
 923
 924	do {
 925		if ((torture_random(&rand) & 0xfffff) == 0)
 926			schedule_timeout_uninterruptible(1);
 927
 928		cxt.cur_ops->readlock(tid);
 929		atomic_inc(&lock_is_read_held);
 930		if (WARN_ON_ONCE(lock_is_write_held))
 931			lrsp->n_lock_fail++; /* rare, but... */
 932
 933		lrsp->n_lock_acquired++;
 934		cxt.cur_ops->read_delay(&rand);
 935		atomic_dec(&lock_is_read_held);
 936		cxt.cur_ops->readunlock(tid);
 937
 938		stutter_wait("lock_torture_reader");
 939	} while (!torture_must_stop());
 940	torture_kthread_stopping("lock_torture_reader");
 941	return 0;
 942}
 943
 944/*
  945 * Create a lock-torture statistics message in the specified buffer.
 946 */
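     /*
      * A resulting line looks like this (numbers illustrative only):
      *   Writes:  Total: 68707  Max/Min: 9004/7522   Fail: 0
      */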
 947static void __torture_print_stats(char *page,
 948				  struct lock_stress_stats *statp, bool write)
 949{
 950	long cur;
 951	bool fail = false;
 952	int i, n_stress;
 953	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
 954	long long sum = 0;
 955
 956	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
 957	for (i = 0; i < n_stress; i++) {
 958		if (data_race(statp[i].n_lock_fail))
 959			fail = true;
 960		cur = data_race(statp[i].n_lock_acquired);
 961		sum += cur;
 962		if (max < cur)
 963			max = cur;
 964		if (min > cur)
 965			min = cur;
 966	}
 967	page += sprintf(page,
 968			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
 969			write ? "Writes" : "Reads ",
 970			sum, max, min,
 971			!onoff_interval && max / 2 > min ? "???" : "",
 972			fail, fail ? "!!!" : "");
 973	if (fail)
 974		atomic_inc(&cxt.n_lock_torture_errors);
 975}
 976
 977/*
 978 * Print torture statistics.  Caller must ensure that there is only one
 979 * call to this function at a given time!!!  This is normally accomplished
 980 * by relying on the module system to only have one copy of the module
 981 * loaded, and then by giving the lock_torture_stats kthread full control
 982 * (or the init/cleanup functions when lock_torture_stats thread is not
 983 * running).
 984 */
 985static void lock_torture_stats_print(void)
 986{
 987	int size = cxt.nrealwriters_stress * 200 + 8192;
 988	char *buf;
 989
 990	if (cxt.cur_ops->readlock)
 991		size += cxt.nrealreaders_stress * 200 + 8192;
 992
 993	buf = kmalloc(size, GFP_KERNEL);
 994	if (!buf) {
 995		pr_err("lock_torture_stats_print: Out of memory, need: %d",
 996		       size);
 997		return;
 998	}
 999
1000	__torture_print_stats(buf, cxt.lwsa, true);
1001	pr_alert("%s", buf);
1002	kfree(buf);
1003
1004	if (cxt.cur_ops->readlock) {
1005		buf = kmalloc(size, GFP_KERNEL);
1006		if (!buf) {
1007			pr_err("lock_torture_stats_print: Out of memory, need: %d",
1008			       size);
1009			return;
1010		}
1011
1012		__torture_print_stats(buf, cxt.lrsa, false);
1013		pr_alert("%s", buf);
1014		kfree(buf);
1015	}
1016}
1017
1018/*
1019 * Periodically prints torture statistics, if periodic statistics printing
1020 * was specified via the stat_interval module parameter.
1021 *
1022 * No need to worry about fullstop here, since this one doesn't reference
1023 * volatile state or register callbacks.
1024 */
1025static int lock_torture_stats(void *arg)
1026{
1027	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
1028	do {
1029		schedule_timeout_interruptible(stat_interval * HZ);
1030		lock_torture_stats_print();
1031		torture_shutdown_absorb("lock_torture_stats");
1032	} while (!torture_must_stop());
1033	torture_kthread_stopping("lock_torture_stats");
1034	return 0;
1035}
1036
1038static inline void
1039lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
1040				const char *tag)
1041{
1042	static cpumask_t cpumask_all;
1043	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
1044	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;
1045
1046	cpumask_setall(&cpumask_all);
1047	pr_alert("%s" TORTURE_FLAG
1048		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
1049		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
1050		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
1051		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
1052		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
1053		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
1054		 verbose, writer_fifo);
1055}
1056
1057// If requested, maintain call_rcu() chains to keep a grace period always
1058// in flight.  These increase the probability of getting an RCU CPU stall
1059// warning and associated diagnostics when a locking primitive stalls.
1060
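     // For example (hypothetical invocation), booting a test kernel with
     // locktorture.call_rcu_chains=2 keeps two such self-propagating
     // callbacks, and hence a grace period, in flight for the whole run.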
1061static void call_rcu_chain_cb(struct rcu_head *rhp)
1062{
1063	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);
1064
1065	if (!smp_load_acquire(&crcp->crc_stop)) {
1066		(void)start_poll_synchronize_rcu(); // Start one grace period...
1067		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
1068	}
1069}
1070
1071// Start the requested number of call_rcu() chains.
1072static int call_rcu_chain_init(void)
1073{
1074	int i;
1075
1076	if (call_rcu_chains <= 0)
1077		return 0;
1078	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
1079	if (!call_rcu_chain_list)
1080		return -ENOMEM;
1081	for (i = 0; i < call_rcu_chains; i++) {
1082		call_rcu_chain_list[i].crc_stop = false;
1083		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
1084	}
1085	return 0;
1086}
1087
1088// Stop all of the call_rcu() chains.
1089static void call_rcu_chain_cleanup(void)
1090{
1091	int i;
1092
1093	if (!call_rcu_chain_list)
1094		return;
1095	for (i = 0; i < call_rcu_chains; i++)
1096		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
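     	/* rcu_barrier() waits for each chain's final callback to finish. */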
1097	rcu_barrier();
1098	kfree(call_rcu_chain_list);
1099	call_rcu_chain_list = NULL;
1100}
1101
1102static void lock_torture_cleanup(void)
1103{
1104	int i;
1105
1106	if (torture_cleanup_begin())
1107		return;
1108
1109	/*
1110	 * Indicates early cleanup, meaning that the test has not run,
1111	 * such as when passing bogus args when loading the module.
 1112	 * However, cxt.cur_ops->init() may have been invoked, so besides
 1113	 * performing the underlying torture-specific cleanups, cur_ops->exit()
 1114	 * will be invoked if needed.
1115	 */
1116	if (!cxt.lwsa && !cxt.lrsa)
1117		goto end;
1118
1119	if (writer_tasks) {
1120		for (i = 0; i < cxt.nrealwriters_stress; i++)
1121			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
1122		kfree(writer_tasks);
1123		writer_tasks = NULL;
1124	}
1125
1126	if (reader_tasks) {
1127		for (i = 0; i < cxt.nrealreaders_stress; i++)
1128			torture_stop_kthread(lock_torture_reader,
1129					     reader_tasks[i]);
1130		kfree(reader_tasks);
1131		reader_tasks = NULL;
1132	}
1133
1134	torture_stop_kthread(lock_torture_stats, stats_task);
1135	lock_torture_stats_print();  /* -After- the stats thread is stopped! */
1136
1137	if (atomic_read(&cxt.n_lock_torture_errors))
1138		lock_torture_print_module_parms(cxt.cur_ops,
1139						"End of test: FAILURE");
1140	else if (torture_onoff_failures())
1141		lock_torture_print_module_parms(cxt.cur_ops,
1142						"End of test: LOCK_HOTPLUG");
1143	else
1144		lock_torture_print_module_parms(cxt.cur_ops,
1145						"End of test: SUCCESS");
1146
1147	kfree(cxt.lwsa);
1148	cxt.lwsa = NULL;
1149	kfree(cxt.lrsa);
1150	cxt.lrsa = NULL;
1151
1152	call_rcu_chain_cleanup();
1153
1154end:
1155	if (cxt.init_called) {
1156		if (cxt.cur_ops->exit)
1157			cxt.cur_ops->exit();
1158		cxt.init_called = false;
1159	}
1160	torture_cleanup_end();
1161}
1162
1163static int __init lock_torture_init(void)
1164{
1165	int i, j;
1166	int firsterr = 0;
1167	static struct lock_torture_ops *torture_ops[] = {
1168		&lock_busted_ops,
1169		&spin_lock_ops, &spin_lock_irq_ops,
1170		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
1171		&rw_lock_ops, &rw_lock_irq_ops,
1172		&mutex_lock_ops,
1173		&ww_mutex_lock_ops,
1174#ifdef CONFIG_RT_MUTEXES
1175		&rtmutex_lock_ops,
1176#endif
1177		&rwsem_lock_ops,
1178		&percpu_rwsem_lock_ops,
1179	};
1180
1181	if (!torture_init_begin(torture_type, verbose))
1182		return -EBUSY;
1183
1184	/* Process args and tell the world that the torturer is on the job. */
1185	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1186		cxt.cur_ops = torture_ops[i];
1187		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
1188			break;
1189	}
1190	if (i == ARRAY_SIZE(torture_ops)) {
1191		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
1192			 torture_type);
1193		pr_alert("lock-torture types:");
1194		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1195			pr_alert(" %s", torture_ops[i]->name);
1196		pr_alert("\n");
1197		firsterr = -EINVAL;
1198		goto unwind;
1199	}
1200
1201	if (nwriters_stress == 0 &&
1202	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
1203		pr_alert("lock-torture: must run at least one locking thread\n");
1204		firsterr = -EINVAL;
1205		goto unwind;
1206	}
1207
1208	if (nwriters_stress >= 0)
1209		cxt.nrealwriters_stress = nwriters_stress;
1210	else
1211		cxt.nrealwriters_stress = 2 * num_online_cpus();
1212
1213	if (cxt.cur_ops->init) {
1214		cxt.cur_ops->init();
1215		cxt.init_called = true;
1216	}
1217
1218#ifdef CONFIG_DEBUG_MUTEXES
1219	if (str_has_prefix(torture_type, "mutex"))
1220		cxt.debug_lock = true;
1221#endif
1222#ifdef CONFIG_DEBUG_RT_MUTEXES
1223	if (str_has_prefix(torture_type, "rtmutex"))
1224		cxt.debug_lock = true;
1225#endif
1226#ifdef CONFIG_DEBUG_SPINLOCK
1227	if ((str_has_prefix(torture_type, "spin")) ||
1228	    (str_has_prefix(torture_type, "rw_lock")))
1229		cxt.debug_lock = true;
1230#endif
1231
1232	/* Initialize the statistics so that each run gets its own numbers. */
1233	if (nwriters_stress) {
1234		lock_is_write_held = false;
1235		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
1236					 sizeof(*cxt.lwsa),
1237					 GFP_KERNEL);
1238		if (cxt.lwsa == NULL) {
1239			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
1240			firsterr = -ENOMEM;
1241			goto unwind;
1242		}
1243
1244		for (i = 0; i < cxt.nrealwriters_stress; i++) {
1245			cxt.lwsa[i].n_lock_fail = 0;
1246			cxt.lwsa[i].n_lock_acquired = 0;
1247		}
1248	}
1249
1250	if (cxt.cur_ops->readlock) {
1251		if (nreaders_stress >= 0)
1252			cxt.nrealreaders_stress = nreaders_stress;
1253		else {
1254			/*
 1255			 * By default, distribute readers and writers
 1256			 * evenly, while still running the same total
 1257			 * number of threads as the writer-only default.
1258			 */
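     			/* e.g., 4 CPUs with all-default parameters yield
     			 * 4 writers + 4 readers rather than 8 writers. */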
1259			if (nwriters_stress < 0) /* user doesn't care */
1260				cxt.nrealwriters_stress = num_online_cpus();
1261			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
1262		}
1263
1264		if (nreaders_stress) {
1265			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
1266						 sizeof(*cxt.lrsa),
1267						 GFP_KERNEL);
1268			if (cxt.lrsa == NULL) {
1269				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
1270				firsterr = -ENOMEM;
1271				kfree(cxt.lwsa);
1272				cxt.lwsa = NULL;
1273				goto unwind;
1274			}
1275
1276			for (i = 0; i < cxt.nrealreaders_stress; i++) {
1277				cxt.lrsa[i].n_lock_fail = 0;
1278				cxt.lrsa[i].n_lock_acquired = 0;
1279			}
1280		}
1281	}
1282
1283	firsterr = call_rcu_chain_init();
1284	if (torture_init_error(firsterr))
1285		goto unwind;
1286
1287	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
1288
1289	/* Prepare torture context. */
1290	if (onoff_interval > 0) {
1291		firsterr = torture_onoff_init(onoff_holdoff * HZ,
1292					      onoff_interval * HZ, NULL);
1293		if (torture_init_error(firsterr))
1294			goto unwind;
1295	}
1296	if (shuffle_interval > 0) {
1297		firsterr = torture_shuffle_init(shuffle_interval);
1298		if (torture_init_error(firsterr))
1299			goto unwind;
1300	}
1301	if (shutdown_secs > 0) {
1302		firsterr = torture_shutdown_init(shutdown_secs,
1303						 lock_torture_cleanup);
1304		if (torture_init_error(firsterr))
1305			goto unwind;
1306	}
1307	if (stutter > 0) {
1308		firsterr = torture_stutter_init(stutter, stutter);
1309		if (torture_init_error(firsterr))
1310			goto unwind;
1311	}
1312
1313	if (nwriters_stress) {
1314		writer_tasks = kcalloc(cxt.nrealwriters_stress,
1315				       sizeof(writer_tasks[0]),
1316				       GFP_KERNEL);
1317		if (writer_tasks == NULL) {
1318			TOROUT_ERRSTRING("writer_tasks: Out of memory");
1319			firsterr = -ENOMEM;
1320			goto unwind;
1321		}
1322	}
1323
1324	/* cap nested_locks to MAX_NESTED_LOCKS */
1325	if (nested_locks > MAX_NESTED_LOCKS)
1326		nested_locks = MAX_NESTED_LOCKS;
1327
1328	if (cxt.cur_ops->readlock) {
1329		reader_tasks = kcalloc(cxt.nrealreaders_stress,
1330				       sizeof(reader_tasks[0]),
1331				       GFP_KERNEL);
1332		if (reader_tasks == NULL) {
1333			TOROUT_ERRSTRING("reader_tasks: Out of memory");
1334			kfree(writer_tasks);
1335			writer_tasks = NULL;
1336			firsterr = -ENOMEM;
1337			goto unwind;
1338		}
1339	}
1340
1341	/*
1342	 * Create the kthreads and start torturing (oh, those poor little locks).
1343	 *
1344	 * TODO: Note that we interleave writers with readers, giving writers a
 1345	 * slight advantage, by creating their kthreads first. This could be
 1346	 * modified for very specific needs, or the user could even be allowed
 1347	 * to choose the policy, if ever desired.
1348	 */
1349	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
1350		    j < cxt.nrealreaders_stress; i++, j++) {
1351		if (i >= cxt.nrealwriters_stress)
1352			goto create_reader;
1353
1354		/* Create writer. */
1355		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
1356						     writer_tasks[i],
1357						     writer_fifo ? sched_set_fifo : NULL);
1358		if (torture_init_error(firsterr))
1359			goto unwind;
1360		if (cpumask_nonempty(bind_writers))
1361			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);
1362
1363	create_reader:
1364		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
1365			continue;
1366		/* Create reader. */
1367		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
1368						  reader_tasks[j]);
1369		if (torture_init_error(firsterr))
1370			goto unwind;
1371		if (cpumask_nonempty(bind_readers))
1372			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
1373	}
1374	if (stat_interval > 0) {
1375		firsterr = torture_create_kthread(lock_torture_stats, NULL,
1376						  stats_task);
1377		if (torture_init_error(firsterr))
1378			goto unwind;
1379	}
1380	torture_init_end();
1381	return 0;
1382
1383unwind:
1384	torture_init_end();
1385	lock_torture_cleanup();
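     	/*
     	 * In scripted runs that set shutdown_secs, a failed init would
     	 * otherwise leave the test system up indefinitely, so power off.
     	 */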
1386	if (shutdown_secs) {
1387		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
1388		kernel_power_off();
1389	}
1390	return firsterr;
1391}
1392
1393module_init(lock_torture_init);
1394module_exit(lock_torture_cleanup);