// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */

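/*
 * For example (hypothetical invocations, not taken from this file):
 * booting with "nr_cpus=8 rcuscale.nreaders=0" gives an update-only
 * test with eight writers, while "nr_cpus=8 rcuscale.nwriters=0"
 * gives a read-only test with eight readers.
 */
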
#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif
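
/*
 * Thus, shutdown defaults to off when rcuscale is built as a module
 * (so that a manual "modprobe" does not power off the machine) and to
 * on when built in, which suits automated runs that collect console
 * output and then shut the system down.
 */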

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	struct task_struct *(*rso_gp_kthread)(void);
	const char *name;
};

static struct rcu_scale_ops *cur_ops;
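
/*
 * cur_ops is assigned once, at init time: rcu_scale_init() scans its
 * scale_ops[] array for the entry whose ->name matches the scale_type
 * module parameter, so e.g. (a hypothetical invocation) "modprobe
 * rcuscale scale_type=srcud" selects srcud_ops below.
 */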

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu_hurry,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcud"
};
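
/*
 * The difference between "srcu" and "srcud": srcu_ops points srcu_ctlp
 * at a statically allocated srcu_struct (DEFINE_STATIC_SRCU), while
 * srcud_ops dynamically initializes the srcud structure at init time
 * and cleans it up at the end of the test.
 */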

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.rso_gp_kthread	= get_rcu_tasks_gp_kthread,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for RCU-tasks-rude scalability testing.
 */

static int tasks_rude_scale_read_lock(void)
{
	return 0;
}

static void tasks_rude_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_rude_ops = {
	.ptype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_rude_scale_read_lock,
	.readunlock	= tasks_rude_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks_rude,
	.gp_barrier	= rcu_barrier_tasks_rude,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.rso_gp_kthread	= get_rcu_tasks_rude_gp_kthread,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static struct rcu_scale_ops tasks_tracing_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_trace_scale_read_lock,
	.readunlock	= tasks_trace_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks_trace,
	.gp_barrier	= rcu_barrier_tasks_trace,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.rso_gp_kthread	= get_rcu_tasks_trace_gp_kthread,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}
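
/*
 * Flow control for the gp_async case: rcu_scale_writer() caps the
 * number of outstanding callbacks at gp_async_max per CPU, tracked in
 * n_async_inflight (incremented just before ->async() is invoked and
 * decremented in the callback above).  When the cap is reached, the
 * writer falls back to ->gp_barrier() to drain the callbacks.
 */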

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	unsigned long jdone;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	DEFINE_TORTURE_RANDOM(tr);
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	current->flags |= PF_NO_SETAFFINITY;
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_idle(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	jdone = jiffies + minruntime * HZ;
	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		if (writer_holdoff_jiffies)
			schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}
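
/*
 * Measurement bookkeeping: each writer stores one per-grace-period
 * duration (in nanoseconds) into writer_durations[me], recording at
 * most MAX_MEAS (10000) samples and declaring itself done only after
 * taking at least MIN_MEAS (100) samples and running for at least
 * minruntime seconds.
 */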

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
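
/*
 * Worked examples, assuming eight online CPUs: compute_real(4) == 4,
 * compute_real(-1) == 8 (that is, 8 + 1 - 1), compute_real(-3) == 6,
 * and compute_real(-20) is clamped to the minimum of 1.
 */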

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for a given
 * number of iterations and measure the total time and the number of grace periods
 * needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");

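/*
 * A hypothetical invocation: "modprobe rcuscale kfree_rcu_test=1
 * kfree_nthreads=4" starts four kthreads, each performing kfree_loops
 * loops of kfree_alloc_num allocations, freeing each allocation via
 * kfree_rcu() (or via call_rcu() when kfree_by_call_rcu is set).
 */
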
static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
static struct task_struct *kthread_tp;
static u64 kthread_stime;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

/* Used if doing RCU-kfree'ing via call_rcu(). */
static void kfree_call_rcu(struct rcu_head *rh)
{
	struct kfree_obj *obj = container_of(rh, struct kfree_obj, rh);

	kfree(obj);
}

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			if (kfree_by_call_rcu) {
				call_rcu(&(alloc_ptr->rh), kfree_call_rcu);
				continue;
			}

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
					(kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu_mightsleep(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
		       (unsigned long long)(end_time - start_time), kfree_loops,
		       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
		       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}
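
/*
 * The memory-footprint arithmetic above: si_mem_available() returns a
 * page count, and shifting by (20 - PAGE_SHIFT) converts pages to
 * megabytes; with 4 KB pages (PAGE_SHIFT == 12) that is a shift by 8,
 * i.e. division by the 256 pages that make up one megabyte.
 */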

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq,
			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

// Used if doing RCU-kfree'ing via call_rcu().
static unsigned long jiffies_at_lazy_cb;
static struct rcu_head lazy_test1_rh;
static int rcu_lazy_test1_cb_called;
static void call_rcu_lazy_test1(struct rcu_head *rh)
{
	jiffies_at_lazy_cb = jiffies;
	WRITE_ONCE(rcu_lazy_test1_cb_called, 1);
}
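
/*
 * Laziness self-test expectation: kfree_scale_init() below sets the
 * lazy-flush interval to 2 * HZ, posts the callback above with
 * call_rcu(), and then checks that it ran no sooner than 2 seconds
 * and no later than roughly 3 seconds afterward; either violation
 * trips a WARN_ON_ONCE() and aborts the test.
 */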

static int __init
kfree_scale_init(void)
{
	int firsterr = 0;
	long i;
	unsigned long jif_start;
	unsigned long orig_jif;

	pr_alert("%s" SCALE_FLAG
		 "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
		 scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);

	// Also, do a quick self-test to ensure laziness is as much as
	// expected.
	if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
		pr_alert("CONFIG_RCU_LAZY is disabled, falling back to kfree_rcu() for delayed RCU kfree'ing\n");
		kfree_by_call_rcu = 0;
	}

	if (kfree_by_call_rcu) {
		/* do a test to check the timeout. */
		orig_jif = rcu_get_jiffies_lazy_flush();

		rcu_set_jiffies_lazy_flush(2 * HZ);
		rcu_barrier();

		jif_start = jiffies;
		jiffies_at_lazy_cb = 0;
		call_rcu(&lazy_test1_rh, call_rcu_lazy_test1);

		smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);

		rcu_set_jiffies_lazy_flush(orig_jif);

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
			WARN_ON_ONCE(1);
			return -1;
		}

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
			WARN_ON_ONCE(1);
			return -1;
		}
	}

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu, kfree_by_call_rcu=%d\n",
			kfree_mult * sizeof(struct kfree_obj),
			kfree_by_call_rcu);

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
			       GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	// If built-in, just report all of the GP kthread's CPU time.
	if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
		kthread_tp = cur_ops->rso_gp_kthread();
	if (kthread_tp) {
		u32 ns;
		u64 us;

		kthread_stime = kthread_tp->stime - kthread_stime;
		us = div_u64_rem(kthread_stime, 1000, &ns);
		pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
		show_rcu_gp_kthreads();
	}
	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					scale_type, SCALE_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (cur_ops->rso_gp_kthread) {
		kthread_tp = cur_ops->rso_gp_kthread();
		if (kthread_tp)
			kthread_stime = kthread_tp->stime;
	}
	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);