kernel/rcu/refscale.c, v6.9.4
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// Scalability test comparing RCU vs other mechanisms
   4// for acquiring references on objects.
   5//
   6// Copyright (C) Google, 2020.
   7//
   8// Author: Joel Fernandes <joel@joelfernandes.org>
   9
  10#define pr_fmt(fmt) fmt
  11
  12#include <linux/atomic.h>
  13#include <linux/bitops.h>
  14#include <linux/completion.h>
  15#include <linux/cpu.h>
  16#include <linux/delay.h>
  17#include <linux/err.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/kthread.h>
  21#include <linux/kernel.h>
  22#include <linux/mm.h>
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/notifier.h>
  26#include <linux/percpu.h>
  27#include <linux/rcupdate.h>
  28#include <linux/rcupdate_trace.h>
  29#include <linux/reboot.h>
  30#include <linux/sched.h>
  31#include <linux/spinlock.h>
  32#include <linux/smp.h>
  33#include <linux/stat.h>
  34#include <linux/srcu.h>
  35#include <linux/slab.h>
  36#include <linux/torture.h>
  37#include <linux/types.h>
  38
  39#include "rcu.h"
  40
  41#define SCALE_FLAG "-ref-scale: "
  42
  43#define SCALEOUT(s, x...) \
  44	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)
  45
  46#define VERBOSE_SCALEOUT(s, x...) \
  47	do { \
  48		if (verbose) \
  49			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
  50	} while (0)
  51
  52static atomic_t verbose_batch_ctr;
  53
  54#define VERBOSE_SCALEOUT_BATCH(s, x...)							\
  55do {											\
  56	if (verbose &&									\
  57	    (verbose_batched <= 0 ||							\
  58	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {		\
  59		schedule_timeout_uninterruptible(1);					\
  60		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x);			\
  61	}										\
  62} while (0)
  63
  64#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)
  65
  66MODULE_LICENSE("GPL");
  67MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");
  68
  69static char *scale_type = "rcu";
  70module_param(scale_type, charp, 0444);
   71MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock).");
  72
  73torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
  74torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");
  75
  76// Wait until there are multiple CPUs before starting test.
  77torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
  78	      "Holdoff time before test start (s)");
  79// Number of typesafe_lookup structures, that is, the degree of concurrency.
  80torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
   81// Number of loops per experiment; all readers execute operations concurrently.
  82torture_param(long, loops, 10000, "Number of loops per experiment.");
  83// Number of readers, with -1 defaulting to about 75% of the CPUs.
  84torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
  85// Number of runs.
  86torture_param(int, nruns, 30, "Number of experiments to run.");
  87// Reader delay in nanoseconds, 0 for no delay.
  88torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");
  89
  90#ifdef MODULE
  91# define REFSCALE_SHUTDOWN 0
  92#else
  93# define REFSCALE_SHUTDOWN 1
  94#endif
  95
  96torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
  97	      "Shutdown at end of scalability tests.");
  98
  99struct reader_task {
 100	struct task_struct *task;
 101	int start_reader;
 102	wait_queue_head_t wq;
 103	u64 last_duration_ns;
 104};
 105
 106static struct task_struct *shutdown_task;
 107static wait_queue_head_t shutdown_wq;
 108
 109static struct task_struct *main_task;
 110static wait_queue_head_t main_wq;
 111static int shutdown_start;
 112
 113static struct reader_task *reader_tasks;
 114
 115// Number of readers that are part of the current experiment.
 116static atomic_t nreaders_exp;
 117
  118// Used to wait for all threads to start.
 119static atomic_t n_init;
 120static atomic_t n_started;
 121static atomic_t n_warmedup;
 122static atomic_t n_cooleddown;
 123
 124// Track which experiment is currently running.
 125static int exp_idx;
 126
 127// Operations vector for selecting different types of tests.
 128struct ref_scale_ops {
 129	bool (*init)(void);
 130	void (*cleanup)(void);
 131	void (*readsection)(const int nloops);
 132	void (*delaysection)(const int nloops, const int udl, const int ndl);
 133	const char *name;
 134};
 135
 136static struct ref_scale_ops *cur_ops;
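Each mechanism under test supplies one of these ops structures, and ref_scale_init() selects the one whose ->name matches the scale_type module parameter. As a sketch of how a new mechanism would plug in, here is a hypothetical no-op flavor (the "nop" name and the ref_nop_* functions are invented for illustration and are not part of this file; a real addition would also need an entry in ref_scale_init()'s scale_ops[] array):

static void ref_nop_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		barrier();	// Keep the otherwise-empty loop intact.
}

static void ref_nop_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops nop_ops = {
	.readsection	= ref_nop_section,
	.delaysection	= ref_nop_delay_section,
	.name		= "nop"
};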
 137
 138static void un_delay(const int udl, const int ndl)
 139{
 140	if (udl)
 141		udelay(udl);
 142	if (ndl)
 143		ndelay(ndl);
 144}
 145
 146static void ref_rcu_read_section(const int nloops)
 147{
 148	int i;
 149
 150	for (i = nloops; i >= 0; i--) {
 151		rcu_read_lock();
 152		rcu_read_unlock();
 153	}
 154}
 155
 156static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
 157{
 158	int i;
 159
 160	for (i = nloops; i >= 0; i--) {
 161		rcu_read_lock();
 162		un_delay(udl, ndl);
 163		rcu_read_unlock();
 164	}
 165}
 166
 167static bool rcu_sync_scale_init(void)
 168{
 169	return true;
 170}
 171
 172static struct ref_scale_ops rcu_ops = {
 173	.init		= rcu_sync_scale_init,
 174	.readsection	= ref_rcu_read_section,
 175	.delaysection	= ref_rcu_delay_section,
 176	.name		= "rcu"
 177};
 178
 179// Definitions for SRCU ref scale testing.
 180DEFINE_STATIC_SRCU(srcu_refctl_scale);
 181static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;
 182
 183static void srcu_ref_scale_read_section(const int nloops)
 184{
 185	int i;
 186	int idx;
 187
 188	for (i = nloops; i >= 0; i--) {
 189		idx = srcu_read_lock(srcu_ctlp);
 190		srcu_read_unlock(srcu_ctlp, idx);
 191	}
 192}
 193
 194static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 195{
 196	int i;
 197	int idx;
 198
 199	for (i = nloops; i >= 0; i--) {
 200		idx = srcu_read_lock(srcu_ctlp);
 201		un_delay(udl, ndl);
 202		srcu_read_unlock(srcu_ctlp, idx);
 203	}
 204}
 205
 206static struct ref_scale_ops srcu_ops = {
 207	.init		= rcu_sync_scale_init,
 208	.readsection	= srcu_ref_scale_read_section,
 209	.delaysection	= srcu_ref_scale_delay_section,
 210	.name		= "srcu"
 211};
 212
 213#ifdef CONFIG_TASKS_RCU
 214
 215// Definitions for RCU Tasks ref scale testing: Empty read markers.
 216// These definitions also work for RCU Rude readers.
 217static void rcu_tasks_ref_scale_read_section(const int nloops)
 218{
 219	int i;
 220
 221	for (i = nloops; i >= 0; i--)
 222		continue;
 223}
 224
 225static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 226{
 227	int i;
 228
 229	for (i = nloops; i >= 0; i--)
 230		un_delay(udl, ndl);
 231}
 232
 233static struct ref_scale_ops rcu_tasks_ops = {
 234	.init		= rcu_sync_scale_init,
 235	.readsection	= rcu_tasks_ref_scale_read_section,
 236	.delaysection	= rcu_tasks_ref_scale_delay_section,
 237	.name		= "rcu-tasks"
 238};
 239
 240#define RCU_TASKS_OPS &rcu_tasks_ops,
 241
 242#else // #ifdef CONFIG_TASKS_RCU
 243
 244#define RCU_TASKS_OPS
 245
 246#endif // #else // #ifdef CONFIG_TASKS_RCU
 247
 248#ifdef CONFIG_TASKS_TRACE_RCU
 249
 250// Definitions for RCU Tasks Trace ref scale testing.
 251static void rcu_trace_ref_scale_read_section(const int nloops)
 252{
 253	int i;
 254
 255	for (i = nloops; i >= 0; i--) {
 256		rcu_read_lock_trace();
 257		rcu_read_unlock_trace();
 258	}
 259}
 260
 261static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 262{
 263	int i;
 264
 265	for (i = nloops; i >= 0; i--) {
 266		rcu_read_lock_trace();
 267		un_delay(udl, ndl);
 268		rcu_read_unlock_trace();
 269	}
 270}
 271
 272static struct ref_scale_ops rcu_trace_ops = {
 273	.init		= rcu_sync_scale_init,
 274	.readsection	= rcu_trace_ref_scale_read_section,
 275	.delaysection	= rcu_trace_ref_scale_delay_section,
 276	.name		= "rcu-trace"
 277};
 278
 279#define RCU_TRACE_OPS &rcu_trace_ops,
 280
 281#else // #ifdef CONFIG_TASKS_TRACE_RCU
 282
 283#define RCU_TRACE_OPS
 284
 285#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
 286
 287// Definitions for reference count
 288static atomic_t refcnt;
 289
 290static void ref_refcnt_section(const int nloops)
 291{
 292	int i;
 293
 294	for (i = nloops; i >= 0; i--) {
 295		atomic_inc(&refcnt);
 296		atomic_dec(&refcnt);
 297	}
 298}
 299
 300static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
 301{
 302	int i;
 303
 304	for (i = nloops; i >= 0; i--) {
 305		atomic_inc(&refcnt);
 306		un_delay(udl, ndl);
 307		atomic_dec(&refcnt);
 308	}
 309}
 310
 311static struct ref_scale_ops refcnt_ops = {
 312	.init		= rcu_sync_scale_init,
 313	.readsection	= ref_refcnt_section,
 314	.delaysection	= ref_refcnt_delay_section,
 315	.name		= "refcnt"
 316};
 317
 318// Definitions for rwlock
 319static rwlock_t test_rwlock;
 320
 321static bool ref_rwlock_init(void)
 322{
 323	rwlock_init(&test_rwlock);
 324	return true;
 325}
 326
 327static void ref_rwlock_section(const int nloops)
 328{
 329	int i;
 330
 331	for (i = nloops; i >= 0; i--) {
 332		read_lock(&test_rwlock);
 333		read_unlock(&test_rwlock);
 334	}
 335}
 336
 337static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
 338{
 339	int i;
 340
 341	for (i = nloops; i >= 0; i--) {
 342		read_lock(&test_rwlock);
 343		un_delay(udl, ndl);
 344		read_unlock(&test_rwlock);
 345	}
 346}
 347
 348static struct ref_scale_ops rwlock_ops = {
 349	.init		= ref_rwlock_init,
 350	.readsection	= ref_rwlock_section,
 351	.delaysection	= ref_rwlock_delay_section,
 352	.name		= "rwlock"
 353};
 354
 355// Definitions for rwsem
 356static struct rw_semaphore test_rwsem;
 357
 358static bool ref_rwsem_init(void)
 359{
 360	init_rwsem(&test_rwsem);
 361	return true;
 362}
 363
 364static void ref_rwsem_section(const int nloops)
 365{
 366	int i;
 367
 368	for (i = nloops; i >= 0; i--) {
 369		down_read(&test_rwsem);
 370		up_read(&test_rwsem);
 371	}
 372}
 373
 374static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
 375{
 376	int i;
 377
 378	for (i = nloops; i >= 0; i--) {
 379		down_read(&test_rwsem);
 380		un_delay(udl, ndl);
 381		up_read(&test_rwsem);
 382	}
 383}
 384
 385static struct ref_scale_ops rwsem_ops = {
 386	.init		= ref_rwsem_init,
 387	.readsection	= ref_rwsem_section,
 388	.delaysection	= ref_rwsem_delay_section,
 389	.name		= "rwsem"
 390};
 391
 392// Definitions for global spinlock
 393static DEFINE_RAW_SPINLOCK(test_lock);
 394
 395static void ref_lock_section(const int nloops)
 396{
 397	int i;
 398
 399	preempt_disable();
 400	for (i = nloops; i >= 0; i--) {
 401		raw_spin_lock(&test_lock);
 402		raw_spin_unlock(&test_lock);
 403	}
 404	preempt_enable();
 405}
 406
 407static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
 408{
 409	int i;
 410
 411	preempt_disable();
 412	for (i = nloops; i >= 0; i--) {
 413		raw_spin_lock(&test_lock);
 414		un_delay(udl, ndl);
 415		raw_spin_unlock(&test_lock);
 416	}
 417	preempt_enable();
 418}
 419
 420static struct ref_scale_ops lock_ops = {
 421	.readsection	= ref_lock_section,
 422	.delaysection	= ref_lock_delay_section,
 423	.name		= "lock"
 424};
 425
 426// Definitions for global irq-save spinlock
 427
 428static void ref_lock_irq_section(const int nloops)
 429{
 430	unsigned long flags;
 431	int i;
 432
 433	preempt_disable();
 434	for (i = nloops; i >= 0; i--) {
 435		raw_spin_lock_irqsave(&test_lock, flags);
 436		raw_spin_unlock_irqrestore(&test_lock, flags);
 437	}
 438	preempt_enable();
 439}
 440
 441static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
 442{
 443	unsigned long flags;
 444	int i;
 445
 446	preempt_disable();
 447	for (i = nloops; i >= 0; i--) {
 448		raw_spin_lock_irqsave(&test_lock, flags);
 449		un_delay(udl, ndl);
 450		raw_spin_unlock_irqrestore(&test_lock, flags);
 451	}
 452	preempt_enable();
 453}
 454
 455static struct ref_scale_ops lock_irq_ops = {
 456	.readsection	= ref_lock_irq_section,
 457	.delaysection	= ref_lock_irq_delay_section,
 458	.name		= "lock-irq"
 459};
 460
  461// Definitions for acquire-release.
 462static DEFINE_PER_CPU(unsigned long, test_acqrel);
 463
 464static void ref_acqrel_section(const int nloops)
 465{
 466	unsigned long x;
 467	int i;
 468
 469	preempt_disable();
 470	for (i = nloops; i >= 0; i--) {
 471		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
 472		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
 473	}
 474	preempt_enable();
 475}
 476
 477static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
 478{
 479	unsigned long x;
 480	int i;
 481
 482	preempt_disable();
 483	for (i = nloops; i >= 0; i--) {
 484		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
 485		un_delay(udl, ndl);
 486		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
 487	}
 488	preempt_enable();
 489}
 490
 491static struct ref_scale_ops acqrel_ops = {
 492	.readsection	= ref_acqrel_section,
 493	.delaysection	= ref_acqrel_delay_section,
 494	.name		= "acqrel"
 495};
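The acqrel test above measures one load-acquire/store-release round trip on a per-CPU counter. A minimal userspace analog of that pair, assuming C11 atomics (illustrative only; the counter and function name below are invented for this sketch):

#include <stdatomic.h>

static _Atomic unsigned long counter;

// One acquire/release round trip, mirroring the
// smp_load_acquire()/smp_store_release() pair above.
static void acqrel_once(void)
{
	unsigned long x = atomic_load_explicit(&counter, memory_order_acquire);

	atomic_store_explicit(&counter, x + 1, memory_order_release);
}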
 496
 497static volatile u64 stopopts;
 498
 499static void ref_clock_section(const int nloops)
 500{
 501	u64 x = 0;
 502	int i;
 503
 504	preempt_disable();
 505	for (i = nloops; i >= 0; i--)
 506		x += ktime_get_real_fast_ns();
 507	preempt_enable();
 508	stopopts = x;
 509}
 510
 511static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
 512{
 513	u64 x = 0;
 514	int i;
 515
 516	preempt_disable();
 517	for (i = nloops; i >= 0; i--) {
 518		x += ktime_get_real_fast_ns();
 519		un_delay(udl, ndl);
 520	}
 521	preempt_enable();
 522	stopopts = x;
 523}
 524
 525static struct ref_scale_ops clock_ops = {
 526	.readsection	= ref_clock_section,
 527	.delaysection	= ref_clock_delay_section,
 528	.name		= "clock"
 529};
 530
 531static void ref_jiffies_section(const int nloops)
 532{
 533	u64 x = 0;
 534	int i;
 535
 536	preempt_disable();
 537	for (i = nloops; i >= 0; i--)
 538		x += jiffies;
 539	preempt_enable();
 540	stopopts = x;
 541}
 542
 543static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
 544{
 545	u64 x = 0;
 546	int i;
 547
 548	preempt_disable();
 549	for (i = nloops; i >= 0; i--) {
 550		x += jiffies;
 551		un_delay(udl, ndl);
 552	}
 553	preempt_enable();
 554	stopopts = x;
 555}
 556
 557static struct ref_scale_ops jiffies_ops = {
 558	.readsection	= ref_jiffies_section,
 559	.delaysection	= ref_jiffies_delay_section,
 560	.name		= "jiffies"
 561};
 562
 563////////////////////////////////////////////////////////////////////////
 564//
 565// Methods leveraging SLAB_TYPESAFE_BY_RCU.
 566//
 567
 568// Item to look up in a typesafe manner.  Array of pointers to these.
 569struct refscale_typesafe {
 570	atomic_t rts_refctr;  // Used by all flavors
 571	spinlock_t rts_lock;
 572	seqlock_t rts_seqlock;
 573	unsigned int a;
 574	unsigned int b;
 575};
 576
 577static struct kmem_cache *typesafe_kmem_cachep;
 578static struct refscale_typesafe **rtsarray;
 579static long rtsarray_size;
 580static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
 581static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
 582static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);
 583
 584// Conditionally acquire an explicit in-structure reference count.
 585static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
 586{
 587	return atomic_inc_not_zero(&rtsp->rts_refctr);
 588}
 589
 590// Unconditionally release an explicit in-structure reference count.
 591static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
 592{
 593	if (!atomic_dec_return(&rtsp->rts_refctr)) {
 594		WRITE_ONCE(rtsp->a, rtsp->a + 1);
 595		kmem_cache_free(typesafe_kmem_cachep, rtsp);
 596	}
 597	return true;
 598}
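The conditional acquire above relies on atomic_inc_not_zero(): a reference is taken only if the count is still nonzero, so a structure whose count has reached zero (and may already have been freed and reused) is never resurrected. A userspace sketch of that primitive, assuming C11 atomics (the function name is invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>

static bool ref_acquire_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	// Retry the increment until it succeeds, but give up the moment
	// the count is observed to be zero.
	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;	// Reference successfully taken.
	}
	return false;			// Object is dying; caller must retry the lookup.
}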
 599
 600// Unconditionally acquire an explicit in-structure spinlock.
 601static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
 602{
 603	spin_lock(&rtsp->rts_lock);
 604	return true;
 605}
 606
 607// Unconditionally release an explicit in-structure spinlock.
 608static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
 609{
 610	spin_unlock(&rtsp->rts_lock);
 611	return true;
 612}
 613
 614// Unconditionally acquire an explicit in-structure sequence lock.
 615static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
 616{
 617	*start = read_seqbegin(&rtsp->rts_seqlock);
 618	return true;
 619}
 620
 621// Conditionally release an explicit in-structure sequence lock.  Return
 622// true if this release was successful, that is, if no retry is required.
 623static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
 624{
 625	return !read_seqretry(&rtsp->rts_seqlock, start);
 626}
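The seqlock pair above is the read-side retry idiom: read_seqbegin() samples the sequence counter, and read_seqretry() reports whether a writer intervened, in which case the whole read must be redone. A userspace shape of the same reader, assuming C11 atomics (the struct and names are invented for this sketch):

#include <stdatomic.h>

struct seq_protected {
	atomic_uint seq;	// Even: no writer; odd: write in progress.
	_Atomic unsigned int a;
	_Atomic unsigned int b;
};

// Retry until both data loads are bracketed by the same even sequence
// value, the userspace analog of read_seqbegin()/read_seqretry().
static void read_pair(struct seq_protected *p, unsigned int *a, unsigned int *b)
{
	unsigned int s1, s2;

	do {
		s1 = atomic_load_explicit(&p->seq, memory_order_acquire);
		*a = atomic_load_explicit(&p->a, memory_order_relaxed);
		*b = atomic_load_explicit(&p->b, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		s2 = atomic_load_explicit(&p->seq, memory_order_relaxed);
	} while ((s1 & 1) || s1 != s2);
}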
 627
 628// Do a read-side critical section with the specified delay in
 629// microseconds and nanoseconds inserted so as to increase probability
 630// of failure.
 631static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
 632{
 633	unsigned int a;
 634	unsigned int b;
 635	int i;
 636	long idx;
 637	struct refscale_typesafe *rtsp;
 638	unsigned int start;
 639
 640	for (i = nloops; i >= 0; i--) {
 641		preempt_disable();
 642		idx = torture_random(this_cpu_ptr(&refscale_rand)) % rtsarray_size;
 643		preempt_enable();
 644retry:
 645		rcu_read_lock();
 646		rtsp = rcu_dereference(rtsarray[idx]);
 647		a = READ_ONCE(rtsp->a);
 648		if (!rts_acquire(rtsp, &start)) {
 649			rcu_read_unlock();
 650			goto retry;
 651		}
 652		if (a != READ_ONCE(rtsp->a)) {
 653			(void)rts_release(rtsp, start);
 654			rcu_read_unlock();
 655			goto retry;
 656		}
 657		un_delay(udl, ndl);
 658		b = READ_ONCE(rtsp->a);
 659		// Remember, seqlock read-side release can fail.
 660		if (!rts_release(rtsp, start)) {
 661			rcu_read_unlock();
 662			goto retry;
 663		}
 664		WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
 665		b = rtsp->b;
 666		rcu_read_unlock();
 667		WARN_ON_ONCE(a * a != b);
 668	}
 669}
 670
  671// Because the acquisition and release methods are expensive, there
  672// is no point in optimizing away the un_delay() function's two checks.
  673// Thus typesafe_read_section() is simply a wrapper around
  674// typesafe_delay_section().
 675static void typesafe_read_section(const int nloops)
 676{
 677	typesafe_delay_section(nloops, 0, 0);
 678}
 679
 680// Allocate and initialize one refscale_typesafe structure.
 681static struct refscale_typesafe *typesafe_alloc_one(void)
 682{
 683	struct refscale_typesafe *rtsp;
 684
 685	rtsp = kmem_cache_alloc(typesafe_kmem_cachep, GFP_KERNEL);
 686	if (!rtsp)
 687		return NULL;
 688	atomic_set(&rtsp->rts_refctr, 1);
 689	WRITE_ONCE(rtsp->a, rtsp->a + 1);
 690	WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);
 691	return rtsp;
 692}
 693
 694// Slab-allocator constructor for refscale_typesafe structures created
 695// out of a new slab of system memory.
 696static void refscale_typesafe_ctor(void *rtsp_in)
 697{
 698	struct refscale_typesafe *rtsp = rtsp_in;
 699
 700	spin_lock_init(&rtsp->rts_lock);
 701	seqlock_init(&rtsp->rts_seqlock);
 702	preempt_disable();
 703	rtsp->a = torture_random(this_cpu_ptr(&refscale_rand));
 704	preempt_enable();
 705}
 706
 707static struct ref_scale_ops typesafe_ref_ops;
 708static struct ref_scale_ops typesafe_lock_ops;
 709static struct ref_scale_ops typesafe_seqlock_ops;
 710
 711// Initialize for a typesafe test.
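// The lookup_instances module parameter sets the array size: 0 means
// one instance per CPU, and a negative value -N means N instances per
// CPU (for example, lookup_instances=-2 on an 8-CPU system yields 16).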
 712static bool typesafe_init(void)
 713{
 714	long idx;
 715	long si = lookup_instances;
 716
 717	typesafe_kmem_cachep = kmem_cache_create("refscale_typesafe",
 718						 sizeof(struct refscale_typesafe), sizeof(void *),
 719						 SLAB_TYPESAFE_BY_RCU, refscale_typesafe_ctor);
 720	if (!typesafe_kmem_cachep)
 721		return false;
 722	if (si < 0)
 723		si = -si * nr_cpu_ids;
 724	else if (si == 0)
 725		si = nr_cpu_ids;
 726	rtsarray_size = si;
 727	rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
 728	if (!rtsarray)
 729		return false;
 730	for (idx = 0; idx < rtsarray_size; idx++) {
 731		rtsarray[idx] = typesafe_alloc_one();
 732		if (!rtsarray[idx])
 733			return false;
 734	}
 735	if (cur_ops == &typesafe_ref_ops) {
 736		rts_acquire = typesafe_ref_acquire;
 737		rts_release = typesafe_ref_release;
 738	} else if (cur_ops == &typesafe_lock_ops) {
 739		rts_acquire = typesafe_lock_acquire;
 740		rts_release = typesafe_lock_release;
 741	} else if (cur_ops == &typesafe_seqlock_ops) {
 742		rts_acquire = typesafe_seqlock_acquire;
 743		rts_release = typesafe_seqlock_release;
 744	} else {
 745		WARN_ON_ONCE(1);
 746		return false;
 747	}
 748	return true;
 749}
 750
 751// Clean up after a typesafe test.
 752static void typesafe_cleanup(void)
 753{
 754	long idx;
 755
 756	if (rtsarray) {
 757		for (idx = 0; idx < rtsarray_size; idx++)
 758			kmem_cache_free(typesafe_kmem_cachep, rtsarray[idx]);
 759		kfree(rtsarray);
 760		rtsarray = NULL;
 761		rtsarray_size = 0;
 762	}
 763	kmem_cache_destroy(typesafe_kmem_cachep);
 764	typesafe_kmem_cachep = NULL;
 765	rts_acquire = NULL;
 766	rts_release = NULL;
 767}
 768
 769// The typesafe_init() function distinguishes these structures by address.
 770static struct ref_scale_ops typesafe_ref_ops = {
 771	.init		= typesafe_init,
 772	.cleanup	= typesafe_cleanup,
 773	.readsection	= typesafe_read_section,
 774	.delaysection	= typesafe_delay_section,
 775	.name		= "typesafe_ref"
 776};
 777
 778static struct ref_scale_ops typesafe_lock_ops = {
 779	.init		= typesafe_init,
 780	.cleanup	= typesafe_cleanup,
 781	.readsection	= typesafe_read_section,
 782	.delaysection	= typesafe_delay_section,
 783	.name		= "typesafe_lock"
 784};
 785
 786static struct ref_scale_ops typesafe_seqlock_ops = {
 787	.init		= typesafe_init,
 788	.cleanup	= typesafe_cleanup,
 789	.readsection	= typesafe_read_section,
 790	.delaysection	= typesafe_delay_section,
 791	.name		= "typesafe_seqlock"
 792};
 793
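// Do one reader pass.  A positive readdelay (in nanoseconds) is split into
// microsecond and nanosecond parts: for example, readdelay=1500 yields
// delaysection(loops, 1, 500), that is, udelay(1) plus ndelay(500) per loop.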
 794static void rcu_scale_one_reader(void)
 795{
 796	if (readdelay <= 0)
 797		cur_ops->readsection(loops);
 798	else
 799		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
 800}
 801
  802// Reader kthread.  Repeatedly does empty RCU read-side critical
  803// sections, minimizing update-side interference.
 804static int
 805ref_scale_reader(void *arg)
 806{
 807	unsigned long flags;
 808	long me = (long)arg;
 809	struct reader_task *rt = &(reader_tasks[me]);
 810	u64 start;
 811	s64 duration;
 812
 813	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
 814	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
 815	set_user_nice(current, MAX_NICE);
 816	atomic_inc(&n_init);
 817	if (holdoff)
 818		schedule_timeout_interruptible(holdoff * HZ);
 819repeat:
 820	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());
 821
 822	// Wait for signal that this reader can start.
 823	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
 824			   torture_must_stop());
 825
 826	if (torture_must_stop())
 827		goto end;
 828
 829	// Make sure that the CPU is affinitized appropriately during testing.
 830	WARN_ON_ONCE(raw_smp_processor_id() != me);
 831
 832	WRITE_ONCE(rt->start_reader, 0);
 833	if (!atomic_dec_return(&n_started))
 834		while (atomic_read_acquire(&n_started))
 835			cpu_relax();
 836
 837	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);
 838
 839
 840	// To reduce noise, do an initial cache-warming invocation, check
 841	// in, and then keep warming until everyone has checked in.
 842	rcu_scale_one_reader();
 843	if (!atomic_dec_return(&n_warmedup))
 844		while (atomic_read_acquire(&n_warmedup))
 845			rcu_scale_one_reader();
  846	// Also keep interrupts disabled.  This has the further effect
  847	// of preventing entry into the slow path for rcu_read_unlock().
 848	local_irq_save(flags);
 849	start = ktime_get_mono_fast_ns();
 850
 851	rcu_scale_one_reader();
 852
 853	duration = ktime_get_mono_fast_ns() - start;
 854	local_irq_restore(flags);
 855
 856	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
 857	// To reduce runtime-skew noise, do maintain-load invocations until
 858	// everyone is done.
 859	if (!atomic_dec_return(&n_cooleddown))
 860		while (atomic_read_acquire(&n_cooleddown))
 861			rcu_scale_one_reader();
 862
 863	if (atomic_dec_and_test(&nreaders_exp))
 864		wake_up(&main_wq);
 865
 866	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
 867				me, exp_idx, atomic_read(&nreaders_exp));
 868
 869	if (!torture_must_stop())
 870		goto repeat;
 871end:
 872	torture_kthread_stopping("ref_scale_reader");
 873	return 0;
 874}
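The timed region above follows the usual microbenchmark shape: quiesce interference (here by disabling interrupts), read a monotonic clock, run the workload, read the clock again, and later divide by the loop count. A self-contained userspace sketch of that shape, assuming only POSIX clock_gettime() (everything below is illustrative and not part of the module):

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	const long loops = 10000000;
	volatile long sink = 0;		// volatile: keep the loop from vanishing
	long long start, duration;
	long i;

	start = now_ns();
	for (i = 0; i < loops; i++)
		sink += i;
	duration = now_ns() - start;
	printf("%.3f ns per loop\n", (double)duration / loops);
	return 0;
}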
 875
 876static void reset_readers(void)
 877{
 878	int i;
 879	struct reader_task *rt;
 880
 881	for (i = 0; i < nreaders; i++) {
 882		rt = &(reader_tasks[i]);
 883
 884		rt->last_duration_ns = 0;
 885	}
 886}
 887
 888// Print the results of each reader and return the sum of all their durations.
 889static u64 process_durations(int n)
 890{
 891	int i;
 892	struct reader_task *rt;
 893	char buf1[64];
 894	char *buf;
 895	u64 sum = 0;
 896
 897	buf = kmalloc(800 + 64, GFP_KERNEL);
 898	if (!buf)
 899		return 0;
 900	buf[0] = 0;
 901	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
 902		exp_idx);
 903
 904	for (i = 0; i < n && !torture_must_stop(); i++) {
 905		rt = &(reader_tasks[i]);
 906		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);
 907
 908		if (i % 5 == 0)
 909			strcat(buf, "\n");
 910		if (strlen(buf) >= 800) {
 911			pr_alert("%s", buf);
 912			buf[0] = 0;
 913		}
 914		strcat(buf, buf1);
 915
 916		sum += rt->last_duration_ns;
 917	}
 918	pr_alert("%s\n", buf);
 919
 920	kfree(buf);
 921	return sum;
 922}
 923
  924// The main_func kthread is the main orchestrator: it performs a series
  925// of experiments.  For every experiment, it directs all the readers
  926// involved to start and waits for them to finish the experiment.  It
  927// then reads their durations and starts the next experiment.  Once all
  928// nruns experiments have completed, the per-experiment averages are
  929// printed.
 930static int main_func(void *arg)
 931{
 932	int exp, r;
 933	char buf1[64];
 934	char *buf;
 935	u64 *result_avg;
 936
 937	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
 938	set_user_nice(current, MAX_NICE);
 939
 940	VERBOSE_SCALEOUT("main_func task started");
 941	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
 942	buf = kzalloc(800 + 64, GFP_KERNEL);
 943	if (!result_avg || !buf) {
 944		SCALEOUT_ERRSTRING("out of memory");
 945		goto oom_exit;
 946	}
 947	if (holdoff)
 948		schedule_timeout_interruptible(holdoff * HZ);
 949
 950	// Wait for all threads to start.
 951	atomic_inc(&n_init);
 952	while (atomic_read(&n_init) < nreaders + 1)
 953		schedule_timeout_uninterruptible(1);
 954
  955	// Start all readers for each experiment.
 956	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
 957		if (torture_must_stop())
 958			goto end;
 959
 960		reset_readers();
 961		atomic_set(&nreaders_exp, nreaders);
 962		atomic_set(&n_started, nreaders);
 963		atomic_set(&n_warmedup, nreaders);
 964		atomic_set(&n_cooleddown, nreaders);
 965
 966		exp_idx = exp;
 967
 968		for (r = 0; r < nreaders; r++) {
 969			smp_store_release(&reader_tasks[r].start_reader, 1);
 970			wake_up(&reader_tasks[r].wq);
 971		}
 972
 973		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
 974				nreaders);
 975
 976		wait_event(main_wq,
 977			   !atomic_read(&nreaders_exp) || torture_must_stop());
 978
 979		VERBOSE_SCALEOUT("main_func: experiment ended");
 980
 981		if (torture_must_stop())
 982			goto end;
 983
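		// Scale by 1000 so three decimal places survive the integer
		// division: result_avg[] holds 1000 * (average ns per loop per
		// reader), which div_u64_rem(..., 1000, ...) below splits apart.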
 984		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
 985	}
 986
  987	// Print the per-experiment averages.
 988	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");
 989
 990	pr_alert("Runs\tTime(ns)\n");
 991	for (exp = 0; exp < nruns; exp++) {
 992		u64 avg;
 993		u32 rem;
 994
 995		avg = div_u64_rem(result_avg[exp], 1000, &rem);
 996		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
 997		strcat(buf, buf1);
 998		if (strlen(buf) >= 800) {
 999			pr_alert("%s", buf);
1000			buf[0] = 0;
1001		}
1002	}
1003
1004	pr_alert("%s", buf);
1005
1006oom_exit:
 1007	// This will shut down everything, including us.
1008	if (shutdown) {
1009		shutdown_start = 1;
1010		wake_up(&shutdown_wq);
1011	}
1012
 1013	// Wait for the torture framework to stop us.
1014	while (!torture_must_stop())
1015		schedule_timeout_uninterruptible(1);
1016
1017end:
1018	torture_kthread_stopping("main_func");
1019	kfree(result_avg);
1020	kfree(buf);
1021	return 0;
1022}
1023
1024static void
1025ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
1026{
1027	pr_alert("%s" SCALE_FLAG
1028		 "--- %s:  verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
1029		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
1030}
1031
1032static void
1033ref_scale_cleanup(void)
1034{
1035	int i;
1036
1037	if (torture_cleanup_begin())
1038		return;
1039
1040	if (!cur_ops) {
1041		torture_cleanup_end();
1042		return;
1043	}
1044
1045	if (reader_tasks) {
1046		for (i = 0; i < nreaders; i++)
1047			torture_stop_kthread("ref_scale_reader",
1048					     reader_tasks[i].task);
1049	}
1050	kfree(reader_tasks);
1051
1052	torture_stop_kthread("main_task", main_task);
1053	kfree(main_task);
1054
1055	// Do scale-type-specific cleanup operations.
1056	if (cur_ops->cleanup != NULL)
1057		cur_ops->cleanup();
1058
1059	torture_cleanup_end();
1060}
1061
 1062// Shutdown kthread.  Just waits to be awakened, then shuts down the system.
1063static int
1064ref_scale_shutdown(void *arg)
1065{
1066	wait_event_idle(shutdown_wq, shutdown_start);
1067
1068	smp_mb(); // Wake before output.
1069	ref_scale_cleanup();
1070	kernel_power_off();
1071
1072	return -EINVAL;
1073}
1074
1075static int __init
1076ref_scale_init(void)
1077{
1078	long i;
1079	int firsterr = 0;
1080	static struct ref_scale_ops *scale_ops[] = {
1081		&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
1082		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
1083		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
1084	};
1085
1086	if (!torture_init_begin(scale_type, verbose))
1087		return -EBUSY;
1088
1089	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
1090		cur_ops = scale_ops[i];
1091		if (strcmp(scale_type, cur_ops->name) == 0)
1092			break;
1093	}
1094	if (i == ARRAY_SIZE(scale_ops)) {
1095		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
1096		pr_alert("rcu-scale types:");
1097		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
1098			pr_cont(" %s", scale_ops[i]->name);
1099		pr_cont("\n");
1100		firsterr = -EINVAL;
1101		cur_ops = NULL;
1102		goto unwind;
1103	}
1104	if (cur_ops->init)
1105		if (!cur_ops->init()) {
1106			firsterr = -EUCLEAN;
1107			goto unwind;
1108		}
1109
1110	ref_scale_print_module_parms(cur_ops, "Start of test");
1111
1112	// Shutdown task
1113	if (shutdown) {
1114		init_waitqueue_head(&shutdown_wq);
1115		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
1116						  shutdown_task);
1117		if (torture_init_error(firsterr))
1118			goto unwind;
1119		schedule_timeout_uninterruptible(1);
1120	}
1121
1122	// Reader tasks (default to ~75% of online CPUs).
1123	if (nreaders < 0)
1124		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
1125	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
1126		loops = 1;
1127	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
1128		nreaders = 1;
1129	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
1130		nruns = 1;
1131	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
1132			       GFP_KERNEL);
1133	if (!reader_tasks) {
1134		SCALEOUT_ERRSTRING("out of memory");
1135		firsterr = -ENOMEM;
1136		goto unwind;
1137	}
1138
1139	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);
1140
1141	for (i = 0; i < nreaders; i++) {
1142		init_waitqueue_head(&reader_tasks[i].wq);
1143		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
1144						  reader_tasks[i].task);
1145		if (torture_init_error(firsterr))
1146			goto unwind;
1147	}
1148
1149	// Main Task
1150	init_waitqueue_head(&main_wq);
1151	firsterr = torture_create_kthread(main_func, NULL, main_task);
1152	if (torture_init_error(firsterr))
1153		goto unwind;
1154
1155	torture_init_end();
1156	return 0;
1157
1158unwind:
1159	torture_init_end();
1160	ref_scale_cleanup();
1161	if (shutdown) {
1162		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
1163		kernel_power_off();
1164	}
1165	return firsterr;
1166}
1167
1168module_init(ref_scale_init);
1169module_exit(ref_scale_cleanup);
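The v6.13.7 version of the same file follows. Relative to v6.9.4 it const-qualifies the ref_scale_ops structures, adds a MODULE_DESCRIPTION() and a seq_buf.h include, adds an srcu-lite test flavor plus a guest_os_delay parameter with its rcu_scale_warm_cool() helper, and checks reader affinity against me % nr_cpu_ids.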
kernel/rcu/refscale.c, v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// Scalability test comparing RCU vs other mechanisms
   4// for acquiring references on objects.
   5//
   6// Copyright (C) Google, 2020.
   7//
   8// Author: Joel Fernandes <joel@joelfernandes.org>
   9
  10#define pr_fmt(fmt) fmt
  11
  12#include <linux/atomic.h>
  13#include <linux/bitops.h>
  14#include <linux/completion.h>
  15#include <linux/cpu.h>
  16#include <linux/delay.h>
  17#include <linux/err.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/kthread.h>
  21#include <linux/kernel.h>
  22#include <linux/mm.h>
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/notifier.h>
  26#include <linux/percpu.h>
  27#include <linux/rcupdate.h>
  28#include <linux/rcupdate_trace.h>
  29#include <linux/reboot.h>
  30#include <linux/sched.h>
  31#include <linux/seq_buf.h>
  32#include <linux/spinlock.h>
  33#include <linux/smp.h>
  34#include <linux/stat.h>
  35#include <linux/srcu.h>
  36#include <linux/slab.h>
  37#include <linux/torture.h>
  38#include <linux/types.h>
  39
  40#include "rcu.h"
  41
  42#define SCALE_FLAG "-ref-scale: "
  43
  44#define SCALEOUT(s, x...) \
  45	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)
  46
  47#define VERBOSE_SCALEOUT(s, x...) \
  48	do { \
  49		if (verbose) \
  50			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
  51	} while (0)
  52
  53static atomic_t verbose_batch_ctr;
  54
  55#define VERBOSE_SCALEOUT_BATCH(s, x...)							\
  56do {											\
  57	if (verbose &&									\
  58	    (verbose_batched <= 0 ||							\
  59	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {		\
  60		schedule_timeout_uninterruptible(1);					\
  61		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x);			\
  62	}										\
  63} while (0)
  64
  65#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)
  66
  67MODULE_DESCRIPTION("Scalability test for object reference mechanisms");
  68MODULE_LICENSE("GPL");
  69MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");
  70
  71static char *scale_type = "rcu";
  72module_param(scale_type, charp, 0444);
   73MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock).");
  74
  75torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
  76torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");
  77
  78// Number of seconds to extend warm-up and cool-down for multiple guest OSes
  79torture_param(long, guest_os_delay, 0,
  80	      "Number of seconds to extend warm-up/cool-down for multiple guest OSes.");
  81// Wait until there are multiple CPUs before starting test.
  82torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
  83	      "Holdoff time before test start (s)");
  84// Number of typesafe_lookup structures, that is, the degree of concurrency.
  85torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
   87// Number of loops per experiment; all readers execute operations concurrently.
  87torture_param(long, loops, 10000, "Number of loops per experiment.");
  88// Number of readers, with -1 defaulting to about 75% of the CPUs.
  89torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
  90// Number of runs.
  91torture_param(int, nruns, 30, "Number of experiments to run.");
  92// Reader delay in nanoseconds, 0 for no delay.
  93torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");
  94
  95#ifdef MODULE
  96# define REFSCALE_SHUTDOWN 0
  97#else
  98# define REFSCALE_SHUTDOWN 1
  99#endif
 100
 101torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
 102	      "Shutdown at end of scalability tests.");
 103
 104struct reader_task {
 105	struct task_struct *task;
 106	int start_reader;
 107	wait_queue_head_t wq;
 108	u64 last_duration_ns;
 109};
 110
 111static struct task_struct *shutdown_task;
 112static wait_queue_head_t shutdown_wq;
 113
 114static struct task_struct *main_task;
 115static wait_queue_head_t main_wq;
 116static int shutdown_start;
 117
 118static struct reader_task *reader_tasks;
 119
 120// Number of readers that are part of the current experiment.
 121static atomic_t nreaders_exp;
 122
  123// Used to wait for all threads to start.
 124static atomic_t n_init;
 125static atomic_t n_started;
 126static atomic_t n_warmedup;
 127static atomic_t n_cooleddown;
 128
 129// Track which experiment is currently running.
 130static int exp_idx;
 131
 132// Operations vector for selecting different types of tests.
 133struct ref_scale_ops {
 134	bool (*init)(void);
 135	void (*cleanup)(void);
 136	void (*readsection)(const int nloops);
 137	void (*delaysection)(const int nloops, const int udl, const int ndl);
 138	const char *name;
 139};
 140
 141static const struct ref_scale_ops *cur_ops;
 142
 143static void un_delay(const int udl, const int ndl)
 144{
 145	if (udl)
 146		udelay(udl);
 147	if (ndl)
 148		ndelay(ndl);
 149}
 150
 151static void ref_rcu_read_section(const int nloops)
 152{
 153	int i;
 154
 155	for (i = nloops; i >= 0; i--) {
 156		rcu_read_lock();
 157		rcu_read_unlock();
 158	}
 159}
 160
 161static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
 162{
 163	int i;
 164
 165	for (i = nloops; i >= 0; i--) {
 166		rcu_read_lock();
 167		un_delay(udl, ndl);
 168		rcu_read_unlock();
 169	}
 170}
 171
 172static bool rcu_sync_scale_init(void)
 173{
 174	return true;
 175}
 176
 177static const struct ref_scale_ops rcu_ops = {
 178	.init		= rcu_sync_scale_init,
 179	.readsection	= ref_rcu_read_section,
 180	.delaysection	= ref_rcu_delay_section,
 181	.name		= "rcu"
 182};
 183
 184// Definitions for SRCU ref scale testing.
 185DEFINE_STATIC_SRCU(srcu_refctl_scale);
 186static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;
 187
 188static void srcu_ref_scale_read_section(const int nloops)
 189{
 190	int i;
 191	int idx;
 192
 193	for (i = nloops; i >= 0; i--) {
 194		idx = srcu_read_lock(srcu_ctlp);
 195		srcu_read_unlock(srcu_ctlp, idx);
 196	}
 197}
 198
 199static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 200{
 201	int i;
 202	int idx;
 203
 204	for (i = nloops; i >= 0; i--) {
 205		idx = srcu_read_lock(srcu_ctlp);
 206		un_delay(udl, ndl);
 207		srcu_read_unlock(srcu_ctlp, idx);
 208	}
 209}
 210
 211static const struct ref_scale_ops srcu_ops = {
 212	.init		= rcu_sync_scale_init,
 213	.readsection	= srcu_ref_scale_read_section,
 214	.delaysection	= srcu_ref_scale_delay_section,
 215	.name		= "srcu"
 216};
 217
 218static void srcu_lite_ref_scale_read_section(const int nloops)
 219{
 220	int i;
 221	int idx;
 222
 223	for (i = nloops; i >= 0; i--) {
 224		idx = srcu_read_lock_lite(srcu_ctlp);
 225		srcu_read_unlock_lite(srcu_ctlp, idx);
 226	}
 227}
 228
 229static void srcu_lite_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 230{
 231	int i;
 232	int idx;
 233
 234	for (i = nloops; i >= 0; i--) {
 235		idx = srcu_read_lock_lite(srcu_ctlp);
 236		un_delay(udl, ndl);
 237		srcu_read_unlock_lite(srcu_ctlp, idx);
 238	}
 239}
 240
 241static const struct ref_scale_ops srcu_lite_ops = {
 242	.init		= rcu_sync_scale_init,
 243	.readsection	= srcu_lite_ref_scale_read_section,
 244	.delaysection	= srcu_lite_ref_scale_delay_section,
 245	.name		= "srcu-lite"
 246};
 247
 248#ifdef CONFIG_TASKS_RCU
 249
 250// Definitions for RCU Tasks ref scale testing: Empty read markers.
 251// These definitions also work for RCU Rude readers.
 252static void rcu_tasks_ref_scale_read_section(const int nloops)
 253{
 254	int i;
 255
 256	for (i = nloops; i >= 0; i--)
 257		continue;
 258}
 259
 260static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 261{
 262	int i;
 263
 264	for (i = nloops; i >= 0; i--)
 265		un_delay(udl, ndl);
 266}
 267
 268static const struct ref_scale_ops rcu_tasks_ops = {
 269	.init		= rcu_sync_scale_init,
 270	.readsection	= rcu_tasks_ref_scale_read_section,
 271	.delaysection	= rcu_tasks_ref_scale_delay_section,
 272	.name		= "rcu-tasks"
 273};
 274
 275#define RCU_TASKS_OPS &rcu_tasks_ops,
 276
 277#else // #ifdef CONFIG_TASKS_RCU
 278
 279#define RCU_TASKS_OPS
 280
 281#endif // #else // #ifdef CONFIG_TASKS_RCU
 282
 283#ifdef CONFIG_TASKS_TRACE_RCU
 284
 285// Definitions for RCU Tasks Trace ref scale testing.
 286static void rcu_trace_ref_scale_read_section(const int nloops)
 287{
 288	int i;
 289
 290	for (i = nloops; i >= 0; i--) {
 291		rcu_read_lock_trace();
 292		rcu_read_unlock_trace();
 293	}
 294}
 295
 296static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
 297{
 298	int i;
 299
 300	for (i = nloops; i >= 0; i--) {
 301		rcu_read_lock_trace();
 302		un_delay(udl, ndl);
 303		rcu_read_unlock_trace();
 304	}
 305}
 306
 307static const struct ref_scale_ops rcu_trace_ops = {
 308	.init		= rcu_sync_scale_init,
 309	.readsection	= rcu_trace_ref_scale_read_section,
 310	.delaysection	= rcu_trace_ref_scale_delay_section,
 311	.name		= "rcu-trace"
 312};
 313
 314#define RCU_TRACE_OPS &rcu_trace_ops,
 315
 316#else // #ifdef CONFIG_TASKS_TRACE_RCU
 317
 318#define RCU_TRACE_OPS
 319
 320#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
 321
 322// Definitions for reference count
 323static atomic_t refcnt;
 324
 325static void ref_refcnt_section(const int nloops)
 326{
 327	int i;
 328
 329	for (i = nloops; i >= 0; i--) {
 330		atomic_inc(&refcnt);
 331		atomic_dec(&refcnt);
 332	}
 333}
 334
 335static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
 336{
 337	int i;
 338
 339	for (i = nloops; i >= 0; i--) {
 340		atomic_inc(&refcnt);
 341		un_delay(udl, ndl);
 342		atomic_dec(&refcnt);
 343	}
 344}
 345
 346static const struct ref_scale_ops refcnt_ops = {
 347	.init		= rcu_sync_scale_init,
 348	.readsection	= ref_refcnt_section,
 349	.delaysection	= ref_refcnt_delay_section,
 350	.name		= "refcnt"
 351};
 352
 353// Definitions for rwlock
 354static rwlock_t test_rwlock;
 355
 356static bool ref_rwlock_init(void)
 357{
 358	rwlock_init(&test_rwlock);
 359	return true;
 360}
 361
 362static void ref_rwlock_section(const int nloops)
 363{
 364	int i;
 365
 366	for (i = nloops; i >= 0; i--) {
 367		read_lock(&test_rwlock);
 368		read_unlock(&test_rwlock);
 369	}
 370}
 371
 372static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
 373{
 374	int i;
 375
 376	for (i = nloops; i >= 0; i--) {
 377		read_lock(&test_rwlock);
 378		un_delay(udl, ndl);
 379		read_unlock(&test_rwlock);
 380	}
 381}
 382
 383static const struct ref_scale_ops rwlock_ops = {
 384	.init		= ref_rwlock_init,
 385	.readsection	= ref_rwlock_section,
 386	.delaysection	= ref_rwlock_delay_section,
 387	.name		= "rwlock"
 388};
 389
 390// Definitions for rwsem
 391static struct rw_semaphore test_rwsem;
 392
 393static bool ref_rwsem_init(void)
 394{
 395	init_rwsem(&test_rwsem);
 396	return true;
 397}
 398
 399static void ref_rwsem_section(const int nloops)
 400{
 401	int i;
 402
 403	for (i = nloops; i >= 0; i--) {
 404		down_read(&test_rwsem);
 405		up_read(&test_rwsem);
 406	}
 407}
 408
 409static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
 410{
 411	int i;
 412
 413	for (i = nloops; i >= 0; i--) {
 414		down_read(&test_rwsem);
 415		un_delay(udl, ndl);
 416		up_read(&test_rwsem);
 417	}
 418}
 419
 420static const struct ref_scale_ops rwsem_ops = {
 421	.init		= ref_rwsem_init,
 422	.readsection	= ref_rwsem_section,
 423	.delaysection	= ref_rwsem_delay_section,
 424	.name		= "rwsem"
 425};
 426
 427// Definitions for global spinlock
 428static DEFINE_RAW_SPINLOCK(test_lock);
 429
 430static void ref_lock_section(const int nloops)
 431{
 432	int i;
 433
 434	preempt_disable();
 435	for (i = nloops; i >= 0; i--) {
 436		raw_spin_lock(&test_lock);
 437		raw_spin_unlock(&test_lock);
 438	}
 439	preempt_enable();
 440}
 441
 442static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
 443{
 444	int i;
 445
 446	preempt_disable();
 447	for (i = nloops; i >= 0; i--) {
 448		raw_spin_lock(&test_lock);
 449		un_delay(udl, ndl);
 450		raw_spin_unlock(&test_lock);
 451	}
 452	preempt_enable();
 453}
 454
 455static const struct ref_scale_ops lock_ops = {
 456	.readsection	= ref_lock_section,
 457	.delaysection	= ref_lock_delay_section,
 458	.name		= "lock"
 459};
 460
 461// Definitions for global irq-save spinlock
 462
 463static void ref_lock_irq_section(const int nloops)
 464{
 465	unsigned long flags;
 466	int i;
 467
 468	preempt_disable();
 469	for (i = nloops; i >= 0; i--) {
 470		raw_spin_lock_irqsave(&test_lock, flags);
 471		raw_spin_unlock_irqrestore(&test_lock, flags);
 472	}
 473	preempt_enable();
 474}
 475
 476static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
 477{
 478	unsigned long flags;
 479	int i;
 480
 481	preempt_disable();
 482	for (i = nloops; i >= 0; i--) {
 483		raw_spin_lock_irqsave(&test_lock, flags);
 484		un_delay(udl, ndl);
 485		raw_spin_unlock_irqrestore(&test_lock, flags);
 486	}
 487	preempt_enable();
 488}
 489
 490static const struct ref_scale_ops lock_irq_ops = {
 491	.readsection	= ref_lock_irq_section,
 492	.delaysection	= ref_lock_irq_delay_section,
 493	.name		= "lock-irq"
 494};
 495
  496// Definitions for acquire-release.
 497static DEFINE_PER_CPU(unsigned long, test_acqrel);
 498
 499static void ref_acqrel_section(const int nloops)
 500{
 501	unsigned long x;
 502	int i;
 503
 504	preempt_disable();
 505	for (i = nloops; i >= 0; i--) {
 506		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
 507		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
 508	}
 509	preempt_enable();
 510}
 511
 512static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
 513{
 514	unsigned long x;
 515	int i;
 516
 517	preempt_disable();
 518	for (i = nloops; i >= 0; i--) {
 519		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
 520		un_delay(udl, ndl);
 521		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
 522	}
 523	preempt_enable();
 524}
 525
 526static const struct ref_scale_ops acqrel_ops = {
 527	.readsection	= ref_acqrel_section,
 528	.delaysection	= ref_acqrel_delay_section,
 529	.name		= "acqrel"
 530};
 531
 532static volatile u64 stopopts;
 533
 534static void ref_clock_section(const int nloops)
 535{
 536	u64 x = 0;
 537	int i;
 538
 539	preempt_disable();
 540	for (i = nloops; i >= 0; i--)
 541		x += ktime_get_real_fast_ns();
 542	preempt_enable();
 543	stopopts = x;
 544}
 545
 546static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
 547{
 548	u64 x = 0;
 549	int i;
 550
 551	preempt_disable();
 552	for (i = nloops; i >= 0; i--) {
 553		x += ktime_get_real_fast_ns();
 554		un_delay(udl, ndl);
 555	}
 556	preempt_enable();
 557	stopopts = x;
 558}
 559
 560static const struct ref_scale_ops clock_ops = {
 561	.readsection	= ref_clock_section,
 562	.delaysection	= ref_clock_delay_section,
 563	.name		= "clock"
 564};
 565
 566static void ref_jiffies_section(const int nloops)
 567{
 568	u64 x = 0;
 569	int i;
 570
 571	preempt_disable();
 572	for (i = nloops; i >= 0; i--)
 573		x += jiffies;
 574	preempt_enable();
 575	stopopts = x;
 576}
 577
 578static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
 579{
 580	u64 x = 0;
 581	int i;
 582
 583	preempt_disable();
 584	for (i = nloops; i >= 0; i--) {
 585		x += jiffies;
 586		un_delay(udl, ndl);
 587	}
 588	preempt_enable();
 589	stopopts = x;
 590}
 591
 592static const struct ref_scale_ops jiffies_ops = {
 593	.readsection	= ref_jiffies_section,
 594	.delaysection	= ref_jiffies_delay_section,
 595	.name		= "jiffies"
 596};
 597
 598////////////////////////////////////////////////////////////////////////
 599//
 600// Methods leveraging SLAB_TYPESAFE_BY_RCU.
 601//
 602
 603// Item to look up in a typesafe manner.  Array of pointers to these.
 604struct refscale_typesafe {
 605	atomic_t rts_refctr;  // Used by all flavors
 606	spinlock_t rts_lock;
 607	seqlock_t rts_seqlock;
 608	unsigned int a;
 609	unsigned int b;
 610};
 611
 612static struct kmem_cache *typesafe_kmem_cachep;
 613static struct refscale_typesafe **rtsarray;
 614static long rtsarray_size;
 615static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
 616static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
 617static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);
 618
 619// Conditionally acquire an explicit in-structure reference count.
 620static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
 621{
 622	return atomic_inc_not_zero(&rtsp->rts_refctr);
 623}
 624
 625// Unconditionally release an explicit in-structure reference count.
 626static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
 627{
 628	if (!atomic_dec_return(&rtsp->rts_refctr)) {
 629		WRITE_ONCE(rtsp->a, rtsp->a + 1);
 630		kmem_cache_free(typesafe_kmem_cachep, rtsp);
 631	}
 632	return true;
 633}
 634
 635// Unconditionally acquire an explicit in-structure spinlock.
 636static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
 637{
 638	spin_lock(&rtsp->rts_lock);
 639	return true;
 640}
 641
 642// Unconditionally release an explicit in-structure spinlock.
 643static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
 644{
 645	spin_unlock(&rtsp->rts_lock);
 646	return true;
 647}
 648
 649// Unconditionally acquire an explicit in-structure sequence lock.
 650static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
 651{
 652	*start = read_seqbegin(&rtsp->rts_seqlock);
 653	return true;
 654}
 655
 656// Conditionally release an explicit in-structure sequence lock.  Return
 657// true if this release was successful, that is, if no retry is required.
 658static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
 659{
 660	return !read_seqretry(&rtsp->rts_seqlock, start);
 661}
 662
 663// Do a read-side critical section with the specified delay in
 664// microseconds and nanoseconds inserted so as to increase probability
 665// of failure.
 666static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
 667{
 668	unsigned int a;
 669	unsigned int b;
 670	int i;
 671	long idx;
 672	struct refscale_typesafe *rtsp;
 673	unsigned int start;
 674
 675	for (i = nloops; i >= 0; i--) {
 676		preempt_disable();
 677		idx = torture_random(this_cpu_ptr(&refscale_rand)) % rtsarray_size;
 678		preempt_enable();
 679retry:
 680		rcu_read_lock();
 681		rtsp = rcu_dereference(rtsarray[idx]);
 682		a = READ_ONCE(rtsp->a);
 683		if (!rts_acquire(rtsp, &start)) {
 684			rcu_read_unlock();
 685			goto retry;
 686		}
 687		if (a != READ_ONCE(rtsp->a)) {
 688			(void)rts_release(rtsp, start);
 689			rcu_read_unlock();
 690			goto retry;
 691		}
 692		un_delay(udl, ndl);
 693		b = READ_ONCE(rtsp->a);
 694		// Remember, seqlock read-side release can fail.
 695		if (!rts_release(rtsp, start)) {
 696			rcu_read_unlock();
 697			goto retry;
 698		}
 699		WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
 700		b = rtsp->b;
 701		rcu_read_unlock();
 702		WARN_ON_ONCE(a * a != b);
 703	}
 704}
 705
  706// Because the acquisition and release methods are expensive, there
  707// is no point in optimizing away the un_delay() function's two checks.
  708// Thus typesafe_read_section() is simply a wrapper around
  709// typesafe_delay_section().
 710static void typesafe_read_section(const int nloops)
 711{
 712	typesafe_delay_section(nloops, 0, 0);
 713}
 714
 715// Allocate and initialize one refscale_typesafe structure.
 716static struct refscale_typesafe *typesafe_alloc_one(void)
 717{
 718	struct refscale_typesafe *rtsp;
 719
 720	rtsp = kmem_cache_alloc(typesafe_kmem_cachep, GFP_KERNEL);
 721	if (!rtsp)
 722		return NULL;
 723	atomic_set(&rtsp->rts_refctr, 1);
 724	WRITE_ONCE(rtsp->a, rtsp->a + 1);
 725	WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);
 726	return rtsp;
 727}
 728
 729// Slab-allocator constructor for refscale_typesafe structures created
 730// out of a new slab of system memory.
 731static void refscale_typesafe_ctor(void *rtsp_in)
 732{
 733	struct refscale_typesafe *rtsp = rtsp_in;
 734
 735	spin_lock_init(&rtsp->rts_lock);
 736	seqlock_init(&rtsp->rts_seqlock);
 737	preempt_disable();
 738	rtsp->a = torture_random(this_cpu_ptr(&refscale_rand));
 739	preempt_enable();
 740}
 741
 742static const struct ref_scale_ops typesafe_ref_ops;
 743static const struct ref_scale_ops typesafe_lock_ops;
 744static const struct ref_scale_ops typesafe_seqlock_ops;
 745
 746// Initialize for a typesafe test.
 747static bool typesafe_init(void)
 748{
 749	long idx;
 750	long si = lookup_instances;
 751
 752	typesafe_kmem_cachep = kmem_cache_create("refscale_typesafe",
 753						 sizeof(struct refscale_typesafe), sizeof(void *),
 754						 SLAB_TYPESAFE_BY_RCU, refscale_typesafe_ctor);
 755	if (!typesafe_kmem_cachep)
 756		return false;
 757	if (si < 0)
 758		si = -si * nr_cpu_ids;
 759	else if (si == 0)
 760		si = nr_cpu_ids;
 761	rtsarray_size = si;
 762	rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
 763	if (!rtsarray)
 764		return false;
 765	for (idx = 0; idx < rtsarray_size; idx++) {
 766		rtsarray[idx] = typesafe_alloc_one();
 767		if (!rtsarray[idx])
 768			return false;
 769	}
 770	if (cur_ops == &typesafe_ref_ops) {
 771		rts_acquire = typesafe_ref_acquire;
 772		rts_release = typesafe_ref_release;
 773	} else if (cur_ops == &typesafe_lock_ops) {
 774		rts_acquire = typesafe_lock_acquire;
 775		rts_release = typesafe_lock_release;
 776	} else if (cur_ops == &typesafe_seqlock_ops) {
 777		rts_acquire = typesafe_seqlock_acquire;
 778		rts_release = typesafe_seqlock_release;
 779	} else {
 780		WARN_ON_ONCE(1);
 781		return false;
 782	}
 783	return true;
 784}
 785
 786// Clean up after a typesafe test.
 787static void typesafe_cleanup(void)
 788{
 789	long idx;
 790
 791	if (rtsarray) {
 792		for (idx = 0; idx < rtsarray_size; idx++)
 793			kmem_cache_free(typesafe_kmem_cachep, rtsarray[idx]);
 794		kfree(rtsarray);
 795		rtsarray = NULL;
 796		rtsarray_size = 0;
 797	}
 798	kmem_cache_destroy(typesafe_kmem_cachep);
 799	typesafe_kmem_cachep = NULL;
 800	rts_acquire = NULL;
 801	rts_release = NULL;
 802}
 803
 804// The typesafe_init() function distinguishes these structures by address.
 805static const struct ref_scale_ops typesafe_ref_ops = {
 806	.init		= typesafe_init,
 807	.cleanup	= typesafe_cleanup,
 808	.readsection	= typesafe_read_section,
 809	.delaysection	= typesafe_delay_section,
 810	.name		= "typesafe_ref"
 811};
 812
 813static const struct ref_scale_ops typesafe_lock_ops = {
 814	.init		= typesafe_init,
 815	.cleanup	= typesafe_cleanup,
 816	.readsection	= typesafe_read_section,
 817	.delaysection	= typesafe_delay_section,
 818	.name		= "typesafe_lock"
 819};
 820
 821static const struct ref_scale_ops typesafe_seqlock_ops = {
 822	.init		= typesafe_init,
 823	.cleanup	= typesafe_cleanup,
 824	.readsection	= typesafe_read_section,
 825	.delaysection	= typesafe_delay_section,
 826	.name		= "typesafe_seqlock"
 827};
 828
 829static void rcu_scale_one_reader(void)
 830{
 831	if (readdelay <= 0)
 832		cur_ops->readsection(loops);
 833	else
 834		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
 835}
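// Editorial note: readdelay is given in nanoseconds, and the division
// above splits it into microsecond and nanosecond components for
// ->delaysection().  For example, readdelay=1500 results in
// ->delaysection(loops, 1, 500), that is, 1 us + 500 ns of delay in
// each read-side pass.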
 836
  837// Warm up the cache, or, if needed, run a series of rcu_scale_one_reader()
  838// invocations to allow multiple rcuscale guest OSes to collect mutually valid data.
 839static void rcu_scale_warm_cool(void)
 840{
 841	unsigned long jdone = jiffies + (guest_os_delay > 0 ? guest_os_delay * HZ : -1);
 842
 843	do {
 844		rcu_scale_one_reader();
 845		cond_resched();
 846	} while (time_before(jiffies, jdone));
 847}
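// Editorial note: when guest_os_delay <= 0, jdone works out to
// jiffies - 1, so time_before() fails immediately and the do-while loop
// above performs exactly one cache-warming pass.  When guest_os_delay is
// positive, the loop instead generates load for guest_os_delay seconds,
// allowing concurrently booted guest OSes to overlap their measurement
// windows.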
 848
  849// Reader kthread.  Repeatedly executes empty read-side critical
  850// sections for the selected scale_type, minimizing update-side interference.
 851static int
 852ref_scale_reader(void *arg)
 853{
 854	unsigned long flags;
 855	long me = (long)arg;
 856	struct reader_task *rt = &(reader_tasks[me]);
 857	u64 start;
 858	s64 duration;
 859
 860	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
 861	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
 862	set_user_nice(current, MAX_NICE);
 863	atomic_inc(&n_init);
 864	if (holdoff)
 865		schedule_timeout_interruptible(holdoff * HZ);
 866repeat:
 867	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());
 868
 869	// Wait for signal that this reader can start.
 870	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
 871			   torture_must_stop());
 872
 873	if (torture_must_stop())
 874		goto end;
 875
 876	// Make sure that the CPU is affinitized appropriately during testing.
 877	WARN_ON_ONCE(raw_smp_processor_id() != me % nr_cpu_ids);
 878
 879	WRITE_ONCE(rt->start_reader, 0);
 880	if (!atomic_dec_return(&n_started))
 881		while (atomic_read_acquire(&n_started))
 882			cpu_relax();
 883
 884	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);
  885
 887	// To reduce noise, do an initial cache-warming invocation, check
 888	// in, and then keep warming until everyone has checked in.
 889	rcu_scale_one_reader();
 890	if (!atomic_dec_return(&n_warmedup))
 891		while (atomic_read_acquire(&n_warmedup))
 892			rcu_scale_one_reader();
  893	// Keep interrupts disabled during the measurement.  This also has
  894	// the effect of preventing entry into the rcu_read_unlock() slow path.
 895	local_irq_save(flags);
 896	start = ktime_get_mono_fast_ns();
 897
 898	rcu_scale_one_reader();
 899
 900	duration = ktime_get_mono_fast_ns() - start;
 901	local_irq_restore(flags);
 902
 903	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
 904	// To reduce runtime-skew noise, do maintain-load invocations until
 905	// everyone is done.
 906	if (!atomic_dec_return(&n_cooleddown))
 907		while (atomic_read_acquire(&n_cooleddown))
 908			rcu_scale_one_reader();
 909
 910	if (atomic_dec_and_test(&nreaders_exp))
 911		wake_up(&main_wq);
 912
 913	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
 914				me, exp_idx, atomic_read(&nreaders_exp));
 915
 916	if (!torture_must_stop())
 917		goto repeat;
 918end:
 919	torture_kthread_stopping("ref_scale_reader");
 920	return 0;
 921}
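// Editorial note: each reader thus moves through several phases per
// experiment: wait for the start signal, check in via the n_started
// countdown, warm up via n_warmedup (with rcu_scale_one_reader() as
// filler work), perform the timed measurement with interrupts disabled,
// and finally cool down via n_cooleddown so that early finishers keep
// generating load while the remaining readers are still measuring.  The
// reader that brings nreaders_exp to zero wakes main_func() to process
// the results.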
 922
 923static void reset_readers(void)
 924{
 925	int i;
 926	struct reader_task *rt;
 927
 928	for (i = 0; i < nreaders; i++) {
 929		rt = &(reader_tasks[i]);
 930
 931		rt->last_duration_ns = 0;
 932	}
 933}
 934
 935// Print the results of each reader and return the sum of all their durations.
 936static u64 process_durations(int n)
 937{
 938	int i;
 939	struct reader_task *rt;
 940	struct seq_buf s;
 941	char *buf;
 942	u64 sum = 0;
 943
 944	buf = kmalloc(800 + 64, GFP_KERNEL);
 945	if (!buf)
 946		return 0;
 947	seq_buf_init(&s, buf, 800 + 64);
 948
 949	seq_buf_printf(&s, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
 950		       exp_idx);
 951
 952	for (i = 0; i < n && !torture_must_stop(); i++) {
 953		rt = &(reader_tasks[i]);
 954
 955		if (i % 5 == 0)
 956			seq_buf_putc(&s, '\n');
 957
 958		if (seq_buf_used(&s) >= 800) {
 959			pr_alert("%s", seq_buf_str(&s));
 960			seq_buf_clear(&s);
 961		}
 962
 963		seq_buf_printf(&s, "%d: %llu\t", i, rt->last_duration_ns);
 964
 965		sum += rt->last_duration_ns;
 966	}
 967	pr_alert("%s\n", seq_buf_str(&s));
 968
 969	kfree(buf);
 970	return sum;
 971}
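// Editorial note: the buffer is sized 800 + 64 so that flushing at the
// 800-byte mark always leaves headroom for the next entry, keeping each
// pr_alert() comfortably within printk()'s line-length limits.  Because
// the flush check runs before the entry is appended, no entry is ever
// split across two flushes.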
 972
  973// The main_func kthread is the orchestrator: it runs a series of
  974// nruns experiments.  For each experiment, it directs all the readers
  975// involved to start, waits for them to finish, and then collects
  976// their measured durations.  Once every experiment has completed,
  977// the per-experiment averages are printed.
 979static int main_func(void *arg)
 980{
 981	int exp, r;
 982	char buf1[64];
 983	char *buf;
 984	u64 *result_avg;
 985
 986	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
 987	set_user_nice(current, MAX_NICE);
 988
 989	VERBOSE_SCALEOUT("main_func task started");
 990	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
 991	buf = kzalloc(800 + 64, GFP_KERNEL);
 992	if (!result_avg || !buf) {
 993		SCALEOUT_ERRSTRING("out of memory");
 994		goto oom_exit;
 995	}
 996	if (holdoff)
 997		schedule_timeout_interruptible(holdoff * HZ);
 998
 999	// Wait for all threads to start.
1000	atomic_inc(&n_init);
1001	while (atomic_read(&n_init) < nreaders + 1)
1002		schedule_timeout_uninterruptible(1);
1003
 1004	// Run the experiments, with all nreaders readers participating in each.
1005	rcu_scale_warm_cool();
1006	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
1007		if (torture_must_stop())
1008			goto end;
1009
1010		reset_readers();
1011		atomic_set(&nreaders_exp, nreaders);
1012		atomic_set(&n_started, nreaders);
1013		atomic_set(&n_warmedup, nreaders);
1014		atomic_set(&n_cooleddown, nreaders);
1015
1016		exp_idx = exp;
1017
1018		for (r = 0; r < nreaders; r++) {
1019			smp_store_release(&reader_tasks[r].start_reader, 1);
1020			wake_up(&reader_tasks[r].wq);
1021		}
1022
1023		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
1024				nreaders);
1025
1026		wait_event(main_wq,
1027			   !atomic_read(&nreaders_exp) || torture_must_stop());
1028
1029		VERBOSE_SCALEOUT("main_func: experiment ended");
1030
1031		if (torture_must_stop())
1032			goto end;
1033
1034		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
1035	}
1036	rcu_scale_warm_cool();
1037
1038	// Print the average of all experiments
1039	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");
1040
1041	pr_alert("Runs\tTime(ns)\n");
1042	for (exp = 0; exp < nruns; exp++) {
1043		u64 avg;
1044		u32 rem;
1045
1046		avg = div_u64_rem(result_avg[exp], 1000, &rem);
1047		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
1048		strcat(buf, buf1);
1049		if (strlen(buf) >= 800) {
1050			pr_alert("%s", buf);
1051			buf[0] = 0;
1052		}
1053	}
1054
1055	pr_alert("%s", buf);
1056
1057oom_exit:
 1058	// This will shut down everything, including us.
1059	if (shutdown) {
1060		shutdown_start = 1;
1061		wake_up(&shutdown_wq);
1062	}
1063
 1064	// Wait for the torture framework to stop us.
1065	while (!torture_must_stop())
1066		schedule_timeout_uninterruptible(1);
1067
1068end:
1069	torture_kthread_stopping("main_func");
1070	kfree(result_avg);
1071	kfree(buf);
1072	return 0;
1073}
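// Editorial note: process_durations() returns the total reader time in
// nanoseconds, so result_avg[] holds the average per-loop duration in
// nanoseconds scaled by 1000, that is, fixed point with three fractional
// digits.  Worked example: 2 readers x 10000 loops totaling 30,000,000 ns
// gives result_avg = 1000 * 30000000 / (2 * 10000) = 1500000, which
// div_u64_rem() then renders as "1500.000".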
1074
1075static void
1076ref_scale_print_module_parms(const struct ref_scale_ops *cur_ops, const char *tag)
1077{
1078	pr_alert("%s" SCALE_FLAG
1079		 "--- %s:  verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
1080		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
1081}
1082
1083static void
1084ref_scale_cleanup(void)
1085{
1086	int i;
1087
1088	if (torture_cleanup_begin())
1089		return;
1090
1091	if (!cur_ops) {
1092		torture_cleanup_end();
1093		return;
1094	}
1095
1096	if (reader_tasks) {
1097		for (i = 0; i < nreaders; i++)
1098			torture_stop_kthread("ref_scale_reader",
1099					     reader_tasks[i].task);
1100	}
1101	kfree(reader_tasks);
1102
1103	torture_stop_kthread("main_task", main_task);
1104	kfree(main_task);
1105
1106	// Do scale-type-specific cleanup operations.
1107	if (cur_ops->cleanup != NULL)
1108		cur_ops->cleanup();
1109
1110	torture_cleanup_end();
1111}
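// Editorial note: torture_stop_kthread() NULLs the task pointer it
// stops, so the kfree(main_task) above presumably reduces to
// kfree(NULL), a harmless no-op.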
1112
 1113// Shutdown kthread.  Just waits to be awakened, then shuts down the system.
1114static int
1115ref_scale_shutdown(void *arg)
1116{
1117	wait_event_idle(shutdown_wq, shutdown_start);
1118
1119	smp_mb(); // Wake before output.
1120	ref_scale_cleanup();
1121	kernel_power_off();
1122
1123	return -EINVAL;
1124}
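// Editorial note: with shutdown enabled (the default when built in),
// main_func() sets shutdown_start and wakes this kthread once results
// have been printed, so an automated test guest powers itself off after
// its console log has been captured; test scripting can then treat the
// power-off as end-of-scenario.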
1125
1126static int __init
1127ref_scale_init(void)
1128{
1129	long i;
1130	int firsterr = 0;
1131	static const struct ref_scale_ops *scale_ops[] = {
1132		&rcu_ops, &srcu_ops, &srcu_lite_ops, RCU_TRACE_OPS RCU_TASKS_OPS
1133		&refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
1134		&clock_ops, &jiffies_ops, &typesafe_ref_ops, &typesafe_lock_ops,
1135		&typesafe_seqlock_ops,
1136	};
1137
1138	if (!torture_init_begin(scale_type, verbose))
1139		return -EBUSY;
1140
1141	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
1142		cur_ops = scale_ops[i];
1143		if (strcmp(scale_type, cur_ops->name) == 0)
1144			break;
1145	}
1146	if (i == ARRAY_SIZE(scale_ops)) {
1147		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
1148		pr_alert("rcu-scale types:");
1149		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
1150			pr_cont(" %s", scale_ops[i]->name);
1151		pr_cont("\n");
1152		firsterr = -EINVAL;
1153		cur_ops = NULL;
1154		goto unwind;
1155	}
1156	if (cur_ops->init)
1157		if (!cur_ops->init()) {
1158			firsterr = -EUCLEAN;
1159			goto unwind;
1160		}
1161
1162	ref_scale_print_module_parms(cur_ops, "Start of test");
1163
1164	// Shutdown task
1165	if (shutdown) {
1166		init_waitqueue_head(&shutdown_wq);
1167		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
1168						  shutdown_task);
1169		if (torture_init_error(firsterr))
1170			goto unwind;
1171		schedule_timeout_uninterruptible(1);
1172	}
1173
1174	// Reader tasks (default to ~75% of online CPUs).
1175	if (nreaders < 0)
1176		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
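	// Editorial note: (n >> 1) + (n >> 2) = n/2 + n/4 = 3n/4 of the
	// online CPUs, e.g., 8 CPUs -> 4 + 2 = 6 readers.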
1177	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
1178		loops = 1;
1179	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
1180		nreaders = 1;
1181	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
1182		nruns = 1;
1183	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
1184			       GFP_KERNEL);
1185	if (!reader_tasks) {
1186		SCALEOUT_ERRSTRING("out of memory");
1187		firsterr = -ENOMEM;
1188		goto unwind;
1189	}
1190
1191	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);
1192
1193	for (i = 0; i < nreaders; i++) {
1194		init_waitqueue_head(&reader_tasks[i].wq);
1195		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
1196						  reader_tasks[i].task);
1197		if (torture_init_error(firsterr))
1198			goto unwind;
1199	}
1200
1201	// Main Task
1202	init_waitqueue_head(&main_wq);
1203	firsterr = torture_create_kthread(main_func, NULL, main_task);
1204	if (torture_init_error(firsterr))
1205		goto unwind;
1206
1207	torture_init_end();
1208	return 0;
1209
1210unwind:
1211	torture_init_end();
1212	ref_scale_cleanup();
1213	if (shutdown) {
1214		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
1215		kernel_power_off();
1216	}
1217	return firsterr;
1218}
1219
1220module_init(ref_scale_init);
1221module_exit(ref_scale_cleanup);