kernel/rcu/refscale.c (Linux v5.14.15)
// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0)

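// When verbose_batched is set to some N > 0, VERBOSE_SCALEOUT_BATCH()
// prints only every Nth message, sleeping for a jiffy before each print
// so that the console can keep up with output from many reader kthreads.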
static atomic_t verbose_batch_ctr;

#define VERBOSE_SCALEOUT_BATCH(s, x...)						\
do {										\
	if (verbose &&								\
	    (verbose_batched <= 0 ||						\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {	\
		schedule_timeout_uninterruptible(1);				\
		pr_alert("%s" SCALE_FLAG s, scale_type, ## x);			\
	}									\
} while (0)

#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, rcu-trace, rcu-tasks, refcnt, rwlock, rwsem, lock, lock-irq, acqrel).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment; all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

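// As a usage sketch (parameter values below are illustrative, and the
// module is assumed to be built as refscale.ko):
//
//	modprobe refscale scale_type=srcu nreaders=8 nruns=30 readdelay=100
//
// would run 30 SRCU experiments with 8 reader kthreads, each read-side
// operation holding the section open for an extra 100 nanoseconds.
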
struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

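// Each mechanism under test supplies one ref_scale_ops instance, and the
// measurement harness drives whichever instance scale_type selects via
// cur_ops, which keeps the timing loop itself mechanism-agnostic.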
static struct ref_scale_ops *cur_ops;

static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

static void rcu_sync_scale_init(void)
{
}

static struct ref_scale_ops rcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_rcu_read_section,
	.delaysection	= ref_rcu_delay_section,
	.name		= "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_scale_ops srcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= srcu_ref_scale_read_section,
	.delaysection	= srcu_ref_scale_delay_section,
	.name		= "srcu"
};

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops rcu_tasks_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_tasks_ref_scale_read_section,
	.delaysection	= rcu_tasks_ref_scale_delay_section,
	.name		= "rcu-tasks"
};

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_scale_ops rcu_trace_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_trace_ref_scale_read_section,
	.delaysection	= rcu_trace_ref_scale_delay_section,
	.name		= "rcu-trace"
};

// Definitions for reference count.
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static struct ref_scale_ops refcnt_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_refcnt_section,
	.delaysection	= ref_refcnt_delay_section,
	.name		= "refcnt"
};

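// Note that the refcnt test above bounces one shared atomic_t among all
// readers, so unlike the RCU flavors its read-side cost includes
// cross-CPU contention on a single cache line.
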
// Definitions for rwlock.
static rwlock_t test_rwlock;

static void ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static struct ref_scale_ops rwlock_ops = {
	.init		= ref_rwlock_init,
	.readsection	= ref_rwlock_section,
	.delaysection	= ref_rwlock_delay_section,
	.name		= "rwlock"
};

// Definitions for rwsem.
static struct rw_semaphore test_rwsem;

static void ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static struct ref_scale_ops rwsem_ops = {
	.init		= ref_rwsem_init,
	.readsection	= ref_rwsem_section,
	.delaysection	= ref_rwsem_delay_section,
	.name		= "rwsem"
};

// Definitions for global spinlock.
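// This test and the irq-save variant below keep preemption disabled
// across the entire timed loop, and all readers contend for the same
// global test_lock, making an exclusive lock the reference mechanism.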
static DEFINE_SPINLOCK(test_lock);

static void ref_lock_section(const int nloops)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock(&test_lock);
		spin_unlock(&test_lock);
	}
	preempt_enable();
}

static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock(&test_lock);
		un_delay(udl, ndl);
		spin_unlock(&test_lock);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_ops = {
	.readsection	= ref_lock_section,
	.delaysection	= ref_lock_delay_section,
	.name		= "lock"
};

// Definitions for global irq-save spinlock.

static void ref_lock_irq_section(const int nloops)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock_irqsave(&test_lock, flags);
		spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock_irqsave(&test_lock, flags);
		un_delay(udl, ndl);
		spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_irq_ops = {
	.readsection	= ref_lock_irq_section,
	.delaysection	= ref_lock_irq_delay_section,
	.name		= "lock-irq"
};

// Definitions for acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

static void ref_acqrel_section(const int nloops)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		un_delay(udl, ndl);
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static struct ref_scale_ops acqrel_ops = {
	.readsection	= ref_acqrel_section,
	.delaysection	= ref_acqrel_delay_section,
	.name		= "acqrel"
};
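
// The acqrel test measures smp_load_acquire()/smp_store_release() on a
// per-CPU counter, exposing the cost of the ordering itself without any
// cross-CPU cache-line traffic.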
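
// readdelay is specified in nanoseconds: rcu_scale_one_reader() passes
// the whole microseconds to udelay() and the remainder to ndelay().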
static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

// Reader kthread.  Repeatedly does empty RCU read-side critical
// sections, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
			   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(raw_smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled, which prevents entry into the
	// slow path of rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
				me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	strcat(buf, "\n");

	SCALEOUT("%s\n", buf);

	kfree(buf);
	return sum;
}

// main_func is the orchestrator: for each experiment, it tells all the
// readers involved to start, waits for them to finish, reads out their
// measured durations, and then starts the next experiment. After nruns
// experiments, each run with all nreaders readers running concurrently,
// the per-experiment averages are printed.
static int main_func(void *arg)
{
	bool errexit = false;
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
	if (!result_avg || !buf) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		errexit = true;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start all nreaders readers for each experiment.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (errexit)
			break;
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

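		// Store 1000x the average number of nanoseconds per loop,
		// so that the final report can print three decimal places.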
		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average of all experiments.
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	if (!errexit) {
		buf[0] = 0;
		strcat(buf, "\n");
		strcat(buf, "Runs\tTime(ns)\n");
	}

	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		if (errexit)
			break;
		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
	}

	if (!errexit)
		SCALEOUT("%s", buf);

	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s:  verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

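	// kernel_power_off() should not return; the error code below
	// flags the anomaly if it somehow does.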
	return -EINVAL;
}

static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_scale_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCALEOUT("Starting %d reader threads\n", nreaders);

	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (firsterr)
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}

	// Main Task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (firsterr)
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
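	// If shutdown was requested, this is presumably a built-in instance
	// driven by automated testing, so power off rather than hang on an
	// initialization failure; the WARN_ON() records that failure.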
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);