/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */
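
/*
 * For example (illustrative values only), booting with "nr_cpus=8"
 * runs eight readers and eight writers, while booting with
 * "nr_cpus=8 rcuperf.nwriters=0" runs a read-only test with eight
 * readers.
 */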

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
	      "Shutdown at end of performance tests.");
torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
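/* Per-CPU count of asynchronous GP callbacks still in flight. */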
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

static int rcu_perf_writer_state;
#define RTWS_INIT		0
#define RTWS_ASYNC		1
#define RTWS_BARRIER		2
#define RTWS_EXP_SYNC		3
#define RTWS_SYNC		4
#define RTWS_IDLE		5
#define RTWS_STOPPING		6

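/* Bounds on the number of grace-period measurements per writer kthread. */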
#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*started)(void);
	unsigned long (*completed)(void);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.started	= rcu_batches_started,
	.completed	= rcu_batches_completed,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh perf testing.
 */

static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static struct rcu_perf_ops rcu_bh_ops = {
	.ptype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_bh_perf_read_lock,
	.readunlock	= rcu_bh_perf_read_unlock,
	.started	= rcu_batches_started_bh,
	.completed	= rcu_batches_completed_bh,
	.exp_completed	= rcu_exp_batches_completed_sched,
	.async		= call_rcu_bh,
	.gp_barrier	= rcu_barrier_bh,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.name		= "rcu_bh"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.started	= NULL,
	.completed	= srcu_perf_completed,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

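/*
 * The "srcud" variant differs from "srcu" only in exercising a
 * dynamically initialized srcu_struct rather than the static
 * DEFINE_STATIC_SRCU() instance above.
 */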
static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.started	= NULL,
	.completed	= srcu_perf_completed,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for sched perf testing.
 */

static int sched_perf_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_perf_read_unlock(int idx)
{
	preempt_enable();
}

static struct rcu_perf_ops sched_ops = {
	.ptype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= sched_perf_read_lock,
	.readunlock	= sched_perf_read_unlock,
	.started	= rcu_batches_started_sched,
	.completed	= rcu_batches_completed_sched,
	.exp_completed	= rcu_exp_batches_completed_sched,
	.async		= call_rcu_sched,
	.gp_barrier	= rcu_barrier_sched,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.name		= "sched"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

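/*
 * RCU-tasks read-side critical sections are delimited by voluntary
 * context switches, so there are no explicit read-side markers; the
 * lock/unlock hooks below are intentionally empty.
 */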
static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.started	= rcu_no_completed,
	.completed	= rcu_no_completed,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

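	/*
	 * Each pass enters and exits an empty read-side critical
	 * section with interrupts disabled, so that the loop exercises
	 * little beyond the lock/unlock primitives themselves.
	 */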
	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started =
				cur_ops->completed();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
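			/*
			 * Post the grace-period request asynchronously,
			 * but throttle with a gp_barrier() wait once
			 * gp_async_max callbacks are in flight on this CPU.
			 */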
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				rcu_perf_writer_state = RTWS_ASYNC;
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				rcu_perf_writer_state = RTWS_BARRIER;
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			rcu_perf_writer_state = RTWS_EXP_SYNC;
			cur_ops->exp_sync();
		} else {
			rcu_perf_writer_state = RTWS_SYNC;
			cur_ops->sync();
		}
		rcu_perf_writer_state = RTWS_IDLE;
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->completed();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		rcu_perf_writer_state = RTWS_BARRIER;
		cur_ops->gp_barrier();
	}
	rcu_perf_writer_state = RTWS_STOPPING;
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static inline void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 b_rcu_perf_writer_finished -
			 b_rcu_perf_writer_started);
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					perf_type, PERF_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do flavor-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
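
/*
 * For example, on a 16-CPU system: compute_real(4) == 4,
 * compute_real(-1) == 16, and compute_real(-2) == 15.
 */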

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops,
		&tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
			 perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_alert(" %s", perf_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);