// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader performance statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
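
/*
 * As an illustrative (hypothetical) example, booting with
 * "nr_cpus=8 rcuperf.nreaders=0 rcuperf.gp_exp=1" would select use
 * case 2 above: an update-only test of expedited grace periods with
 * eight rcu_perf_writer() kthreads and no reader kthreads.
 */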

#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() perf test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};
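
/*
 * Each flavor under test supplies one instance of this vector;
 * rcu_perf_init() matches the perf_type module parameter against
 * ->name, after which the measurement loops call only through
 * cur_ops and thus remain flavor-agnostic.
 */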

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype = RCU_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = rcu_perf_read_lock,
	.readunlock = rcu_perf_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.exp_completed = rcu_exp_batches_completed,
	.async = call_rcu,
	.gp_barrier = rcu_barrier,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.name = "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype = SRCU_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = srcu_perf_read_lock,
	.readunlock = srcu_perf_read_unlock,
	.get_gp_seq = srcu_perf_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_perf_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_perf_synchronize,
	.exp_sync = srcu_perf_synchronize_expedited,
	.name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype = SRCU_FLAVOR,
	.init = srcu_sync_perf_init,
	.cleanup = srcu_sync_perf_cleanup,
	.readlock = srcu_perf_read_lock,
	.readunlock = srcu_perf_read_unlock,
	.get_gp_seq = srcu_perf_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_perf_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_perf_synchronize,
	.exp_sync = srcu_perf_synchronize_expedited,
	.name = "srcud"
};
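
/*
 * Note the difference between the two SRCU variants: "srcu" exercises
 * the statically allocated srcu_ctl_perf (DEFINE_STATIC_SRCU), while
 * "srcud" dynamically initializes srcud via init_srcu_struct() and
 * tears it down in ->cleanup, covering both allocation modes.
 */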

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = tasks_perf_read_lock,
	.readunlock = tasks_perf_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks,
	.gp_barrier = rcu_barrier_tasks,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.name = "tasks"
};
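
/*
 * RCU-tasks readers have no explicit read-side markers (quiescent
 * states are instead events such as voluntary context switches), so
 * ->readlock and ->readunlock are no-ops here, and because there is
 * no expedited variant of synchronize_rcu_tasks(), ->exp_sync falls
 * back to the normal primitive.
 */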

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
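
/*
 * The flavors above all set ->gp_diff to rcu_seq_diff(), which
 * compensates for the grace-period state encoded in the low-order
 * bits of the sequence counters returned by ->get_gp_seq; the raw
 * subtraction fallback is for counters (such as a hypothetical
 * flavor counting grace periods directly) that carry no state bits.
 */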

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side critical
 * section, minimizing update-side interference.  However, the point of
 * this test is not to evaluate reader performance, but instead to serve
 * as a test load for update-side performance testing.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}
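
/*
 * In gp_async mode, each callback posted by rcu_perf_writer() bumps
 * this CPU's n_async_inflight counter and the callback above drops
 * it again, so at most gp_async_max asynchronous grace-period waits
 * are outstanding per CPU; on overflow, the writer falls back to
 * ->gp_barrier() to drain them (see the retry logic below).
 */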

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_gp_test_finished,
					  b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
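
/*
 * Worked example: on a system with eight online CPUs, compute_real(4)
 * returns 4, compute_real(-1) returns 8 (all CPUs), compute_real(-2)
 * returns 7, and compute_real(-20) is clamped to 1.
 */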

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() performance tests: start a kfree_rcu() loop on all CPUs,
 * run a fixed number of iterations, and measure the total time and the
 * number of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
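
/*
 * An illustrative (hypothetical) invocation: booting with
 * "rcuperf.kfree_rcu_test=1 rcuperf.kfree_nthreads=4" runs four
 * kfree_perf_thread() kthreads, each doing kfree_loops iterations
 * of kfree_alloc_num allocations followed by kfree_rcu().
 */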

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_perf_thread_started;
static atomic_t n_kfree_perf_thread_ended;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};
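
/*
 * With the default kfree_mult of 1, each allocation is
 * sizeof(struct kfree_obj): the 8-byte array plus a struct rcu_head
 * (two pointers, so 24 bytes total on a typical 64-bit build).
 * Larger kfree_mult values scale the allocation up to stress
 * bigger objects through the same kfree_rcu() path.
 */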

static int
kfree_perf_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_PERFOUT_STRING("kfree_perf_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_perf_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_perf_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();
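
		/*
		 * Note: si_mem_available() reports pages, so the shift
		 * by (20 - PAGE_SHIFT) below converts the page delta
		 * to MiB (valid for page sizes up to 1 MiB).
		 */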
		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuperf_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_perf_thread");
	return 0;
}

static void
kfree_perf_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_perf_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_perf_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_kfree_perf_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_perf_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_perf_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_perf_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_perf_cleanup();
	return firsterr;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_perf_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);