// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_DESCRIPTION("torture test facility for locking");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter. If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;
	int ret;
	char *s;

	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Bad CPU range";
out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
	return ret;
}

// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;

	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

static bool cpumask_nonempty(cpumask_var_t mask)
{
	return cpumask_available(mask) && !cpumask_empty(mask);
}

static const struct kernel_param_ops lt_bind_ops = {
	.set = param_set_cpumask,
	.get = param_get_cpumask,
};

module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
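
/*
 * The bind masks accept standard kernel cpulist syntax, for example
 * bind_writers=0-3 or bind_readers=0,2,4-7.
 */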

long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

struct call_rcu_chain {
	struct rcu_head crc_rh;
	bool crc_stop;
};
static struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0; /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted until the next deboost, which
		 * happens on average once every 2 * rt_boost_factor lock
		 * operations, restoring the task to its original prio, and
		 * so forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}
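
/*
 * For example, with the default rt_boost_factor of 50 and eight write-side
 * kthreads, each non-RT writer is boosted on average once per 400 of its
 * lock operations, and each boosted writer is deboosted on average once
 * per 800.
 */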

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
		j = jiffies;
		mdelay(long_hold);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock = torture_raw_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock = torture_raw_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_spin_lock_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init = torture_mutex_init,
	.nested_lock = torture_mutex_nested_lock,
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_mutex_unlock,
	.nested_unlock = torture_mutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

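/*
 * Acquire the three torture ww_mutexes in list order. On -EDEADLK, the
 * locks already held are dropped, the contended lock is taken with
 * ww_mutex_lock_slow(), and that lock is moved to the head of the list
 * before the remaining locks are reacquired: the classic wait/wound
 * backoff-and-retry protocol.
 */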
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init = torture_ww_mutex_init,
	.exit = torture_ww_mutex_exit,
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init = torture_rtmutex_init,
	.nested_lock = torture_rtmutex_nested_lock,
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rt_boost_rtmutex,
	.writeunlock = torture_rtmutex_unlock,
	.nested_unlock = torture_rtmutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.exit = torture_percpu_rwsem_exit,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread. Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock (about 1% of the time) so we
		 * can avoid always serializing the lock chains on
		 * that central lock. By skipping the main lock
		 * occasionally, we can create different contention
		 * patterns (allowing for multiple disjoint blocked
		 * trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread. Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics. Caller must ensure that there is only one
 * call to this function at a given time!!! This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}


static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight. These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier();
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	firsterr = call_rcu_chain_init();
	if (torture_init_error(firsterr))
		goto unwind;

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Module-based torture test facility for locking
4 *
5 * Copyright (C) IBM Corporation, 2014
6 *
7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8 * Davidlohr Bueso <dave@stgolabs.net>
9 * Based on kernel/rcu/torture.c.
10 */
11
12#define pr_fmt(fmt) fmt
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/kthread.h>
17#include <linux/sched/rt.h>
18#include <linux/spinlock.h>
19#include <linux/mutex.h>
20#include <linux/rwsem.h>
21#include <linux/smp.h>
22#include <linux/interrupt.h>
23#include <linux/sched.h>
24#include <uapi/linux/sched/types.h>
25#include <linux/rtmutex.h>
26#include <linux/atomic.h>
27#include <linux/moduleparam.h>
28#include <linux/delay.h>
29#include <linux/slab.h>
30#include <linux/torture.h>
31#include <linux/reboot.h>
32
33MODULE_DESCRIPTION("torture test facility for locking");
34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
36
37torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
38torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
39torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
40torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
41torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
42torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
43torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
44torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
45torture_param(int, rt_boost, 2,
46 "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
47torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
48torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
49torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
50torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
51torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
52torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
53torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
54/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
55#define MAX_NESTED_LOCKS 8
56
57static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
58module_param(torture_type, charp, 0444);
59MODULE_PARM_DESC(torture_type,
60 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
61
62static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
63static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.
64
65// Parse a cpumask kernel parameter. If there are more users later on,
66// this might need to got to a more central location.
67static int param_set_cpumask(const char *val, const struct kernel_param *kp)
68{
69 cpumask_var_t *cm_bind = kp->arg;
70 int ret;
71 char *s;
72
73 if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
74 s = "Out of memory";
75 ret = -ENOMEM;
76 goto out_err;
77 }
78 ret = cpulist_parse(val, *cm_bind);
79 if (!ret)
80 return ret;
81 s = "Bad CPU range";
82out_err:
83 pr_warn("%s: %s, all CPUs set\n", kp->name, s);
84 cpumask_setall(*cm_bind);
85 return ret;
86}
87
88// Output a cpumask kernel parameter.
89static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
90{
91 cpumask_var_t *cm_bind = kp->arg;
92
93 return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
94}
95
96static bool cpumask_nonempty(cpumask_var_t mask)
97{
98 return cpumask_available(mask) && !cpumask_empty(mask);
99}
100
101static const struct kernel_param_ops lt_bind_ops = {
102 .set = param_set_cpumask,
103 .get = param_get_cpumask,
104};
105
106module_param_cb(bind_readers, <_bind_ops, &bind_readers, 0644);
107module_param_cb(bind_writers, <_bind_ops, &bind_writers, 0644);
108
109long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
110
111static struct task_struct *stats_task;
112static struct task_struct **writer_tasks;
113static struct task_struct **reader_tasks;
114
115static bool lock_is_write_held;
116static atomic_t lock_is_read_held;
117static unsigned long last_lock_release;
118
119struct lock_stress_stats {
120 long n_lock_fail;
121 long n_lock_acquired;
122};
123
124struct call_rcu_chain {
125 struct rcu_head crc_rh;
126 bool crc_stop;
127};
128struct call_rcu_chain *call_rcu_chain_list;
129
130/* Forward reference. */
131static void lock_torture_cleanup(void);
132
133/*
134 * Operations vector for selecting different types of tests.
135 */
136struct lock_torture_ops {
137 void (*init)(void);
138 void (*exit)(void);
139 int (*nested_lock)(int tid, u32 lockset);
140 int (*writelock)(int tid);
141 void (*write_delay)(struct torture_random_state *trsp);
142 void (*task_boost)(struct torture_random_state *trsp);
143 void (*writeunlock)(int tid);
144 void (*nested_unlock)(int tid, u32 lockset);
145 int (*readlock)(int tid);
146 void (*read_delay)(struct torture_random_state *trsp);
147 void (*readunlock)(int tid);
148
149 unsigned long flags; /* for irq spinlocks */
150 const char *name;
151};
152
153struct lock_torture_cxt {
154 int nrealwriters_stress;
155 int nrealreaders_stress;
156 bool debug_lock;
157 bool init_called;
158 atomic_t n_lock_torture_errors;
159 struct lock_torture_ops *cur_ops;
160 struct lock_stress_stats *lwsa; /* writer statistics */
161 struct lock_stress_stats *lrsa; /* reader statistics */
162};
163static struct lock_torture_cxt cxt = { 0, 0, false, false,
164 ATOMIC_INIT(0),
165 NULL, NULL};
166/*
167 * Definitions for lock torture testing.
168 */
169
170static int torture_lock_busted_write_lock(int tid __maybe_unused)
171{
172 return 0; /* BUGGY, do not use in real life!!! */
173}
174
175static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
176{
177 /* We want a long delay occasionally to force massive contention. */
178 if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
179 mdelay(long_hold);
180 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
181 torture_preempt_schedule(); /* Allow test to be preempted. */
182}
183
184static void torture_lock_busted_write_unlock(int tid __maybe_unused)
185{
186 /* BUGGY, do not use in real life!!! */
187}
188
189static void __torture_rt_boost(struct torture_random_state *trsp)
190{
191 const unsigned int factor = rt_boost_factor;
192
193 if (!rt_task(current)) {
194 /*
195 * Boost priority once every rt_boost_factor operations. When
196 * the task tries to take the lock, the rtmutex it will account
197 * for the new priority, and do any corresponding pi-dance.
198 */
199 if (trsp && !(torture_random(trsp) %
200 (cxt.nrealwriters_stress * factor))) {
201 sched_set_fifo(current);
202 } else /* common case, do nothing */
203 return;
204 } else {
205 /*
206 * The task will remain boosted for another 10 * rt_boost_factor
207 * operations, then restored back to its original prio, and so
208 * forth.
209 *
210 * When @trsp is nil, we want to force-reset the task for
211 * stopping the kthread.
212 */
213 if (!trsp || !(torture_random(trsp) %
214 (cxt.nrealwriters_stress * factor * 2))) {
215 sched_set_normal(current, 0);
216 } else /* common case, do nothing */
217 return;
218 }
219}
220
221static void torture_rt_boost(struct torture_random_state *trsp)
222{
223 if (rt_boost != 2)
224 return;
225
226 __torture_rt_boost(trsp);
227}
228
229static struct lock_torture_ops lock_busted_ops = {
230 .writelock = torture_lock_busted_write_lock,
231 .write_delay = torture_lock_busted_write_delay,
232 .task_boost = torture_rt_boost,
233 .writeunlock = torture_lock_busted_write_unlock,
234 .readlock = NULL,
235 .read_delay = NULL,
236 .readunlock = NULL,
237 .name = "lock_busted"
238};
239
240static DEFINE_SPINLOCK(torture_spinlock);
241
242static int torture_spin_lock_write_lock(int tid __maybe_unused)
243__acquires(torture_spinlock)
244{
245 spin_lock(&torture_spinlock);
246 return 0;
247}
248
249static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
250{
251 const unsigned long shortdelay_us = 2;
252 unsigned long j;
253
254 /* We want a short delay mostly to emulate likely code, and
255 * we want a long delay occasionally to force massive contention.
256 */
257 if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
258 j = jiffies;
259 mdelay(long_hold);
260 pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
261 }
262 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
263 udelay(shortdelay_us);
264 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
265 torture_preempt_schedule(); /* Allow test to be preempted. */
266}
267
268static void torture_spin_lock_write_unlock(int tid __maybe_unused)
269__releases(torture_spinlock)
270{
271 spin_unlock(&torture_spinlock);
272}
273
274static struct lock_torture_ops spin_lock_ops = {
275 .writelock = torture_spin_lock_write_lock,
276 .write_delay = torture_spin_lock_write_delay,
277 .task_boost = torture_rt_boost,
278 .writeunlock = torture_spin_lock_write_unlock,
279 .readlock = NULL,
280 .read_delay = NULL,
281 .readunlock = NULL,
282 .name = "spin_lock"
283};
284
285static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
286__acquires(torture_spinlock)
287{
288 unsigned long flags;
289
290 spin_lock_irqsave(&torture_spinlock, flags);
291 cxt.cur_ops->flags = flags;
292 return 0;
293}
294
295static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
296__releases(torture_spinlock)
297{
298 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
299}
300
301static struct lock_torture_ops spin_lock_irq_ops = {
302 .writelock = torture_spin_lock_write_lock_irq,
303 .write_delay = torture_spin_lock_write_delay,
304 .task_boost = torture_rt_boost,
305 .writeunlock = torture_lock_spin_write_unlock_irq,
306 .readlock = NULL,
307 .read_delay = NULL,
308 .readunlock = NULL,
309 .name = "spin_lock_irq"
310};
311
312static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);
313
314static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
315__acquires(torture_raw_spinlock)
316{
317 raw_spin_lock(&torture_raw_spinlock);
318 return 0;
319}
320
321static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
322__releases(torture_raw_spinlock)
323{
324 raw_spin_unlock(&torture_raw_spinlock);
325}
326
327static struct lock_torture_ops raw_spin_lock_ops = {
328 .writelock = torture_raw_spin_lock_write_lock,
329 .write_delay = torture_spin_lock_write_delay,
330 .task_boost = torture_rt_boost,
331 .writeunlock = torture_raw_spin_lock_write_unlock,
332 .readlock = NULL,
333 .read_delay = NULL,
334 .readunlock = NULL,
335 .name = "raw_spin_lock"
336};
337
338static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
339__acquires(torture_raw_spinlock)
340{
341 unsigned long flags;
342
343 raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
344 cxt.cur_ops->flags = flags;
345 return 0;
346}
347
348static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
349__releases(torture_raw_spinlock)
350{
351 raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
352}
353
354static struct lock_torture_ops raw_spin_lock_irq_ops = {
355 .writelock = torture_raw_spin_lock_write_lock_irq,
356 .write_delay = torture_spin_lock_write_delay,
357 .task_boost = torture_rt_boost,
358 .writeunlock = torture_raw_spin_lock_write_unlock_irq,
359 .readlock = NULL,
360 .read_delay = NULL,
361 .readunlock = NULL,
362 .name = "raw_spin_lock_irq"
363};
364
365static DEFINE_RWLOCK(torture_rwlock);
366
367static int torture_rwlock_write_lock(int tid __maybe_unused)
368__acquires(torture_rwlock)
369{
370 write_lock(&torture_rwlock);
371 return 0;
372}
373
374static void torture_rwlock_write_delay(struct torture_random_state *trsp)
375{
376 const unsigned long shortdelay_us = 2;
377
378 /* We want a short delay mostly to emulate likely code, and
379 * we want a long delay occasionally to force massive contention.
380 */
381 if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
382 mdelay(long_hold);
383 else
384 udelay(shortdelay_us);
385}
386
387static void torture_rwlock_write_unlock(int tid __maybe_unused)
388__releases(torture_rwlock)
389{
390 write_unlock(&torture_rwlock);
391}
392
393static int torture_rwlock_read_lock(int tid __maybe_unused)
394__acquires(torture_rwlock)
395{
396 read_lock(&torture_rwlock);
397 return 0;
398}
399
400static void torture_rwlock_read_delay(struct torture_random_state *trsp)
401{
402 const unsigned long shortdelay_us = 10;
403
404 /* We want a short delay mostly to emulate likely code, and
405 * we want a long delay occasionally to force massive contention.
406 */
407 if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
408 mdelay(long_hold);
409 else
410 udelay(shortdelay_us);
411}
412
413static void torture_rwlock_read_unlock(int tid __maybe_unused)
414__releases(torture_rwlock)
415{
416 read_unlock(&torture_rwlock);
417}
418
419static struct lock_torture_ops rw_lock_ops = {
420 .writelock = torture_rwlock_write_lock,
421 .write_delay = torture_rwlock_write_delay,
422 .task_boost = torture_rt_boost,
423 .writeunlock = torture_rwlock_write_unlock,
424 .readlock = torture_rwlock_read_lock,
425 .read_delay = torture_rwlock_read_delay,
426 .readunlock = torture_rwlock_read_unlock,
427 .name = "rw_lock"
428};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init = torture_mutex_init,
	.nested_lock = torture_mutex_nested_lock,
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_mutex_unlock,
	.nested_unlock = torture_mutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};
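
/*
 * Example invocation (hypothetical parameter values): exercise the
 * nested_lock/nested_unlock paths above with four nested mutexes
 * underneath the main mutex:
 *
 *	modprobe locktorture torture_type=mutex_lock nested_locks=4
 */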

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		/* Back off: release every lock acquired so far. */
		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		/*
		 * -EDEADLK means this context was wounded, so wait for
		 * the contended lock with ww_mutex_lock_slow(), then
		 * move it to the head of the list so that iteration
		 * resumes over the locks not yet held.
		 */
		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init = torture_ww_mutex_init,
	.exit = torture_ww_mutex_exit,
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};
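
/*
 * Example invocation (hypothetical parameter values): torture the
 * wait/wound mutexes with sixteen writers contending for all three
 * locks:
 *
 *	modprobe locktorture torture_type=ww_mutex_lock nwriters_stress=16
 */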

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init = torture_rtmutex_init,
	.nested_lock = torture_rtmutex_nested_lock,
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rt_boost_rtmutex,
	.writeunlock = torture_rtmutex_unlock,
	.nested_unlock = torture_rtmutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
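
/*
 * Example invocation (hypothetical parameter values): torture rtmutex
 * with priority boosting applied only to rt_mutex (rt_boost=1):
 *
 *	modprobe locktorture torture_type=rtmutex_lock rt_boost=1
 */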
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};
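
/*
 * Example invocation (hypothetical parameter values): torture the rwsem
 * with writers bound to CPUs 0-1 and readers bound to CPUs 2-3:
 *
 *	modprobe locktorture torture_type=rwsem_lock bind_writers=0-1 bind_readers=2-3
 */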

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.exit = torture_percpu_rwsem_exit,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};
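
/*
 * Example invocation (hypothetical parameter values): stress the
 * per-CPU reader-writer semaphore with mostly readers:
 *
 *	modprobe locktorture torture_type=percpu_rwsem_lock nwriters_stress=2 nreaders_stress=16
 */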

/*
 * Lock torture writer kthread. Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
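
/*
 * Example invocation (hypothetical parameter value): make the writer
 * kthreads above warn whenever a write-side acquisition takes more
 * than 20 jiffies:
 *
 *	modprobe locktorture acq_writer_lim=20
 */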

/*
 * Lock torture reader kthread. Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
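
/*
 * A healthy writer line produced by the above looks like (hypothetical
 * numbers):
 *
 *	Writes: Total: 22892 Max/Min: 3195/2804  Fail: 0
 *
 * "???" is appended when CPU hotplug is not being tested and Max
 * exceeds twice Min, and "!!!" flags lock-acquisition failures, which
 * also increment cxt.n_lock_torture_errors.
 */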

/*
 * Print torture statistics. Caller must ensure that there is only one
 * call to this function at a given time!!! This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}


static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight. These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.
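
// For example (hypothetical parameter values), to keep four such chains
// in flight for the duration of a spin_lock test:
//
//	modprobe locktorture torture_type=spin_lock call_rcu_chains=4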

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier();
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * NULL cxt.lwsa and cxt.lrsa indicate early cleanup, meaning that
	 * the test has not run, such as when bogus arguments were passed
	 * while loading the module. However, cxt.cur_ops->init() may have
	 * been invoked, so in addition to performing the underlying
	 * torture-specific cleanups, cur_ops->exit() will be invoked if
	 * needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the number of readers
			 * and writers evenly. We still run the same total
			 * number of threads as the writer-only locks'
			 * default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	firsterr = call_rcu_chain_init();
	if (torture_init_error(firsterr))
		goto unwind;

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first. This can be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if that is ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);