// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN		BIT(0)
#define TEST_MTX_TRY		BIT(1)
#define TEST_MTX_CTX		BIT(2)
#define __TEST_MTX_LAST		BIT(3)

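/*
 * Basic mutual exclusion: the main thread holds mtx.mutex while a worker
 * tries to take it (by lock or trylock, with or without an acquire context).
 * The worker must not get the lock, and so must not complete 'done', while
 * the owner still holds it.
 */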
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

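/*
 * AA (recursive) locking: taking the same ww_mutex twice within one acquire
 * context must fail.  A second trylock (with or without the context) must
 * return false, and a second ww_mutex_lock() with the same context must
 * return -EALREADY.
 */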
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL)) {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx)) {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

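/*
 * ABBA deadlock: the worker takes B then A while the main thread takes A
 * then B, with deadlock injection disabled so any -EDEADLK comes from a
 * real cycle.  Without 'resolve', at least one side must observe -EDEADLK;
 * with 'resolve', the loser backs off, reacquires its first lock with
 * ww_mutex_lock_slow() and retries, so both sides must finish without error.
 */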
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

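/*
 * Cyclic deadlock: nthreads workers are arranged in a ring.  Each worker
 * holds its own a_mutex and then requests its neighbour's a_mutex (the last
 * worker points back at the first), closing the cycle.  Every worker must
 * resolve the deadlock via -EDEADLK / ww_mutex_lock_slow() and finish
 * without error.
 */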
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

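/*
 * Stress tests: each worker gets the shared lock array and a deadline
 * (stress->timeout) and loops until the deadline passes.  A module-local
 * prandom state, serialised by rng_lock, supplies the random lock orderings.
 */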
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

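/* Return the indices 0..count-1 in a randomly shuffled order; caller frees. */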
static int *get_random_order(int count)
{
	int *order;
	int n, r;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n)
			swap(order[n], order[r]);
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

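/*
 * In-order stress: repeatedly acquire all locks in one randomly chosen but
 * fixed order.  On -EDEADLK, drop everything, take the contended lock with
 * ww_mutex_lock_slow() and retry from the top while time remains.
 */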
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

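/*
 * Reorder stress: keep the locks on a list and acquire them in list order.
 * When a lock returns -EDEADLK, release what is held, take the contended
 * lock with ww_mutex_lock_slow() and move it to the head of the list so the
 * next pass starts with it.
 */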
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

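/*
 * Single-lock stress: hammer one randomly chosen lock with plain
 * (context-less) lock/unlock cycles until the deadline passes.
 */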
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

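/*
 * Spawn nthreads stress workers over a shared array of nlocks ww_mutexes.
 * The worker type cycles through the kinds requested in 'flags' (slot n & 3
 * selects in-order, reorder or single-lock); every worker runs for roughly
 * two seconds before the workqueue is flushed and the locks are destroyed.
 */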
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

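/*
 * Module entry point: seed the PRNG, create an unbound workqueue and run
 * the selftests in order of increasing complexity: basic mutual exclusion,
 * AA, ABBA, cyclic deadlocks, then the timed stress tests.
 */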
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");