// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

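/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, ww_acquire_init() arms random
 * -EDEADLK injection driven by a per-context countdown.  Setting the
 * countdown to ~0U below effectively disables injection, so tests that
 * check for *real* deadlock detection are not perturbed by artificial
 * backoffs.
 */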
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

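/*
 * Basic mutual exclusion: the parent takes mtx.mutex (with or without an
 * acquire context, TEST_MTX_CTX) and only then lets the worker try to take
 * the same lock, either blocking (ww_mutex_lock) or spinning on
 * ww_mutex_trylock (TEST_MTX_TRY).  If the worker signals &mtx.done while
 * the parent still holds the lock, mutual exclusion is broken.
 * TEST_MTX_SPIN selects busy-polling for that failure instead of a timed
 * sleep.
 */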
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

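/*
 * AA (self-recursion) test: with the lock already held under an acquire
 * context, ww_mutex_trylock() must fail both with and without a context,
 * and a second ww_mutex_lock() with the same context must return -EALREADY
 * instead of deadlocking.  The initial acquisition uses either lock or
 * trylock, depending on @trylock.
 */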
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL)) {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx)) {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

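/*
 * ABBA test: the parent takes a_mutex while the worker takes b_mutex, then
 * each tries to acquire the other's lock within the same acquire context.
 * Without @resolve, at least one side must observe -EDEADLK; with @resolve,
 * the side that loses backs off (unlock + ww_mutex_lock_slow) and both
 * sides must then succeed.  @trylock exercises the same scenario with the
 * first lock taken via ww_mutex_trylock().
 */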
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

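/*
 * Cycle test: nthreads workers form a ring, each holding its own a_mutex
 * and then acquiring its neighbour's (the last one wraps around to the
 * first).  The ww_mutex algorithm must break the resulting cycle: whoever
 * sees -EDEADLK drops its own lock, waits for the contended one with
 * ww_mutex_lock_slow() and reacquires its own lock afterwards.
 */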
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

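/*
 * Stress tests: worker items hammer a shared array of ww_mutexes until
 * their timeout expires.  Three flavours are mixed by stress(): acquiring
 * all locks in a fixed per-worker random order (inorder), walking a
 * reordered list with backoff-and-restart (reorder), and repeatedly taking
 * a single random lock without a context (one).
 */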
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

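/*
 * The shuffle below uses the module-local rnd_state above (seeded at module
 * init) behind a spinlock rather than the global RNG; get_random_order()
 * then produces a random permutation of 0..count-1 with a Fisher-Yates
 * style swap pass.
 */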
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

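/*
 * The inorder worker acquires every lock in its fixed random order under a
 * single acquire context.  On -EDEADLK it releases everything it holds,
 * waits for the contended lock with ww_mutex_lock_slow() and retries from
 * the top, but only while the timeout has not yet expired, so a heavily
 * contended run cannot livelock indefinitely.
 */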
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

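/*
 * The reorder worker keeps its locks on a list and locks them in list
 * order.  When one of them is contended (-EDEADLK), the locks acquired so
 * far are released, the contended lock is taken with ww_mutex_lock_slow()
 * and moved to the head of the list so the iteration effectively restarts,
 * mirroring how callers are expected to reorder after a backoff.
 */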
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

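/*
 * stress() spreads the requested worker flavours round-robin across
 * nthreads work items (slots whose flavour is not set in @flags are
 * skipped), queues them all on the unbound workqueue and waits for them to
 * drain before destroying the locks again.
 */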
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

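/*
 * Module init runs every selftest in turn and fails the load on the first
 * error.  The module is normally built via CONFIG_WW_MUTEX_SELFTEST, and
 * progress/failure messages are reported through printk, so results end up
 * in the kernel log.
 */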
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2047, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");