// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

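/*
 * The tests below exercise the documented wound/wait acquisition pattern
 * (sketch only; names are placeholders):
 *
 *	ww_acquire_init(&ctx, &ww_class);
 * retry:
 *	err = ww_mutex_lock(&lock, &ctx);
 *	if (err == -EDEADLK) {
 *		<unlock all locks already held under ctx>
 *		ww_mutex_lock_slow(&lock, &ctx);
 *		goto retry;
 *	}
 *	...
 *	ww_acquire_fini(&ctx);
 *
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH the core randomly injects -EDEADLK
 * to exercise that backoff path; tests that need deterministic results
 * disable the injection by exhausting the countdown.
 */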
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN		BIT(0)
#define TEST_MTX_TRY		BIT(1)
#define TEST_MTX_CTX		BIT(2)
#define __TEST_MTX_LAST		BIT(3)

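/*
 * Worker half of the basic mutual-exclusion test: signal ready, wait for
 * the parent's go, then take the lock (spinning on trylock if requested)
 * and complete 'done' while still holding it.
 */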
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

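/*
 * Basic mutual exclusion: the parent holds the lock while the worker tries
 * to take it. If mtx.done completes before the parent releases the lock,
 * exclusion was violated and the test fails with -EINVAL.
 */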
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

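/*
 * AA deadlock: re-acquiring a lock already held by the same acquire context
 * must be caught. A second ww_mutex_lock() with the owning context returns
 * -EALREADY, and trylock on an already-held lock must simply fail.
 */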
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL)) {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx)) {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

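/*
 * ABBA deadlock: this worker takes B then A while the parent takes A then B.
 * Wound/wait must make one side back off with -EDEADLK; 'resolve' exercises
 * the ww_mutex_lock_slow() recovery path, otherwise the -EDEADLK itself is
 * the expected outcome.
 */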
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

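/*
 * Parent side of the ABBA test: take A, rendezvous with the worker (which
 * holds B), then cross over. Exactly one side is expected to see -EDEADLK.
 */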
static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

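/*
 * Cyclic deadlock across nthreads workers: worker n holds lock n and then
 * requests lock n+1 (mod nthreads), closing the cycle. Wound/wait must
 * break it, with every worker eventually acquiring both locks.
 */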
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

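/* Shared state handed to each stress worker. */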
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

static struct rnd_state rng;
static DEFINE_SPINLOCK(rng_lock);

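/*
 * prandom_u32_state() is not thread-safe and the stress workers shuffle
 * concurrently, so serialise the shared PRNG state with a spinlock.
 */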
static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

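/*
 * Return a random permutation of 0..count-1 (Fisher-Yates style shuffle),
 * used to give each stress worker its own lock acquisition order.
 */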
static int *get_random_order(int count)
{
	int *order;
	int n, r;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n)
			swap(order[n], order[r]);
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

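/*
 * Stress pattern 1: acquire all locks in this worker's (random but fixed)
 * order under one acquire context, backing off and retrying via
 * ww_mutex_lock_slow() whenever -EDEADLK is returned.
 */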
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

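/*
 * Stress pattern 2: keep the locks on a list and, on contention, take the
 * contended lock first with ww_mutex_lock_slow() and move it to the head,
 * so the acquisition order changes between iterations.
 */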
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

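/*
 * Stress pattern 3: hammer one randomly chosen lock without an acquire
 * context, mixing plain mutex traffic into the wound/wait machinery.
 */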
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

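/*
 * Spread nthreads workers round-robin across the stress patterns selected
 * in flags, all sharing the same array of nlocks ww_mutexes for 2 seconds.
 */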
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

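/*
 * Run the whole suite at module load: basic exclusion, AA, ABBA (with and
 * without trylock and backoff resolution), cyclic deadlocks up to ncpus+1
 * threads, then the timed stress patterns. A non-zero return fails init.
 */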
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");