// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

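/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, ww_mutex_lock() randomly injects
 * -EDEADLK based on a countdown initialised by ww_acquire_init().  Tests
 * that must observe only genuine deadlocks use this variant, which pushes
 * the countdown out of reach.
 */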
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

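/*
 * Worker side of the basic mutual-exclusion test: announce readiness, wait
 * for the go signal, take the mutex (spinning on trylock if TEST_MTX_TRY),
 * then flag completion before dropping the lock.
 */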
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

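/*
 * Driver side: hold the mutex (with or without an acquire context, per
 * TEST_MTX_CTX), release the worker and verify it cannot take the lock
 * while we hold it.  TEST_MTX_SPIN polls completion_done() until TIMEOUT;
 * the default waits on the completion with a timeout.  Either way, an
 * early completion means mutual exclusion was violated.
 */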
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

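/*
 * AA test: with a single acquire context, trylocking a mutex that context
 * already holds (with or without the context) must fail, and relocking it
 * with the same context must return -EALREADY rather than deadlock.
 */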
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL)) {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx)) {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

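/*
 * ABBA test: the work item takes b_mutex then a_mutex while the caller takes
 * a_mutex then b_mutex, both with deadlock injection disabled.  Without
 * 'resolve', at least one side must see -EDEADLK; with 'resolve', the losing
 * side backs off (unlock + ww_mutex_lock_slow) and both sides must succeed.
 */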
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

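/*
 * Cyclic deadlock test: worker n holds its own a_mutex and then tries its
 * neighbour's, with the last worker wrapping around to the first, so the
 * whole ring deadlocks.  Each worker that gets -EDEADLK backs off and
 * reacquires via ww_mutex_lock_slow(); every worker must finish with
 * result == 0.
 */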
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

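/*
 * Stress tests: each worker shares one array of ww_mutexes and hammers it
 * until the jiffies deadline in stress->timeout.  A module-private PRNG,
 * serialised by rng_lock, supplies the random lock orderings.
 */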
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

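/*
 * Return a freshly allocated array holding 0..count-1 shuffled with the
 * shared PRNG, or NULL on allocation failure.  Callers use it as their
 * private locking order.
 */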
static int *get_random_order(int count)
{
	int *order;
	int n, r;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n)
			swap(order[n], order[r]);
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

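/*
 * In-order stress: every pass acquires all locks in this worker's fixed
 * random order under one acquire context.  On -EDEADLK, drop everything,
 * take the contended lock with ww_mutex_lock_slow() and retry from the top;
 * any other error is fatal.  Runs until the timeout expires.
 */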
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

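/*
 * Reorder stress: the locks are kept on a list in an initially random order.
 * Each pass walks the list taking every lock; on -EDEADLK the locks already
 * held are released, the contended one is taken with ww_mutex_lock_slow()
 * and moved to the head of the list, which restarts the walk with it held
 * first.
 */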
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

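/*
 * Single-lock stress: hammer one randomly chosen mutex with plain
 * (context-less) lock/unlock cycles until the timeout expires.
 */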
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

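/*
 * Spawn nthreads stress workers over a shared array of nlocks mutexes,
 * assigning the worker functions round-robin from the set selected by
 * flags, then wait for all of them with flush_workqueue() and tear down.
 */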
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

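/*
 * Module init: run the self-tests in sequence -- basic mutual exclusion,
 * AA recursion (lock and trylock variants), all four ABBA combinations,
 * cyclic deadlocks for ring sizes 2..ncpus+1, and finally the timed
 * stress tests.
 */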
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");