v5.4 (kernel/locking/test-ww_mutex.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Module-based API test facility for ww_mutexes
  4 */
  5
  6#include <linux/kernel.h>
  7
  8#include <linux/completion.h>
  9#include <linux/delay.h>
 10#include <linux/kthread.h>
 11#include <linux/module.h>
 12#include <linux/random.h>
 13#include <linux/slab.h>
 14#include <linux/ww_mutex.h>
 15
 16static DEFINE_WD_CLASS(ww_class);
 17struct workqueue_struct *wq;
 18
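/*
 * Basic mutual exclusion test: the main thread takes mtx.mutex (with or
 * without an acquire context, TEST_MTX_CTX) and then releases a worker that
 * tries to take it as well, either by spinning on ww_mutex_trylock()
 * (TEST_MTX_TRY) or by blocking in ww_mutex_lock(). The worker signalling
 * 'done' while the main thread still holds the lock is a failure;
 * TEST_MTX_SPIN selects polling instead of a timed wait for that check.
 */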
 19struct test_mutex {
 20	struct work_struct work;
 21	struct ww_mutex mutex;
 22	struct completion ready, go, done;
 23	unsigned int flags;
 24};
 25
 26#define TEST_MTX_SPIN BIT(0)
 27#define TEST_MTX_TRY BIT(1)
 28#define TEST_MTX_CTX BIT(2)
 29#define __TEST_MTX_LAST BIT(3)
 30
 31static void test_mutex_work(struct work_struct *work)
 32{
 33	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);
 34
 35	complete(&mtx->ready);
 36	wait_for_completion(&mtx->go);
 37
 38	if (mtx->flags & TEST_MTX_TRY) {
 39		while (!ww_mutex_trylock(&mtx->mutex))
 40			cond_resched();
 41	} else {
 42		ww_mutex_lock(&mtx->mutex, NULL);
 43	}
 44	complete(&mtx->done);
 45	ww_mutex_unlock(&mtx->mutex);
 46}
 47
 48static int __test_mutex(unsigned int flags)
 49{
 50#define TIMEOUT (HZ / 16)
 51	struct test_mutex mtx;
 52	struct ww_acquire_ctx ctx;
 53	int ret;
 54
 55	ww_mutex_init(&mtx.mutex, &ww_class);
 56	ww_acquire_init(&ctx, &ww_class);
 57
 58	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
 59	init_completion(&mtx.ready);
 60	init_completion(&mtx.go);
 61	init_completion(&mtx.done);
 62	mtx.flags = flags;
 63
 64	schedule_work(&mtx.work);
 65
 66	wait_for_completion(&mtx.ready);
 67	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
 68	complete(&mtx.go);
 69	if (flags & TEST_MTX_SPIN) {
 70		unsigned long timeout = jiffies + TIMEOUT;
 71
 72		ret = 0;
 73		do {
 74			if (completion_done(&mtx.done)) {
 75				ret = -EINVAL;
 76				break;
 77			}
 78			cond_resched();
 79		} while (time_before(jiffies, timeout));
 80	} else {
 81		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
 82	}
 83	ww_mutex_unlock(&mtx.mutex);
 84	ww_acquire_fini(&ctx);
 85
 86	if (ret) {
 87		pr_err("%s(flags=%x): mutual exclusion failure\n",
 88		       __func__, flags);
 89		ret = -EINVAL;
 90	}
 91
 92	flush_work(&mtx.work);
 93	destroy_work_on_stack(&mtx.work);
 94	return ret;
 95#undef TIMEOUT
 96}
 97
 98static int test_mutex(void)
 99{
100	int ret;
101	int i;
102
103	for (i = 0; i < __TEST_MTX_LAST; i++) {
104		ret = __test_mutex(i);
105		if (ret)
106			return ret;
107	}
108
109	return 0;
110}
111
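/*
 * AA (recursive locking) test: with the mutex already held under an acquire
 * context, ww_mutex_trylock() on it must fail and ww_mutex_lock() with the
 * same context must return -EALREADY instead of deadlocking.
 */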
112static int test_aa(void)
113{
114	struct ww_mutex mutex;
115	struct ww_acquire_ctx ctx;
116	int ret;
117
118	ww_mutex_init(&mutex, &ww_class);
119	ww_acquire_init(&ctx, &ww_class);
120
121	ww_mutex_lock(&mutex, &ctx);
122
123	if (ww_mutex_trylock(&mutex))  {
124		pr_err("%s: trylocked itself!\n", __func__);
125		ww_mutex_unlock(&mutex);
126		ret = -EINVAL;
127		goto out;
128	}
129
130	ret = ww_mutex_lock(&mutex, &ctx);
131	if (ret != -EALREADY) {
132		pr_err("%s: missed deadlock for recursing, ret=%d\n",
133		       __func__, ret);
134		if (!ret)
135			ww_mutex_unlock(&mutex);
136		ret = -EINVAL;
137		goto out;
138	}
139
140	ret = 0;
141out:
142	ww_mutex_unlock(&mutex);
143	ww_acquire_fini(&ctx);
144	return ret;
145}
146
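/*
 * ABBA deadlock test: the main thread locks a_mutex then b_mutex while the
 * worker locks b_mutex then a_mutex under the same ww_class. Without
 * 'resolve', at least one side must see -EDEADLK; with 'resolve', the loser
 * backs off (unlock + ww_mutex_lock_slow()) and both sides must succeed.
 */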
147struct test_abba {
148	struct work_struct work;
149	struct ww_mutex a_mutex;
150	struct ww_mutex b_mutex;
151	struct completion a_ready;
152	struct completion b_ready;
153	bool resolve;
154	int result;
155};
156
157static void test_abba_work(struct work_struct *work)
158{
159	struct test_abba *abba = container_of(work, typeof(*abba), work);
160	struct ww_acquire_ctx ctx;
161	int err;
162
163	ww_acquire_init(&ctx, &ww_class);
164	ww_mutex_lock(&abba->b_mutex, &ctx);
165
166	complete(&abba->b_ready);
167	wait_for_completion(&abba->a_ready);
168
169	err = ww_mutex_lock(&abba->a_mutex, &ctx);
170	if (abba->resolve && err == -EDEADLK) {
171		ww_mutex_unlock(&abba->b_mutex);
172		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
173		err = ww_mutex_lock(&abba->b_mutex, &ctx);
174	}
175
176	if (!err)
177		ww_mutex_unlock(&abba->a_mutex);
178	ww_mutex_unlock(&abba->b_mutex);
179	ww_acquire_fini(&ctx);
180
181	abba->result = err;
182}
183
184static int test_abba(bool resolve)
185{
186	struct test_abba abba;
187	struct ww_acquire_ctx ctx;
188	int err, ret;
189
190	ww_mutex_init(&abba.a_mutex, &ww_class);
191	ww_mutex_init(&abba.b_mutex, &ww_class);
192	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
193	init_completion(&abba.a_ready);
194	init_completion(&abba.b_ready);
195	abba.resolve = resolve;
196
197	schedule_work(&abba.work);
198
199	ww_acquire_init(&ctx, &ww_class);
200	ww_mutex_lock(&abba.a_mutex, &ctx);
201
202	complete(&abba.a_ready);
203	wait_for_completion(&abba.b_ready);
204
205	err = ww_mutex_lock(&abba.b_mutex, &ctx);
206	if (resolve && err == -EDEADLK) {
207		ww_mutex_unlock(&abba.a_mutex);
208		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
209		err = ww_mutex_lock(&abba.a_mutex, &ctx);
210	}
211
212	if (!err)
213		ww_mutex_unlock(&abba.b_mutex);
214	ww_mutex_unlock(&abba.a_mutex);
215	ww_acquire_fini(&ctx);
216
217	flush_work(&abba.work);
218	destroy_work_on_stack(&abba.work);
219
220	ret = 0;
221	if (resolve) {
222		if (err || abba.result) {
223			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
224			       __func__, err, abba.result);
225			ret = -EINVAL;
226		}
227	} else {
228		if (err != -EDEADLK && abba.result != -EDEADLK) {
229			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
230			       __func__, err, abba.result);
231			ret = -EINVAL;
232		}
233	}
234	return ret;
235}
236
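/*
 * Cyclic deadlock test: each of nthreads workers holds its own a_mutex and
 * then tries to take the next worker's a_mutex, closing a cycle. Any worker
 * that gets -EDEADLK backs off and reacquires via ww_mutex_lock_slow(); in
 * the end every worker must have succeeded.
 */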
237struct test_cycle {
238	struct work_struct work;
239	struct ww_mutex a_mutex;
240	struct ww_mutex *b_mutex;
241	struct completion *a_signal;
242	struct completion b_signal;
243	int result;
244};
245
246static void test_cycle_work(struct work_struct *work)
247{
248	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
249	struct ww_acquire_ctx ctx;
250	int err, erra = 0;
251
252	ww_acquire_init(&ctx, &ww_class);
253	ww_mutex_lock(&cycle->a_mutex, &ctx);
254
255	complete(cycle->a_signal);
256	wait_for_completion(&cycle->b_signal);
257
258	err = ww_mutex_lock(cycle->b_mutex, &ctx);
259	if (err == -EDEADLK) {
260		err = 0;
261		ww_mutex_unlock(&cycle->a_mutex);
262		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
263		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
264	}
265
266	if (!err)
267		ww_mutex_unlock(cycle->b_mutex);
268	if (!erra)
269		ww_mutex_unlock(&cycle->a_mutex);
270	ww_acquire_fini(&ctx);
271
272	cycle->result = err ?: erra;
273}
274
275static int __test_cycle(unsigned int nthreads)
276{
277	struct test_cycle *cycles;
278	unsigned int n, last = nthreads - 1;
279	int ret;
280
281	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
282	if (!cycles)
283		return -ENOMEM;
284
285	for (n = 0; n < nthreads; n++) {
286		struct test_cycle *cycle = &cycles[n];
287
288		ww_mutex_init(&cycle->a_mutex, &ww_class);
289		if (n == last)
290			cycle->b_mutex = &cycles[0].a_mutex;
291		else
292			cycle->b_mutex = &cycles[n + 1].a_mutex;
293
294		if (n == 0)
295			cycle->a_signal = &cycles[last].b_signal;
296		else
297			cycle->a_signal = &cycles[n - 1].b_signal;
298		init_completion(&cycle->b_signal);
299
300		INIT_WORK(&cycle->work, test_cycle_work);
301		cycle->result = 0;
302	}
303
304	for (n = 0; n < nthreads; n++)
305		queue_work(wq, &cycles[n].work);
306
307	flush_workqueue(wq);
308
309	ret = 0;
310	for (n = 0; n < nthreads; n++) {
311		struct test_cycle *cycle = &cycles[n];
312
313		if (!cycle->result)
314			continue;
315
316		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
317		       n, nthreads, cycle->result);
318		ret = -EINVAL;
319		break;
320	}
321
322	for (n = 0; n < nthreads; n++)
323		ww_mutex_destroy(&cycles[n].a_mutex);
324	kfree(cycles);
325	return ret;
326}
327
328static int test_cycle(unsigned int ncpus)
329{
330	unsigned int n;
331	int ret;
332
333	for (n = 2; n <= ncpus + 1; n++) {
334		ret = __test_cycle(n);
335		if (ret)
336			return ret;
337	}
338
339	return 0;
340}
341
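/*
 * Stress tests: a pool of workers hammers a shared array of ww_mutexes until
 * 'timeout'. The inorder variant takes the locks in one fixed random order,
 * the reorder variant keeps them on a list that is rotated on contention,
 * and the one-lock variant repeatedly takes a single random lock without a
 * context. Anything other than a resolvable -EDEADLK is a failure.
 */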
342struct stress {
343	struct work_struct work;
344	struct ww_mutex *locks;
345	unsigned long timeout;
346	int nlocks;
347};
348
349static int *get_random_order(int count)
350{
351	int *order;
352	int n, r, tmp;
353
354	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
355	if (!order)
356		return order;
357
358	for (n = 0; n < count; n++)
359		order[n] = n;
360
361	for (n = count - 1; n > 1; n--) {
362		r = get_random_int() % (n + 1);
363		if (r != n) {
364			tmp = order[n];
365			order[n] = order[r];
366			order[r] = tmp;
367		}
368	}
369
370	return order;
371}
372
373static void dummy_load(struct stress *stress)
374{
375	usleep_range(1000, 2000);
376}
377
378static void stress_inorder_work(struct work_struct *work)
379{
380	struct stress *stress = container_of(work, typeof(*stress), work);
381	const int nlocks = stress->nlocks;
382	struct ww_mutex *locks = stress->locks;
383	struct ww_acquire_ctx ctx;
384	int *order;
385
386	order = get_random_order(nlocks);
387	if (!order)
388		return;
389
390	do {
391		int contended = -1;
392		int n, err;
393
394		ww_acquire_init(&ctx, &ww_class);
395retry:
396		err = 0;
397		for (n = 0; n < nlocks; n++) {
398			if (n == contended)
399				continue;
400
401			err = ww_mutex_lock(&locks[order[n]], &ctx);
402			if (err < 0)
403				break;
404		}
405		if (!err)
406			dummy_load(stress);
407
408		if (contended > n)
409			ww_mutex_unlock(&locks[order[contended]]);
410		contended = n;
411		while (n--)
412			ww_mutex_unlock(&locks[order[n]]);
413
414		if (err == -EDEADLK) {
415			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
416			goto retry;
417		}
418
419		if (err) {
420			pr_err_once("stress (%s) failed with %d\n",
421				    __func__, err);
422			break;
423		}
424
425		ww_acquire_fini(&ctx);
426	} while (!time_after(jiffies, stress->timeout));
427
428	kfree(order);
429	kfree(stress);
430}
431
432struct reorder_lock {
433	struct list_head link;
434	struct ww_mutex *lock;
435};
436
437static void stress_reorder_work(struct work_struct *work)
438{
439	struct stress *stress = container_of(work, typeof(*stress), work);
440	LIST_HEAD(locks);
441	struct ww_acquire_ctx ctx;
442	struct reorder_lock *ll, *ln;
443	int *order;
444	int n, err;
445
446	order = get_random_order(stress->nlocks);
447	if (!order)
448		return;
449
450	for (n = 0; n < stress->nlocks; n++) {
451		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
452		if (!ll)
453			goto out;
454
455		ll->lock = &stress->locks[order[n]];
456		list_add(&ll->link, &locks);
457	}
458	kfree(order);
459	order = NULL;
460
461	do {
462		ww_acquire_init(&ctx, &ww_class);
463
464		list_for_each_entry(ll, &locks, link) {
465			err = ww_mutex_lock(ll->lock, &ctx);
466			if (!err)
467				continue;
468
469			ln = ll;
470			list_for_each_entry_continue_reverse(ln, &locks, link)
471				ww_mutex_unlock(ln->lock);
472
473			if (err != -EDEADLK) {
474				pr_err_once("stress (%s) failed with %d\n",
475					    __func__, err);
476				break;
477			}
478
479			ww_mutex_lock_slow(ll->lock, &ctx);
480			list_move(&ll->link, &locks); /* restarts iteration */
481		}
482
483		dummy_load(stress);
484		list_for_each_entry(ll, &locks, link)
485			ww_mutex_unlock(ll->lock);
486
487		ww_acquire_fini(&ctx);
488	} while (!time_after(jiffies, stress->timeout));
489
490out:
491	list_for_each_entry_safe(ll, ln, &locks, link)
492		kfree(ll);
493	kfree(order);
494	kfree(stress);
495}
496
497static void stress_one_work(struct work_struct *work)
498{
499	struct stress *stress = container_of(work, typeof(*stress), work);
500	const int nlocks = stress->nlocks;
501	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
502	int err;
503
504	do {
505		err = ww_mutex_lock(lock, NULL);
506		if (!err) {
507			dummy_load(stress);
508			ww_mutex_unlock(lock);
509		} else {
510			pr_err_once("stress (%s) failed with %d\n",
511				    __func__, err);
512			break;
513		}
514	} while (!time_after(jiffies, stress->timeout));
515
516	kfree(stress);
517}
518
519#define STRESS_INORDER BIT(0)
520#define STRESS_REORDER BIT(1)
521#define STRESS_ONE BIT(2)
522#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
523
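/*
 * stress() distributes nthreads work items over the selected variants:
 * slot n % 4 == 0/1/2 runs the inorder/reorder/one-lock worker (slot 3 stays
 * idle), each running against the same lock array for roughly two seconds.
 */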
524static int stress(int nlocks, int nthreads, unsigned int flags)
525{
526	struct ww_mutex *locks;
527	int n;
528
529	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
530	if (!locks)
531		return -ENOMEM;
532
533	for (n = 0; n < nlocks; n++)
534		ww_mutex_init(&locks[n], &ww_class);
535
536	for (n = 0; nthreads; n++) {
537		struct stress *stress;
538		void (*fn)(struct work_struct *work);
539
540		fn = NULL;
541		switch (n & 3) {
542		case 0:
543			if (flags & STRESS_INORDER)
544				fn = stress_inorder_work;
545			break;
546		case 1:
547			if (flags & STRESS_REORDER)
548				fn = stress_reorder_work;
549			break;
550		case 2:
551			if (flags & STRESS_ONE)
552				fn = stress_one_work;
553			break;
554		}
555
556		if (!fn)
557			continue;
558
559		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
560		if (!stress)
561			break;
562
563		INIT_WORK(&stress->work, fn);
564		stress->locks = locks;
565		stress->nlocks = nlocks;
566		stress->timeout = jiffies + 2*HZ;
567
568		queue_work(wq, &stress->work);
569		nthreads--;
570	}
571
572	flush_workqueue(wq);
573
574	for (n = 0; n < nlocks; n++)
575		ww_mutex_destroy(&locks[n]);
576	kfree(locks);
577
578	return 0;
579}
580
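/*
 * Module init: run the self-tests in sequence on an unbound workqueue and
 * fail the module load on the first error.
 */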
581static int __init test_ww_mutex_init(void)
582{
583	int ncpus = num_online_cpus();
584	int ret;
585
586	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
587	if (!wq)
588		return -ENOMEM;
589
590	ret = test_mutex();
591	if (ret)
592		return ret;
593
594	ret = test_aa();
595	if (ret)
596		return ret;
597
598	ret = test_abba(false);
599	if (ret)
600		return ret;
601
602	ret = test_abba(true);
603	if (ret)
604		return ret;
605
606	ret = test_cycle(ncpus);
607	if (ret)
608		return ret;
609
610	ret = stress(16, 2*ncpus, STRESS_INORDER);
611	if (ret)
612		return ret;
613
614	ret = stress(16, 2*ncpus, STRESS_REORDER);
615	if (ret)
616		return ret;
617
618	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
619	if (ret)
620		return ret;
621
622	return 0;
623}
624
625static void __exit test_ww_mutex_exit(void)
626{
627	destroy_workqueue(wq);
628}
629
630module_init(test_ww_mutex_init);
631module_exit(test_ww_mutex_exit);
632
633MODULE_LICENSE("GPL");
634MODULE_AUTHOR("Intel Corporation");
v6.2 (kernel/locking/test-ww_mutex.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Module-based API test facility for ww_mutexes
  4 */
  5
  6#include <linux/kernel.h>
  7
  8#include <linux/completion.h>
  9#include <linux/delay.h>
 10#include <linux/kthread.h>
 11#include <linux/module.h>
 12#include <linux/random.h>
 13#include <linux/slab.h>
 14#include <linux/ww_mutex.h>
 15
 16static DEFINE_WD_CLASS(ww_class);
 17struct workqueue_struct *wq;
 18
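/*
 * Under CONFIG_DEBUG_WW_MUTEX_SLOWPATH the ww_mutex core injects spurious
 * -EDEADLK returns to exercise backoff paths; ww_acquire_init_noinject()
 * pushes the injection countdown out of reach so the deterministic deadlock
 * tests below only observe genuine wound/die results.
 */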
 19#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 20#define ww_acquire_init_noinject(a, b) do { \
 21		ww_acquire_init((a), (b)); \
 22		(a)->deadlock_inject_countdown = ~0U; \
 23	} while (0)
 24#else
 25#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
 26#endif
 27
 28struct test_mutex {
 29	struct work_struct work;
 30	struct ww_mutex mutex;
 31	struct completion ready, go, done;
 32	unsigned int flags;
 33};
 34
 35#define TEST_MTX_SPIN BIT(0)
 36#define TEST_MTX_TRY BIT(1)
 37#define TEST_MTX_CTX BIT(2)
 38#define __TEST_MTX_LAST BIT(3)
 39
 40static void test_mutex_work(struct work_struct *work)
 41{
 42	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);
 43
 44	complete(&mtx->ready);
 45	wait_for_completion(&mtx->go);
 46
 47	if (mtx->flags & TEST_MTX_TRY) {
 48		while (!ww_mutex_trylock(&mtx->mutex, NULL))
 49			cond_resched();
 50	} else {
 51		ww_mutex_lock(&mtx->mutex, NULL);
 52	}
 53	complete(&mtx->done);
 54	ww_mutex_unlock(&mtx->mutex);
 55}
 56
 57static int __test_mutex(unsigned int flags)
 58{
 59#define TIMEOUT (HZ / 16)
 60	struct test_mutex mtx;
 61	struct ww_acquire_ctx ctx;
 62	int ret;
 63
 64	ww_mutex_init(&mtx.mutex, &ww_class);
 65	ww_acquire_init(&ctx, &ww_class);
 66
 67	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
 68	init_completion(&mtx.ready);
 69	init_completion(&mtx.go);
 70	init_completion(&mtx.done);
 71	mtx.flags = flags;
 72
 73	schedule_work(&mtx.work);
 74
 75	wait_for_completion(&mtx.ready);
 76	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
 77	complete(&mtx.go);
 78	if (flags & TEST_MTX_SPIN) {
 79		unsigned long timeout = jiffies + TIMEOUT;
 80
 81		ret = 0;
 82		do {
 83			if (completion_done(&mtx.done)) {
 84				ret = -EINVAL;
 85				break;
 86			}
 87			cond_resched();
 88		} while (time_before(jiffies, timeout));
 89	} else {
 90		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
 91	}
 92	ww_mutex_unlock(&mtx.mutex);
 93	ww_acquire_fini(&ctx);
 94
 95	if (ret) {
 96		pr_err("%s(flags=%x): mutual exclusion failure\n",
 97		       __func__, flags);
 98		ret = -EINVAL;
 99	}
100
101	flush_work(&mtx.work);
102	destroy_work_on_stack(&mtx.work);
103	return ret;
104#undef TIMEOUT
105}
106
107static int test_mutex(void)
108{
109	int ret;
110	int i;
111
112	for (i = 0; i < __TEST_MTX_LAST; i++) {
113		ret = __test_mutex(i);
114		if (ret)
115			return ret;
116	}
117
118	return 0;
119}
120
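/*
 * AA test, v6.2 form: the initial acquisition is done with either
 * ww_mutex_lock() or ww_mutex_trylock() depending on 'trylock'; afterwards
 * both a context-less and a same-context trylock must fail, and a recursive
 * ww_mutex_lock() must return -EALREADY.
 */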
121static int test_aa(bool trylock)
122{
123	struct ww_mutex mutex;
124	struct ww_acquire_ctx ctx;
125	int ret;
126	const char *from = trylock ? "trylock" : "lock";
127
128	ww_mutex_init(&mutex, &ww_class);
129	ww_acquire_init(&ctx, &ww_class);
130
131	if (!trylock) {
132		ret = ww_mutex_lock(&mutex, &ctx);
133		if (ret) {
134			pr_err("%s: initial lock failed!\n", __func__);
135			goto out;
136		}
137	} else {
138		ret = !ww_mutex_trylock(&mutex, &ctx);
139		if (ret) {
140			pr_err("%s: initial trylock failed!\n", __func__);
141			goto out;
142		}
143	}
144
145	if (ww_mutex_trylock(&mutex, NULL))  {
146		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
147		ww_mutex_unlock(&mutex);
148		ret = -EINVAL;
149		goto out;
150	}
151
152	if (ww_mutex_trylock(&mutex, &ctx))  {
153		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
154		ww_mutex_unlock(&mutex);
155		ret = -EINVAL;
156		goto out;
157	}
158
159	ret = ww_mutex_lock(&mutex, &ctx);
160	if (ret != -EALREADY) {
161		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
162		       __func__, ret, from);
163		if (!ret)
164			ww_mutex_unlock(&mutex);
165		ret = -EINVAL;
166		goto out;
167	}
168
169	ww_mutex_unlock(&mutex);
170	ret = 0;
171out:
172	ww_acquire_fini(&ctx);
173	return ret;
174}
175
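/*
 * ABBA test, extended in v6.2 with a 'trylock' mode: the initial A/B
 * acquisitions may be made with ww_mutex_trylock(..., &ctx), which must
 * still install the acquire context (checked via the WARN_ONs on ->ctx) so
 * the subsequent cross-locking detects the deadlock as before.
 */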
176struct test_abba {
177	struct work_struct work;
178	struct ww_mutex a_mutex;
179	struct ww_mutex b_mutex;
180	struct completion a_ready;
181	struct completion b_ready;
182	bool resolve, trylock;
183	int result;
184};
185
186static void test_abba_work(struct work_struct *work)
187{
188	struct test_abba *abba = container_of(work, typeof(*abba), work);
189	struct ww_acquire_ctx ctx;
190	int err;
191
192	ww_acquire_init_noinject(&ctx, &ww_class);
193	if (!abba->trylock)
194		ww_mutex_lock(&abba->b_mutex, &ctx);
195	else
196		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));
197
198	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);
199
200	complete(&abba->b_ready);
201	wait_for_completion(&abba->a_ready);
202
203	err = ww_mutex_lock(&abba->a_mutex, &ctx);
204	if (abba->resolve && err == -EDEADLK) {
205		ww_mutex_unlock(&abba->b_mutex);
206		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
207		err = ww_mutex_lock(&abba->b_mutex, &ctx);
208	}
209
210	if (!err)
211		ww_mutex_unlock(&abba->a_mutex);
212	ww_mutex_unlock(&abba->b_mutex);
213	ww_acquire_fini(&ctx);
214
215	abba->result = err;
216}
217
218static int test_abba(bool trylock, bool resolve)
219{
220	struct test_abba abba;
221	struct ww_acquire_ctx ctx;
222	int err, ret;
223
224	ww_mutex_init(&abba.a_mutex, &ww_class);
225	ww_mutex_init(&abba.b_mutex, &ww_class);
226	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
227	init_completion(&abba.a_ready);
228	init_completion(&abba.b_ready);
229	abba.trylock = trylock;
230	abba.resolve = resolve;
231
232	schedule_work(&abba.work);
233
234	ww_acquire_init_noinject(&ctx, &ww_class);
235	if (!trylock)
236		ww_mutex_lock(&abba.a_mutex, &ctx);
237	else
238		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));
239
240	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);
241
242	complete(&abba.a_ready);
243	wait_for_completion(&abba.b_ready);
244
245	err = ww_mutex_lock(&abba.b_mutex, &ctx);
246	if (resolve && err == -EDEADLK) {
247		ww_mutex_unlock(&abba.a_mutex);
248		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
249		err = ww_mutex_lock(&abba.a_mutex, &ctx);
250	}
251
252	if (!err)
253		ww_mutex_unlock(&abba.b_mutex);
254	ww_mutex_unlock(&abba.a_mutex);
255	ww_acquire_fini(&ctx);
256
257	flush_work(&abba.work);
258	destroy_work_on_stack(&abba.work);
259
260	ret = 0;
261	if (resolve) {
262		if (err || abba.result) {
263			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
264			       __func__, err, abba.result);
265			ret = -EINVAL;
266		}
267	} else {
268		if (err != -EDEADLK && abba.result != -EDEADLK) {
269			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
270			       __func__, err, abba.result);
271			ret = -EINVAL;
272		}
273	}
274	return ret;
275}
276
277struct test_cycle {
278	struct work_struct work;
279	struct ww_mutex a_mutex;
280	struct ww_mutex *b_mutex;
281	struct completion *a_signal;
282	struct completion b_signal;
283	int result;
284};
285
286static void test_cycle_work(struct work_struct *work)
287{
288	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
289	struct ww_acquire_ctx ctx;
290	int err, erra = 0;
291
292	ww_acquire_init_noinject(&ctx, &ww_class);
293	ww_mutex_lock(&cycle->a_mutex, &ctx);
294
295	complete(cycle->a_signal);
296	wait_for_completion(&cycle->b_signal);
297
298	err = ww_mutex_lock(cycle->b_mutex, &ctx);
299	if (err == -EDEADLK) {
300		err = 0;
301		ww_mutex_unlock(&cycle->a_mutex);
302		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
303		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
304	}
305
306	if (!err)
307		ww_mutex_unlock(cycle->b_mutex);
308	if (!erra)
309		ww_mutex_unlock(&cycle->a_mutex);
310	ww_acquire_fini(&ctx);
311
312	cycle->result = err ?: erra;
313}
314
315static int __test_cycle(unsigned int nthreads)
316{
317	struct test_cycle *cycles;
318	unsigned int n, last = nthreads - 1;
319	int ret;
320
321	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
322	if (!cycles)
323		return -ENOMEM;
324
325	for (n = 0; n < nthreads; n++) {
326		struct test_cycle *cycle = &cycles[n];
327
328		ww_mutex_init(&cycle->a_mutex, &ww_class);
329		if (n == last)
330			cycle->b_mutex = &cycles[0].a_mutex;
331		else
332			cycle->b_mutex = &cycles[n + 1].a_mutex;
333
334		if (n == 0)
335			cycle->a_signal = &cycles[last].b_signal;
336		else
337			cycle->a_signal = &cycles[n - 1].b_signal;
338		init_completion(&cycle->b_signal);
339
340		INIT_WORK(&cycle->work, test_cycle_work);
341		cycle->result = 0;
342	}
343
344	for (n = 0; n < nthreads; n++)
345		queue_work(wq, &cycles[n].work);
346
347	flush_workqueue(wq);
348
349	ret = 0;
350	for (n = 0; n < nthreads; n++) {
351		struct test_cycle *cycle = &cycles[n];
352
353		if (!cycle->result)
354			continue;
355
356		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
357		       n, nthreads, cycle->result);
358		ret = -EINVAL;
359		break;
360	}
361
362	for (n = 0; n < nthreads; n++)
363		ww_mutex_destroy(&cycles[n].a_mutex);
364	kfree(cycles);
365	return ret;
366}
367
368static int test_cycle(unsigned int ncpus)
369{
370	unsigned int n;
371	int ret;
372
373	for (n = 2; n <= ncpus + 1; n++) {
374		ret = __test_cycle(n);
375		if (ret)
376			return ret;
377	}
378
379	return 0;
380}
381
382struct stress {
383	struct work_struct work;
384	struct ww_mutex *locks;
385	unsigned long timeout;
386	int nlocks;
387};
388
389static int *get_random_order(int count)
390{
391	int *order;
392	int n, r, tmp;
393
394	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
395	if (!order)
396		return order;
397
398	for (n = 0; n < count; n++)
399		order[n] = n;
400
401	for (n = count - 1; n > 1; n--) {
402		r = get_random_u32_below(n + 1);
403		if (r != n) {
404			tmp = order[n];
405			order[n] = order[r];
406			order[r] = tmp;
407		}
408	}
409
410	return order;
411}
412
413static void dummy_load(struct stress *stress)
414{
415	usleep_range(1000, 2000);
416}
417
418static void stress_inorder_work(struct work_struct *work)
419{
420	struct stress *stress = container_of(work, typeof(*stress), work);
421	const int nlocks = stress->nlocks;
422	struct ww_mutex *locks = stress->locks;
423	struct ww_acquire_ctx ctx;
424	int *order;
425
426	order = get_random_order(nlocks);
427	if (!order)
428		return;
429
430	do {
431		int contended = -1;
432		int n, err;
433
434		ww_acquire_init(&ctx, &ww_class);
435retry:
436		err = 0;
437		for (n = 0; n < nlocks; n++) {
438			if (n == contended)
439				continue;
440
441			err = ww_mutex_lock(&locks[order[n]], &ctx);
442			if (err < 0)
443				break;
444		}
445		if (!err)
446			dummy_load(stress);
447
448		if (contended > n)
449			ww_mutex_unlock(&locks[order[contended]]);
450		contended = n;
451		while (n--)
452			ww_mutex_unlock(&locks[order[n]]);
453
454		if (err == -EDEADLK) {
455			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
456			goto retry;
457		}
458
459		if (err) {
460			pr_err_once("stress (%s) failed with %d\n",
461				    __func__, err);
462			break;
463		}
464
465		ww_acquire_fini(&ctx);
466	} while (!time_after(jiffies, stress->timeout));
467
468	kfree(order);
469	kfree(stress);
470}
471
472struct reorder_lock {
473	struct list_head link;
474	struct ww_mutex *lock;
475};
476
477static void stress_reorder_work(struct work_struct *work)
478{
479	struct stress *stress = container_of(work, typeof(*stress), work);
480	LIST_HEAD(locks);
481	struct ww_acquire_ctx ctx;
482	struct reorder_lock *ll, *ln;
483	int *order;
484	int n, err;
485
486	order = get_random_order(stress->nlocks);
487	if (!order)
488		return;
489
490	for (n = 0; n < stress->nlocks; n++) {
491		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
492		if (!ll)
493			goto out;
494
495		ll->lock = &stress->locks[order[n]];
496		list_add(&ll->link, &locks);
497	}
498	kfree(order);
499	order = NULL;
500
501	do {
502		ww_acquire_init(&ctx, &ww_class);
503
504		list_for_each_entry(ll, &locks, link) {
505			err = ww_mutex_lock(ll->lock, &ctx);
506			if (!err)
507				continue;
508
509			ln = ll;
510			list_for_each_entry_continue_reverse(ln, &locks, link)
511				ww_mutex_unlock(ln->lock);
512
513			if (err != -EDEADLK) {
514				pr_err_once("stress (%s) failed with %d\n",
515					    __func__, err);
516				break;
517			}
518
519			ww_mutex_lock_slow(ll->lock, &ctx);
520			list_move(&ll->link, &locks); /* restarts iteration */
521		}
522
523		dummy_load(stress);
524		list_for_each_entry(ll, &locks, link)
525			ww_mutex_unlock(ll->lock);
526
527		ww_acquire_fini(&ctx);
528	} while (!time_after(jiffies, stress->timeout));
529
530out:
531	list_for_each_entry_safe(ll, ln, &locks, link)
532		kfree(ll);
533	kfree(order);
534	kfree(stress);
535}
536
537static void stress_one_work(struct work_struct *work)
538{
539	struct stress *stress = container_of(work, typeof(*stress), work);
540	const int nlocks = stress->nlocks;
541	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
542	int err;
543
544	do {
545		err = ww_mutex_lock(lock, NULL);
546		if (!err) {
547			dummy_load(stress);
548			ww_mutex_unlock(lock);
549		} else {
550			pr_err_once("stress (%s) failed with %d\n",
551				    __func__, err);
552			break;
553		}
554	} while (!time_after(jiffies, stress->timeout));
555
556	kfree(stress);
557}
558
559#define STRESS_INORDER BIT(0)
560#define STRESS_REORDER BIT(1)
561#define STRESS_ONE BIT(2)
562#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
563
564static int stress(int nlocks, int nthreads, unsigned int flags)
565{
566	struct ww_mutex *locks;
567	int n;
568
569	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
570	if (!locks)
571		return -ENOMEM;
572
573	for (n = 0; n < nlocks; n++)
574		ww_mutex_init(&locks[n], &ww_class);
575
576	for (n = 0; nthreads; n++) {
577		struct stress *stress;
578		void (*fn)(struct work_struct *work);
579
580		fn = NULL;
581		switch (n & 3) {
582		case 0:
583			if (flags & STRESS_INORDER)
584				fn = stress_inorder_work;
585			break;
586		case 1:
587			if (flags & STRESS_REORDER)
588				fn = stress_reorder_work;
589			break;
590		case 2:
591			if (flags & STRESS_ONE)
592				fn = stress_one_work;
593			break;
594		}
595
596		if (!fn)
597			continue;
598
599		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
600		if (!stress)
601			break;
602
603		INIT_WORK(&stress->work, fn);
604		stress->locks = locks;
605		stress->nlocks = nlocks;
606		stress->timeout = jiffies + 2*HZ;
607
608		queue_work(wq, &stress->work);
609		nthreads--;
610	}
611
612	flush_workqueue(wq);
613
614	for (n = 0; n < nlocks; n++)
615		ww_mutex_destroy(&locks[n]);
616	kfree(locks);
617
618	return 0;
619}
620
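/*
 * Module init, v6.2: test_aa() now runs in both lock and trylock modes and
 * test_abba() is exercised for all four (trylock, resolve) combinations.
 */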
621static int __init test_ww_mutex_init(void)
622{
623	int ncpus = num_online_cpus();
624	int ret, i;
625
626	printk(KERN_INFO "Beginning ww mutex selftests\n");
627
628	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
629	if (!wq)
630		return -ENOMEM;
631
632	ret = test_mutex();
633	if (ret)
634		return ret;
635
636	ret = test_aa(false);
637	if (ret)
638		return ret;
639
640	ret = test_aa(true);
641	if (ret)
642		return ret;
643
644	for (i = 0; i < 4; i++) {
645		ret = test_abba(i & 1, i & 2);
646		if (ret)
647			return ret;
648	}
649
650	ret = test_cycle(ncpus);
651	if (ret)
652		return ret;
653
654	ret = stress(16, 2*ncpus, STRESS_INORDER);
655	if (ret)
656		return ret;
657
658	ret = stress(16, 2*ncpus, STRESS_REORDER);
659	if (ret)
660		return ret;
661
662	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
663	if (ret)
664		return ret;
665
666	printk(KERN_INFO "All ww mutex selftests passed\n");
667	return 0;
668}
669
670static void __exit test_ww_mutex_exit(void)
671{
672	destroy_workqueue(wq);
673}
674
675module_init(test_ww_mutex_init);
676module_exit(test_ww_mutex_exit);
677
678MODULE_LICENSE("GPL");
679MODULE_AUTHOR("Intel Corporation");