// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

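/*
 * All tests share one wait-die ww_class and an unbound workqueue on which
 * the contending lock/unlock work items run.
 *
 * Build/run note (an assumption, not stated in this file): this selftest
 * is normally enabled via CONFIG_WW_MUTEX_SELFTEST and runs at module load.
 */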
static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

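/*
 * Work item for __test_mutex(): signal readiness, wait for the go signal,
 * then acquire the mutex the parent already holds (spinning on trylock or
 * blocking in ww_mutex_lock()) and complete 'done' once it is taken.
 */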
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

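/*
 * Basic mutual exclusion: while we hold the mutex, the worker must not be
 * able to acquire it, so 'done' completing before we unlock (a nonzero
 * result from either wait below) indicates a locking failure.
 */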
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

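/* Exercise __test_mutex() with every combination of the TEST_MTX_* flags. */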
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

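/*
 * AA test: within a single acquire context, re-acquiring a mutex we already
 * hold must fail (trylock returns false, ww_mutex_lock() returns -EALREADY)
 * rather than deadlocking or silently succeeding.
 */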
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}

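/*
 * ABBA test: this thread takes A then B while a worker takes B then A.
 * Without backoff one side must observe -EDEADLK; with 'resolve' set, the
 * losing side releases its lock and retries via ww_mutex_lock_slow().
 */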
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

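/*
 * Parent side of the ABBA test: take A, handshake with the worker holding
 * B, then contend on B. Once the worker is flushed, check that the deadlock
 * was resolved (resolve=true) or correctly reported (resolve=false).
 */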
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

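/*
 * Cycle test: N workers form a ring, each holding its own a_mutex while
 * trying to take its neighbour's; at least one must see -EDEADLK and back
 * off for the whole cycle to resolve.
 */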
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

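/*
 * Wire up the ring: worker n holds cycles[n].a_mutex and contends on
 * cycles[n + 1].a_mutex (wrapping at the end); the completion chain makes
 * every worker hold its first lock before any attempts its second.
 */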
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

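/*
 * Stress tests: hammer a shared array of ww_mutexes from several workers,
 * each for roughly two seconds, acquiring the locks in a fixed random order
 * (inorder), in reshuffled orders with backoff (reorder), or one at a time
 * with no context (one).
 */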
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

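/*
 * Return a pseudo-randomly shuffled permutation of [0, count), or NULL on
 * allocation failure. (The shuffle loop stops at n > 1, so the final swap
 * of a textbook Fisher-Yates is skipped; good enough for a stress test.)
 */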
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

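/*
 * Acquire every lock in one fixed random order under a single context,
 * backing off via the slow path on -EDEADLK. 'contended' tracks the lock we
 * lost on so the retry skips it (ww_mutex_lock_slow() re-acquires it).
 */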
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}

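/*
 * Like the inorder test, but on every -EDEADLK the contended lock is moved
 * to the head of the list before retrying, so the acquisition order keeps
 * changing between attempts and between workers.
 */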
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}

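/*
 * Simplest stressor: repeatedly lock and unlock one randomly chosen mutex
 * with no acquire context, mixing plain lock/unlock traffic into the load.
 */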
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

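/*
 * Queue nthreads stress work items against a shared array of nlocks
 * mutexes, cycling through the stressor types selected in 'flags'; each
 * item frees its own struct stress, so only the lock array is cleaned up
 * here once the workqueue has drained.
 */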
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}

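/*
 * Run the whole suite at module load: basic mutual exclusion, AA and ABBA
 * detection, cycle resolution scaled with the CPU count, then the stress
 * tests. A nonzero return fails the module load, which is how failures are
 * reported to the user.
 */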
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");