/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_gem_device.h"
#include "mock_engine.h"

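/*
 * Verify that the engine's breadcrumbs wait-tree matches our bitmap of
 * inserted waiters: the first waiter must be the irq_wait, every node in
 * the tree must have its bit set, and the nodes must appear in the same
 * order as the bits.
 */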
static int check_rbtree(struct intel_engine_cs *engine,
			const unsigned long *bitmap,
			const struct intel_wait *waiters,
			const int count)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;
	int n;

	if (&b->irq_wait->node != rb_first(&b->waiters)) {
		pr_err("First waiter does not match first element of wait-tree\n");
		return -EINVAL;
	}

	n = find_first_bit(bitmap, count);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);
		int idx = w - waiters;

		if (!test_bit(idx, bitmap)) {
			pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n",
			       idx, w->seqno);
			return -EINVAL;
		}

		if (n != idx) {
			pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n",
			       idx, w->seqno, n);
			return -EINVAL;
		}

		n = find_next_bit(bitmap, count, n + 1);
	}

	return 0;
}

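/*
 * Cross-check intel_wait_complete() against the bitmap: a set bit means the
 * waiter is expected to still be active, a clear bit means it is expected to
 * report completion.
 */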
static int check_completion(struct intel_engine_cs *engine,
			    const unsigned long *bitmap,
			    const struct intel_wait *waiters,
			    const int count)
{
	int n;

	for (n = 0; n < count; n++) {
		if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap))
			continue;

		pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
		       n, waiters[n].seqno,
		       intel_wait_complete(&waiters[n]) ? "complete" : "active",
		       test_bit(n, bitmap) ? "active" : "complete");
		return -EINVAL;
	}

	return 0;
}

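/* With no waiters left, both the first-waiter pointer and the wait-tree must be empty. */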
static int check_rbtree_empty(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (b->irq_wait) {
		pr_err("Empty breadcrumbs still has a waiter\n");
		return -EINVAL;
	}

	if (!RB_EMPTY_ROOT(&b->waiters)) {
		pr_err("Empty breadcrumbs, but wait-tree not empty\n");
		return -EINVAL;
	}

	return 0;
}

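/*
 * Subtest: insert a few thousand waiters in random order, then remove them in
 * a different random order, checking the rbtree after every operation.
 */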
static int igt_random_insert_remove(void *arg)
{
	const u32 seqno_bias = 0x1000;
	I915_RND_STATE(prng);
	struct intel_engine_cs *engine = arg;
	struct intel_wait *waiters;
	const int count = 4096;
	unsigned int *order;
	unsigned long *bitmap;
	int err = -ENOMEM;
	int n;

	mock_engine_reset(engine);

	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
	if (!waiters)
		goto out_engines;

	bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
			 GFP_KERNEL);
	if (!bitmap)
		goto out_waiters;

	order = i915_random_order(count, &prng);
	if (!order)
		goto out_bitmap;

	for (n = 0; n < count; n++)
		intel_wait_init_for_seqno(&waiters[n], seqno_bias + n);

	err = check_rbtree(engine, bitmap, waiters, count);
	if (err)
		goto out_order;

	/* Add and remove waiters into the rbtree in random order. At each
	 * step, we verify that the rbtree is correctly ordered.
	 */
	for (n = 0; n < count; n++) {
		int i = order[n];

		intel_engine_add_wait(engine, &waiters[i]);
		__set_bit(i, bitmap);

		err = check_rbtree(engine, bitmap, waiters, count);
		if (err)
			goto out_order;
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		int i = order[n];

		intel_engine_remove_wait(engine, &waiters[i]);
		__clear_bit(i, bitmap);

		err = check_rbtree(engine, bitmap, waiters, count);
		if (err)
			goto out_order;
	}

	err = check_rbtree_empty(engine);
out_order:
	kfree(order);
out_bitmap:
	kfree(bitmap);
out_waiters:
	kvfree(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}

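/*
 * Subtest: insert all waiters, then advance the mock seqno in ever larger
 * steps, checking that exactly the retired waiters report completion and are
 * removed from the rbtree.
 */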
static int igt_insert_complete(void *arg)
{
	const u32 seqno_bias = 0x1000;
	struct intel_engine_cs *engine = arg;
	struct intel_wait *waiters;
	const int count = 4096;
	unsigned long *bitmap;
	int err = -ENOMEM;
	int n, m;

	mock_engine_reset(engine);

	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
	if (!waiters)
		goto out_engines;

	bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
			 GFP_KERNEL);
	if (!bitmap)
		goto out_waiters;

	for (n = 0; n < count; n++) {
		intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
		intel_engine_add_wait(engine, &waiters[n]);
		__set_bit(n, bitmap);
	}
	err = check_rbtree(engine, bitmap, waiters, count);
	if (err)
		goto out_bitmap;

	/* On each step, we advance the seqno so that several waiters are then
	 * complete (we increase the seqno by increasingly larger values to
	 * retire more and more waiters at once). All retired waiters should
	 * be woken and removed from the rbtree, and that is what we check.
	 */
	for (n = 0; n < count; n = m) {
		int seqno = 2 * n;

		GEM_BUG_ON(find_first_bit(bitmap, count) != n);

		if (intel_wait_complete(&waiters[n])) {
			pr_err("waiter[%d, seqno=%d] completed too early\n",
			       n, waiters[n].seqno);
			err = -EINVAL;
			goto out_bitmap;
		}

		/* complete the following waiters */
		mock_seqno_advance(engine, seqno + seqno_bias);
		for (m = n; m <= seqno; m++) {
			if (m == count)
				break;

			GEM_BUG_ON(!test_bit(m, bitmap));
			__clear_bit(m, bitmap);
		}

		intel_engine_remove_wait(engine, &waiters[n]);
		RB_CLEAR_NODE(&waiters[n].node);

		err = check_rbtree(engine, bitmap, waiters, count);
		if (err) {
			pr_err("rbtree corrupt after seqno advance to %d\n",
			       seqno + seqno_bias);
			goto out_bitmap;
		}

		err = check_completion(engine, bitmap, waiters, count);
		if (err) {
			pr_err("completions after seqno advance to %d failed\n",
			       seqno + seqno_bias);
			goto out_bitmap;
		}
	}

	err = check_rbtree_empty(engine);
out_bitmap:
	kfree(bitmap);
out_waiters:
	kvfree(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}

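/*
 * Per-thread state for the igt_wakeup stress test: each kthread repeatedly
 * waits upon a randomly assigned seqno while the main thread advances the
 * mock engine, using the shared counters to synchronise the phases.
 */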
struct igt_wakeup {
	struct task_struct *tsk;
	atomic_t *ready, *set, *done;
	struct intel_engine_cs *engine;
	unsigned long flags;
#define STOP 0
#define IDLE 1
	wait_queue_head_t *wq;
	u32 seqno;
};

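/*
 * Park the thread (marking itself IDLE and decrementing *done) until the
 * main thread releases it for the next round; returns false once STOP is set.
 */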
static bool wait_for_ready(struct igt_wakeup *w)
{
	DEFINE_WAIT(ready);

	set_bit(IDLE, &w->flags);
	if (atomic_dec_and_test(w->done))
		wake_up_var(w->done);

	if (test_bit(STOP, &w->flags))
		goto out;

	for (;;) {
		prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
		if (atomic_read(w->ready) == 0)
			break;

		schedule();
	}
	finish_wait(w->wq, &ready);

out:
	clear_bit(IDLE, &w->flags);
	if (atomic_dec_and_test(w->set))
		wake_up_var(w->set);

	return !test_bit(STOP, &w->flags);
}

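/*
 * Worker: in each round, register an intel_wait for the assigned seqno and
 * sleep until the mock engine's seqno passes it (or STOP forces an exit).
 */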
static int igt_wakeup_thread(void *arg)
{
	struct igt_wakeup *w = arg;
	struct intel_wait wait;

	while (wait_for_ready(w)) {
		GEM_BUG_ON(kthread_should_stop());

		intel_wait_init_for_seqno(&wait, w->seqno);
		intel_engine_add_wait(w->engine, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
					      w->seqno))
				break;

			if (test_bit(STOP, &w->flags)) /* emergency escape */
				break;

			schedule();
		}
		intel_engine_remove_wait(w->engine, &wait);
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

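/*
 * Release all parked threads for the next round and wait until each one has
 * acknowledged (decremented *set) before rearming the ready/done counters.
 */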
static void igt_wake_all_sync(atomic_t *ready,
			      atomic_t *set,
			      atomic_t *done,
			      wait_queue_head_t *wq,
			      int count)
{
	atomic_set(set, count);
	atomic_set(ready, 0);
	wake_up_all(wq);

	wait_var_event(set, !atomic_read(set));
	atomic_set(ready, count);
	atomic_set(done, count);
}

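/*
 * Subtest: spawn many threads waiting on random seqno and check that each
 * round of mock seqno advances wakes every waiter it should, leaving the
 * wait-tree empty at the end of every round.
 */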
static int igt_wakeup(void *arg)
{
	I915_RND_STATE(prng);
	struct intel_engine_cs *engine = arg;
	struct igt_wakeup *waiters;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	const int count = 4096;
	const u32 max_seqno = count / 4;
	atomic_t ready, set, done;
	int err = -ENOMEM;
	int n, step;

	mock_engine_reset(engine);

	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
	if (!waiters)
		goto out_engines;

	/* Create a large number of threads, each waiting on a random seqno.
	 * Multiple waiters will be waiting for the same seqno.
	 */
	atomic_set(&ready, count);
	for (n = 0; n < count; n++) {
		waiters[n].wq = &wq;
		waiters[n].ready = &ready;
		waiters[n].set = &set;
		waiters[n].done = &done;
		waiters[n].engine = engine;
		waiters[n].flags = BIT(IDLE);

		waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
					     "i915/igt:%d", n);
		if (IS_ERR(waiters[n].tsk))
			goto out_waiters;

		get_task_struct(waiters[n].tsk);
	}

	for (step = 1; step <= max_seqno; step <<= 1) {
		u32 seqno;

		/* The waiter threads start paused as we assign them a random
		 * seqno and reset the engine. Once the engine is reset,
		 * we signal that the threads may begin their wait upon their
		 * seqno.
		 */
		for (n = 0; n < count; n++) {
			GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
			waiters[n].seqno =
				1 + prandom_u32_state(&prng) % max_seqno;
		}
		mock_seqno_advance(engine, 0);
		igt_wake_all_sync(&ready, &set, &done, &wq, count);

		/* Simulate the GPU doing chunks of work, with one or more
		 * seqno appearing to finish at the same time. A random number
		 * of threads will be waiting upon the update and hopefully be
		 * woken.
		 */
		for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
			usleep_range(50, 500);
			mock_seqno_advance(engine, seqno);
		}
		GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);

		/* With the seqno now beyond any of the waiting threads, they
		 * should all be woken, see that they are complete and signal
		 * that they are ready for the next test. We wait until all
		 * threads are complete and waiting for us (i.e. not a seqno).
		 */
		err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
		if (err == 0) { /* wait_var_event_timeout() returns 0 on timeout */
			pr_err("Timed out waiting for %d remaining waiters\n",
			       atomic_read(&done));
			err = -ETIMEDOUT;
			break;
		}

		err = check_rbtree_empty(engine);
		if (err)
			break;
	}

out_waiters:
	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		set_bit(STOP, &waiters[n].flags);
	}
	mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
	igt_wake_all_sync(&ready, &set, &done, &wq, n);

	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		kthread_stop(waiters[n].tsk);
		put_task_struct(waiters[n].tsk);
	}

	kvfree(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}

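/*
 * Entry point: run the breadcrumbs subtests against a mock GEM device (no
 * hardware required), using its RCS engine.
 */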
int intel_breadcrumbs_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_random_insert_remove),
		SUBTEST(igt_insert_complete),
		SUBTEST(igt_wakeup),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915->engine[RCS]);
	drm_dev_unref(&i915->drm);

	return err;
}