/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(obj->mm.quirked);
	obj->mm.quirked = true;
	list_add(&obj->st_link, objects);
}

static int populate_ggtt(struct drm_i915_private *i915,
			 struct list_head *objects)
{
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;

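	/*
	 * Pin single-page objects into the GGTT until it reports -ENOSPC,
	 * filling the address space completely.
	 */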
	count = 0;
	do {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			if (vma == ERR_PTR(-ENOSPC))
				break;

			return PTR_ERR(vma);
		}

		quirk_add(obj, objects);
		count++;
	} while (1);
	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
		 count, i915->ggtt.vm.total / PAGE_SIZE);

	bound = 0;
	unbound = 0;
	list_for_each_entry(obj, objects, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);

		if (atomic_read(&obj->bind_count))
			bound++;
		else
			unbound++;
	}
	GEM_BUG_ON(bound + unbound != count);

	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected %u!\n",
		       __func__, unbound, 0);
		return -EINVAL;
	}

	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&i915->ggtt.vm.bound_list)) {
		pr_err("No objects on the GGTT bound list!\n");
		return -EINVAL;
	}

	return 0;
}

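/* Release the pin on every vma whose object we marked via quirk_add() */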
static void unpin_ggtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;

	mutex_lock(&ggtt->vm.mutex);
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (vma->obj->mm.quirked)
			i915_vma_unpin(vma);
	mutex_unlock(&ggtt->vm.mutex);
}

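/*
 * Drop our ownership marks and put each object. struct_mutex is released
 * around the drain so that the deferred free worker, which may also take
 * the lock, can make progress.
 */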
static void cleanup_objects(struct drm_i915_private *i915,
			    struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

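/*
 * The eviction subtests below share a pattern: fill the GGTT with pinned
 * single-page objects via populate_ggtt(), exercise the eviction API while
 * everything is still pinned, and then, where applicable, repeat the
 * attempt after unpin_ggtt().
 */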
static int igt_evict_something(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict something */
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = 4096,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	/* Everything is unpinned, we should be able to evict the node */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

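/*
 * Deliberately a no-op: installing any color_adjust callback is enough to
 * switch the GGTT into cache-colouring mode, and these tests only compare
 * the colours of neighbouring nodes, not any adjusted ranges.
 */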
static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
}

static int igt_evict_for_cache_color(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Currently the use of color_adjust is limited to cache domains within
	 * the ggtt, and so the presence of mm.color_adjust is assumed to be
	 * i915_gtt_color_adjust throughout our driver, so using a mock color
	 * adjust will work just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;

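	/*
	 * Pin two LLC-coloured pages back to back at fixed offsets: the
	 * first at I915_GTT_PAGE_SIZE, the second at 2 * I915_GTT_PAGE_SIZE,
	 * which is exactly the range described by the target node above.
	 */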
	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(i915);
	cleanup_objects(i915, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(i915, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(i915);

	err = i915_gem_evict_vm(&ggtt->vm);
	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(i915, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, i915->ggtt.vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, i915->ggtt.vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&i915->drm.struct_mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, i915, id) {
		struct i915_sw_fence fence;
		struct drm_file *file;

		file = mock_file(i915);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			break;
		}

		count = 0;
		mutex_lock(&i915->drm.struct_mutex);
		onstack_fence_init(&fence);
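		/*
		 * Requests are chained to this unsignaled fence below, so
		 * none of them can be submitted and every context stays
		 * pinned until onstack_fence_fini() fires the fence.
		 */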
		do {
			struct i915_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = igt_request_alloc(ctx, engine);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
					       ctx->hw_id, engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		mutex_unlock(&i915->drm.struct_mutex);

		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		mock_file_free(i915, file);
		if (err)
			break;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

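/*
 * The mock subtests run against a fake device created by mock_gem_device(),
 * so they exercise the eviction logic without requiring any hardware.
 */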
int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, i915);

	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}

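/*
 * igt_evict_contexts builds real requests on each engine, so it runs as a
 * live selftest and is skipped if the GT is already wedged.
 */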
int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}