v5.14.15 (drivers/gpu/drm/i915/i915_gem_evict.c)
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

static int ggtt_flush(struct intel_gt *gt)
{
	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

static bool defer_evict(struct i915_vma *vma)
{
	if (i915_vma_is_active(vma))
		return true;

	if (i915_vma_is_scanout(vma))
		return true;

	return false;
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired, random order)
	 *   2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, color,
				    start, end, mode);

	intel_gt_retire_requests(vm->gt);

search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		if (vma == active) { /* now seen this vma twice */
			if (flags & PIN_NONBLOCK)
				break;

			active = ERR_PTR(-EAGAIN);
		}

		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scanned
		 * order is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
			if (!active)
				active = vma;

			list_move_tail(&vma->vm_link, &vm->bound_list);
			continue;
		}

		if (mark_free(&scan, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests, which will then remove
	 * the pinning for active objects such as contexts and rings,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is nothing more we can evict.
	 */
	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
		return -EBUSY;

	ret = ggtt_flush(vm->gt);
	if (ret)
		return ret;

	cond_resched();

	flags |= PIN_NONBLOCK;
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);

		/* If we find any non-objects (!vma), we cannot evict them */
		if (vma->node.color != I915_COLOR_UNEVICTABLE)
			ret = __i915_vma_unbind(vma);
		else
			ret = -ENOSPC; /* XXX search failed, try again? */
	}

	return ret;
}

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	intel_gt_retire_requests(vm->gt);

	if (i915_vm_has_cache_coloring(vm)) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (i915_vm_has_cache_coloring(vm)) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
			ret = -ENOSPC;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict_vm(vm);

	/* Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->gt);
		if (ret)
			return ret;
	}

	do {
		struct i915_vma *vma, *vn;
		LIST_HEAD(eviction_list);

		list_for_each_entry(vma, &vm->bound_list, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
		if (list_empty(&eviction_list))
			break;

		ret = 0;
		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
			__i915_vma_unpin(vma);
			if (ret == 0)
				ret = __i915_vma_unbind(vma);
			if (ret != -EINTR) /* "Get me out of here!" */
				ret = 0;
		}
	} while (ret == 0);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif
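
For context: the usual caller of i915_gem_evict_something() is the GTT insertion path, which attempts the allocation first and only falls back to eviction once drm_mm reports -ENOSPC. Below is a hedged sketch of that retry pattern, loosely modelled on i915_gem_gtt_insert() in i915_gem_gtt.c; the helper name is illustrative and the insert-mode selection driven by PIN_HIGH/PIN_MAPPABLE is elided.

/*
 * Hedged sketch, not the verbatim i915_gem_gtt_insert(): try the
 * allocation first, evict in rough LRU order on -ENOSPC, then retry.
 * vm->mutex must be held, matching the lockdep assertion above.
 */
static int example_insert_with_eviction(struct i915_address_space *vm,
					struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					unsigned int flags)
{
	int err;

	err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
					  color, start, end,
					  DRM_MM_INSERT_BEST);
	if (err != -ENOSPC)
		return err;

	/* No hole large enough: make room, then try once more. */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
					   color, start, end,
					   DRM_MM_INSERT_BEST);
}
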
v6.2 (drivers/gpu/drm/i915/i915_gem_evict.c)
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

static bool dying_vma(struct i915_vma *vma)
{
	return !kref_read(&vma->obj->base.refcount);
}

static int ggtt_flush(struct intel_gt *gt)
{
	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	/*
	 * We add the extra refcount so the object doesn't drop to zero until
	 * after ungrab_vma(), this way trylock is always paired with unlock.
	 */
	if (i915_gem_object_get_rcu(vma->obj)) {
		if (!i915_gem_object_trylock(vma->obj, ww)) {
			i915_gem_object_put(vma->obj);
			return false;
		}
	} else {
		/* Dead objects don't need pins */
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	}

	return true;
}

static void ungrab_vma(struct i915_vma *vma)
{
	if (dying_vma(vma))
		return;

	i915_gem_object_unlock(vma->obj);
	i915_gem_object_put(vma->obj);
}

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_gem_ww_ctx *ww,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (!grab_vma(vma, ww))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

static bool defer_evict(struct i915_vma *vma)
{
	if (i915_vma_is_active(vma))
		return true;

	if (i915_vma_is_scanout(vma))
		return true;

	return false;
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @ww: An optional struct i915_gem_ww_ctx.
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired, random order)
	 *   2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, color,
				    start, end, mode);

	intel_gt_retire_requests(vm->gt);

search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		if (vma == active) { /* now seen this vma twice */
			if (flags & PIN_NONBLOCK)
				break;

			active = ERR_PTR(-EAGAIN);
		}

		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scanned
		 * order is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
			if (!active)
				active = vma;

			list_move_tail(&vma->vm_link, &vm->bound_list);
			continue;
		}

		if (mark_free(&scan, ww, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
		ungrab_vma(vma);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests, which will then remove
	 * the pinning for active objects such as contexts and rings,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is nothing more we can evict.
	 */
	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
		return -EBUSY;

	ret = ggtt_flush(vm->gt);
	if (ret)
		return ret;

	cond_resched();

	flags |= PIN_NONBLOCK;
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node)) {
			__i915_vma_pin(vma);
		} else {
			list_del(&vma->evict_link);
			ungrab_vma(vma);
		}
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
		ungrab_vma(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);

		/* If we find any non-objects (!vma), we cannot evict them */
		if (vma->node.color != I915_COLOR_UNEVICTABLE &&
		    grab_vma(vma, ww)) {
			ret = __i915_vma_unbind(vma);
			ungrab_vma(vma);
		} else {
			ret = -ENOSPC;
		}
	}

	return ret;
}

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @ww: An optional struct i915_gem_ww_ctx.
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct i915_gem_ww_ctx *ww,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	intel_gt_retire_requests(vm->gt);

	if (i915_vm_has_cache_coloring(vm)) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (i915_vm_has_cache_coloring(vm)) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
			ret = -ENOSPC;
			break;
		}

		if (!grab_vma(vma, ww)) {
			ret = -ENOSPC;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);

		ungrab_vma(vma);
	}

	return ret;
}

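As an aside, the usual consumer of i915_gem_evict_for_node() is fixed-offset reservation: drm_mm_reserve_node() is tried first, and eviction only clears the target range after it fails with -ENOSPC. A minimal sketch of that pattern follows (the helper name is hypothetical; compare i915_gem_gtt_reserve() in i915_gem_gtt.c), assuming vm->mutex is held and node->start/size/color are pre-filled:

/*
 * Illustrative only: reserve a node at a fixed offset, evicting
 * whatever overlaps the target range (plus conflicting coloured
 * neighbours) on -ENOSPC, then retry the reservation once.
 */
static int example_reserve_with_eviction(struct i915_address_space *vm,
					 struct i915_gem_ww_ctx *ww,
					 struct drm_mm_node *node,
					 unsigned int flags)
{
	int err;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	/* Clear out exactly the range we need. */
	err = i915_gem_evict_for_node(vm, ww, node, flags);
	if (err)
		return err;

	return drm_mm_reserve_node(&vm->mm, node);
}
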
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
 * will be able to evict vmas locked by the ww as well.
 * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
 * in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
 * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
 * the vm->mutex before trying again to acquire the contended lock. The caller
 * also owns a reference to the object.
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
		      struct drm_i915_gem_object **busy_bo)
{
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict_vm(vm);

	/* Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->gt);
		if (ret)
			return ret;
	}

	do {
		struct i915_vma *vma, *vn;
		LIST_HEAD(eviction_list);
		LIST_HEAD(locked_eviction_list);

		list_for_each_entry(vma, &vm->bound_list, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			/*
			 * If we already own the lock, trylock fails. In case
			 * the resv is shared among multiple objects, we still
			 * need the object ref.
			 */
			if (!i915_gem_object_get_rcu(vma->obj) ||
			    (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
				__i915_vma_pin(vma);
				list_add(&vma->evict_link, &locked_eviction_list);
				continue;
			}

			if (!i915_gem_object_trylock(vma->obj, ww)) {
				if (busy_bo) {
					*busy_bo = vma->obj; /* holds ref */
					ret = -EBUSY;
					break;
				}
				i915_gem_object_put(vma->obj);
				continue;
			}

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
		if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
			break;

		/* Unbind locked objects first, before unlocking the eviction_list */
		list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
			__i915_vma_unpin(vma);

			if (ret == 0) {
				ret = __i915_vma_unbind(vma);
				if (ret != -EINTR) /* "Get me out of here!" */
					ret = 0;
			}
			if (!dying_vma(vma))
				i915_gem_object_put(vma->obj);
		}

		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
			__i915_vma_unpin(vma);
			if (ret == 0) {
				ret = __i915_vma_unbind(vma);
				if (ret != -EINTR) /* "Get me out of here!" */
					ret = 0;
			}

			i915_gem_object_unlock(vma->obj);
			i915_gem_object_put(vma->obj);
		}
	} while (ret == 0);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif
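
Finally, the @busy_bo contract documented above is worth seeing from the caller's side: on -EBUSY the caller receives a referenced object whose trylock failed, must drop vm->mutex, acquire the object lock properly, and retry. A hedged sketch of that dance, loosely following the fault-handler usage in i915_gem_mman.c (the helper name is illustrative):

/*
 * Hedged sketch of the @busy_bo retry contract. On -EBUSY,
 * i915_gem_evict_vm() hands us a reference to the object whose
 * trylock failed; with vm->mutex dropped we may now block on its
 * lock, then ask the caller's ww loop to restart the operation.
 */
static int example_evict_vm_busy(struct i915_address_space *vm,
				 struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *busy_bo = NULL;
	int err;

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		return err;

	err = i915_gem_evict_vm(vm, ww, &busy_bo);
	mutex_unlock(&vm->mutex);

	if (err == -EBUSY && busy_bo) {
		/* Wait for the contended lock outside vm->mutex. */
		err = i915_gem_object_lock(busy_bo, ww);
		i915_gem_object_put(busy_bo); /* drop the ref we were handed */
		if (!err)
			err = -EAGAIN; /* let the caller's ww loop retry */
	}

	return err;
}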