// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        do {
                if (dma_map_sg_attrs(obj->base.dev->dev,
                                     pages->sgl, pages->nents,
                                     DMA_BIDIRECTIONAL,
                                     DMA_ATTR_SKIP_CPU_SYNC |
                                     DMA_ATTR_NO_KERNEL_MAPPING |
                                     DMA_ATTR_NO_WARN))
                        return 0;

                /*
                 * If the DMA remap fails, one cause can be that we have
                 * too many objects pinned in a small remapping table,
                 * such as swiotlb. Incrementally purge all other objects and
                 * try again - if there are no more pages to remove from
                 * the DMA remapper, i915_gem_shrink will return 0.
                 */
                GEM_BUG_ON(obj->mm.pages == pages);
        } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
                                 obj->base.size >> PAGE_SHIFT, NULL,
                                 I915_SHRINK_BOUND |
                                 I915_SHRINK_UNBOUND));

        return -ENOSPC;
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        /* XXX This does not prevent more requests being submitted! */
        if (unlikely(ggtt->do_idle_maps))
                /* Wait a bit, in the hope it avoids the hang */
                usleep_range(100, 250);

        dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
                     DMA_BIDIRECTIONAL);
}
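
/*
 * Illustrative sketch (not compiled in): the two helpers above are expected
 * to pair around the lifetime of an object's backing pages. The caller shown
 * here is an assumption for demonstration; the real callers live in the
 * object get/put-pages paths.
 */
#if 0
static int example_map_pages(struct drm_i915_gem_object *obj,
                             struct sg_table *pages)
{
        int err;

        /* DMA-map the scatterlist, shrinking other objects on failure. */
        err = i915_gem_gtt_prepare_pages(obj, pages);
        if (err)
                return err; /* -ENOSPC: the shrinker freed nothing more */

        /* ... pages are now visible to the device ... */

        /* Unmap again when the backing store is released. */
        i915_gem_gtt_finish_pages(obj, pages);
        return 0;
}
#endif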

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
                         struct i915_gem_ww_ctx *ww,
                         struct drm_mm_node *node,
                         u64 size, u64 offset, unsigned long color,
                         unsigned int flags)
{
        int err;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(range_overflows(offset, size, vm->total));
        GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));

        node->size = size;
        node->start = offset;
        node->color = color;

        err = drm_mm_reserve_node(&vm->mm, node);
        if (err != -ENOSPC)
                return err;

        if (flags & PIN_NOEVICT)
                return -ENOSPC;

        err = i915_gem_evict_for_node(vm, ww, node, flags);
        if (err == 0)
                err = drm_mm_reserve_node(&vm->mm, node);

        return err;
}
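
/*
 * Illustrative sketch (not compiled in): reserving a node at a fixed GGTT
 * offset. The 64K size, zero offset and zero flags are assumptions chosen
 * for the example, not values taken from a real caller.
 */
#if 0
static int example_reserve_fixed(struct i915_address_space *vm,
                                 struct i915_gem_ww_ctx *ww,
                                 struct drm_mm_node *node)
{
        /* Ask for exactly [0, SZ_64K); overlapping nodes may be evicted. */
        return i915_gem_gtt_reserve(vm, ww, node, SZ_64K, 0,
                                    I915_COLOR_UNEVICTABLE, 0);
}
#endif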

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
        u64 range, addr;

        GEM_BUG_ON(range_overflows(start, len, end));
        GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

        range = round_down(end - len, align) - round_up(start, align);
        if (range) {
                if (sizeof(unsigned long) == sizeof(u64)) {
                        addr = get_random_u64();
                } else {
                        addr = get_random_u32();
                        if (range > U32_MAX) {
                                addr <<= 32;
                                addr |= get_random_u32();
                        }
                }
                div64_u64_rem(addr, range, &addr);
                start += addr;
        }

        return round_up(start, align);
}
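
/*
 * Worked example (illustrative numbers only): with start = 0, end = 1 MiB,
 * len = 64 KiB and align = 4 KiB, random_offset() computes
 * range = round_down(1M - 64K, 4K) - round_up(0, 4K) = 960 KiB, adds a
 * uniform remainder in [0, 960K) to start and rounds up to 4 KiB, so the
 * returned offset never exceeds end - len and always leaves room for the
 * full allocation.
 */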

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, a victim is first selected at random and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
                        struct i915_gem_ww_ctx *ww,
                        struct drm_mm_node *node,
                        u64 size, u64 alignment, unsigned long color,
                        u64 start, u64 end, unsigned int flags)
{
        enum drm_mm_insert_mode mode;
        u64 offset;
        int err;

        lockdep_assert_held(&vm->mutex);

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(start >= end);
        GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));

        if (unlikely(range_overflows(start, size, end)))
                return -ENOSPC;

        if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
                return -ENOSPC;

        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
                mode = DRM_MM_INSERT_HIGHEST;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;

        /*
         * We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
         * so we know that we always have a minimum alignment of 4096.
         * The drm_mm range manager is optimised to return results
         * with zero alignment, so where possible use the optimal
         * path.
         */
        BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
        if (alignment <= I915_GTT_MIN_ALIGNMENT)
                alignment = 0;

        err = drm_mm_insert_node_in_range(&vm->mm, node,
                                          size, alignment, color,
                                          start, end, mode);
        if (err != -ENOSPC)
                return err;

        if (mode & DRM_MM_INSERT_ONCE) {
                err = drm_mm_insert_node_in_range(&vm->mm, node,
                                                  size, alignment, color,
                                                  start, end,
                                                  DRM_MM_INSERT_BEST);
                if (err != -ENOSPC)
                        return err;
        }

        if (flags & PIN_NOEVICT)
                return -ENOSPC;

        /*
         * No free space, pick a slot at random.
         *
         * There is a pathological case here using a GTT shared between
         * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
         *
         * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
         *         (64k objects)            (448k objects)
         *
         * Now imagine that the eviction LRU is ordered top-down (just because
         * pathology meets real life), and that we need to evict an object to
         * make room inside the aperture. The eviction scan then has to walk
         * the 448k list before it finds one within range. And now imagine
         * that it has to search for a new hole between every byte inside the
         * memcpy, for several simultaneous clients.
         *
         * On a full-ppgtt system, if we have run out of available space,
         * there will be lots and lots of objects in the eviction list! Again,
         * searching that LRU list may be slow if we are also applying any
         * range restrictions (e.g. restriction to low 4GiB) and so, for
         * simplicity and similarity between the different GTTs, try the
         * single random replacement first.
         */
        offset = random_offset(start, end,
                               size, alignment ?: I915_GTT_MIN_ALIGNMENT);
        err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
        if (err != -ENOSPC)
                return err;

        if (flags & PIN_NOSEARCH)
                return -ENOSPC;

        /* Randomly selected placement is pinned, do a search */
        err = i915_gem_evict_something(vm, ww, size, alignment, color,
                                       start, end, flags);
        if (err)
                return err;

        return drm_mm_insert_node_in_range(&vm->mm, node,
                                           size, alignment, color,
                                           start, end, DRM_MM_INSERT_EVICT);
}
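
/*
 * Illustrative sketch (not compiled in): requesting any 2 MiB-aligned hole
 * in the low 4 GiB of an address space. The size, alignment, range and
 * flags are assumptions chosen for the example.
 */
#if 0
static int example_insert_low(struct i915_address_space *vm,
                              struct i915_gem_ww_ctx *ww,
                              struct drm_mm_node *node)
{
        int err;

        mutex_lock(&vm->mutex);
        /* Prefer a low placement; evict (randomly, then by LRU) if full. */
        err = i915_gem_gtt_insert(vm, ww, node, SZ_2M, SZ_2M,
                                  I915_COLOR_UNEVICTABLE,
                                  0, SZ_4G, PIN_MAPPABLE);
        mutex_unlock(&vm->mutex);

        return err;
}
#endif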

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif
/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
                                            enum i915_cache_level cache_level)
{
        switch (cache_level) {
        case I915_CACHE_LLC_MLC:
                if (INTEL_INFO(dev)->gen >= 6)
                        return AGP_USER_CACHED_MEMORY_LLC_MLC;
                /* Older chipsets do not have this extra level of CPU
                 * caching, so fall through and request the PTE simply
                 * as cached.
                 */
        case I915_CACHE_LLC:
                return AGP_USER_CACHED_MEMORY;
        default:
        case I915_CACHE_NONE:
                return AGP_USER_MEMORY;
        }
}
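
/*
 * Summary of the mapping above (added for clarity):
 *   I915_CACHE_LLC_MLC -> AGP_USER_CACHED_MEMORY_LLC_MLC (gen6+ only)
 *   I915_CACHE_LLC     -> AGP_USER_CACHED_MEMORY
 *   I915_CACHE_NONE    -> AGP_USER_MEMORY (also the default)
 */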

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
                              (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_rebind_object(obj, obj->cache_level);
        }

        intel_gtt_chipset_flush();
}

int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
        int ret;

        if (dev_priv->mm.gtt->needs_dmar) {
                ret = intel_gtt_map_memory(obj->pages,
                                           obj->base.size >> PAGE_SHIFT,
                                           &obj->sg_list,
                                           &obj->num_sg);
                if (ret != 0)
                        return ret;

                intel_gtt_insert_sg_entries(obj->sg_list,
                                            obj->num_sg,
                                            obj->gtt_space->start >> PAGE_SHIFT,
                                            agp_type);
        } else {
                intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
                                       obj->base.size >> PAGE_SHIFT,
                                       obj->pages,
                                       agp_type);
        }

        return 0;
}

void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
                                enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

        if (dev_priv->mm.gtt->needs_dmar) {
                BUG_ON(!obj->sg_list);

                intel_gtt_insert_sg_entries(obj->sg_list,
                                            obj->num_sg,
                                            obj->gtt_space->start >> PAGE_SHIFT,
                                            agp_type);
        } else {
                intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
                                       obj->base.size >> PAGE_SHIFT,
                                       obj->pages,
                                       agp_type);
        }
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
                              obj->base.size >> PAGE_SHIFT);

        if (obj->sg_list) {
                intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
                obj->sg_list = NULL;
        }
}
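
/*
 * Illustrative sketch (not compiled in): the legacy bind/unbind pairing.
 * The caller shown here is an assumption; in this era the real callers were
 * the object pin/unpin paths.
 */
#if 0
static int example_bind_cycle(struct drm_i915_gem_object *obj)
{
        int ret;

        /* Program the GTT entries, mapping through the IOMMU if needed. */
        ret = i915_gem_gtt_bind_object(obj);
        if (ret)
                return ret;

        /* ... object is GPU-visible at obj->gtt_space->start ... */

        /* Point the range back at scratch pages and drop any sg mapping. */
        i915_gem_gtt_unbind_object(obj);
        return 0;
}
#endif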