v3.5.6

/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}
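
/*
 * A minimal sketch (not part of the original file) of the index arithmetic
 * the walk above relies on: a linear PPGTT entry index splits into a page
 * directory slot and a PTE slot within that page table. Assuming
 * I915_PPGTT_PT_ENTRIES == 1024 (one 4096-byte page of 4-byte PTEs), entry
 * 1536 lands in page table 1 at slot 512.
 */
static inline void i915_ppgtt_split_entry(unsigned entry,
					  unsigned *pd, unsigned *pte)
{
	*pd = entry / I915_PPGTT_PT_ENTRIES;	/* which page table */
	*pte = entry % I915_PPGTT_PT_ENTRIES;	/* slot within that table */
}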

int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
						*ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}
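
/*
 * Worked example (illustrative, not in the original): with the 512*1024
 * global-GTT entries noted above and assuming I915_PPGTT_PD_ENTRIES == 512,
 * the stolen PDEs start at entry 524288 - 512 = 523776, so pd_offset becomes
 * 523776 * sizeof(uint32_t) = 0x1ff800 bytes into the global page table.
 */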

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 struct scatterlist *sg_list,
					 unsigned sg_len,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = sg_list;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < sg_len) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			m++;
			if (m == segment_len) {
				sg = sg_next(sg);
				i++;
				if (i == sg_len)
					break;

				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
				    unsigned first_entry, unsigned num_entries,
				    struct page **pages, uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;
	dma_addr_t page_addr;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = page_to_phys(*pages);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[i] = pte | pte_flags;

			pages++;
		}

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	if (obj->sg_table) {
		i915_ppgtt_insert_sg_entries(ppgtt,
					     obj->sg_table->sgl,
					     obj->sg_table->nents,
					     obj->gtt_space->start >> PAGE_SHIFT,
					     pte_flags);
	} else if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		i915_ppgtt_insert_sg_entries(ppgtt,
					     obj->sg_list,
					     obj->num_sg,
					     obj->gtt_space->start >> PAGE_SHIFT,
					     pte_flags);
	} else
		i915_ppgtt_insert_pages(ppgtt,
					obj->gtt_space->start >> PAGE_SHIFT,
					obj->base.size >> PAGE_SHIFT,
					obj->pages,
					pte_flags);
}
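
/*
 * Illustrative note (not in the original file): every PTE written by the
 * helpers above is just the page address run through GEN6_PTE_ADDR_ENCODE()
 * OR'ed with the validity and cacheability bits selected from the cache
 * level, e.g. for an LLC-cached page:
 *
 *	pte = GEN6_PTE_ADDR_ENCODE(page_addr) | GEN6_PTE_VALID |
 *	      GEN6_PTE_CACHE_LLC;
 */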

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fallthrough and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.gtt->needs_dmar)
		return intel_gtt_map_memory(obj->pages,
					    obj->base.size >> PAGE_SHIFT,
					    &obj->sg_list,
					    &obj->num_sg);
	else
		return 0;
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	if (obj->sg_table) {
		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
					    obj->sg_table->nents,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (obj->sg_list) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
	}

	undo_idling(dev_priv, interruptible);
}

void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
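
/*
 * A minimal caller sketch (assumed, not from this file): driver init is
 * expected to pass the GTT bounds it discovered, e.g. managing the whole
 * aperture from offset 0 while the guard-page subtraction above keeps the
 * very last page out of the allocator:
 *
 *	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
 *	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 *	i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
 */
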
v6.2

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg_attrs(obj->base.dev->dev,
				     pages->sgl, pages->nents,
				     DMA_BIDIRECTIONAL,
				     DMA_ATTR_SKIP_CPU_SYNC |
				     DMA_ATTR_NO_KERNEL_MAPPING |
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	/* XXX This does not prevent more requests being submitted! */
	if (unlikely(ggtt->do_idle_maps))
		/* Wait a bit, in the hope it avoids the hang */
		usleep_range(100, 250);

	dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
		     DMA_BIDIRECTIONAL);
}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, ww, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
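
/*
 * A minimal usage sketch (assumed, not part of this file): pin a fixed
 * page-sized slot at offset 0 under vm->mutex, colored unevictable since
 * the node is not backed by a VMA, then release it again.
 */
static inline int example_reserve_first_page(struct i915_address_space *vm)
{
	struct drm_mm_node node = {};
	int err;

	mutex_lock(&vm->mutex);
	err = i915_gem_gtt_reserve(vm, NULL, &node,
				   I915_GTT_PAGE_SIZE, 0,
				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
	if (err == 0)
		drm_mm_remove_node(&node);
	mutex_unlock(&vm->mutex);

	return err;
}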

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_u64();
		} else {
			addr = get_random_u32();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_u32();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
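
/*
 * A sketch of the helper above (illustrative values, not in the original):
 * every result is aligned and leaves room for len bytes before end. With a
 * 1 MiB window, a 64 KiB length and 4 KiB alignment, any of the
 * (1 MiB - 64 KiB) / 4 KiB + 1 = 241 aligned slots may come back.
 */
static inline u64 example_random_offset(void)
{
	return random_offset(0, 1ull << 20, 64 << 10, 4096);
}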

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, and failing that the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct i915_gem_ww_ctx *ww,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, ww, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
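
/*
 * A minimal usage sketch (assumed, not part of this file): allocate a
 * 64 KiB node somewhere in the upper half of the address space, preferring
 * high addresses. The caller must already hold vm->mutex, as the function
 * asserts, and vm->total is assumed page aligned.
 */
static inline int example_insert_high(struct i915_address_space *vm,
				      struct drm_mm_node *node)
{
	return i915_gem_gtt_insert(vm, NULL, node,
				   64 << 10, 0, I915_COLOR_UNEVICTABLE,
				   round_up(vm->total / 2, I915_GTT_PAGE_SIZE),
				   vm->total, PIN_HIGH);
}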

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif