// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from
 * the managers to struct sg_table, basically providing the mapping from
 * i915 GEM regions to TTM memory types and resource managers.
 */

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
 49
 50/*
 51 * Map the i915 memory regions to TTM memory types. We use the
 52 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 53 * memory and TTM_PL_TT for GGTT use if decided to implement this.
 54 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
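
/*
 * Illustrative sketch, not part of the original file: the hypothetical helper
 * below shows how the mapping above is typically consumed. System memory
 * resolves to TTM_PL_SYSTEM, while an LMEM or MOCK region with instance N
 * resolves to the driver-private type TTM_PL_PRIV + N.
 */
static inline bool intel_region_ttm_type_is_private(const struct intel_memory_region *mem)
{
	/* TTM_PL_SYSTEM, TTM_PL_TT and TTM_PL_VRAM all sit below TTM_PL_PRIV. */
	return intel_region_to_ttm_type(mem) >= TTM_PL_PRIV;
}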

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's an LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
				      resource_size(&mem->region),
				      mem->io_size,
				      mem->min_page_size, PAGE_SIZE);
	if (ret)
		return ret;

	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		msleep(20);
		flush_delayed_work(&mem->i915->bdev.wq);
	}

	/*
	 * If we leaked objects, don't free the region; doing so would
	 * cause a use-after-free.
	 */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}
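
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * showing the expected pairing and ordering of the init/fini helpers above
 * for a device owning a single region. Error handling is simplified; in the
 * real driver, regions and the TTM device are torn down from separate paths.
 */
static inline int intel_region_ttm_lifecycle_example(struct drm_i915_private *i915,
						     struct intel_memory_region *mem)
{
	int ret;

	/* One TTM device per i915 device, set up before any region. */
	ret = intel_region_ttm_device_init(i915);
	if (ret)
		return ret;

	/* Attach a buddy resource manager for the region's TTM memory type. */
	ret = intel_region_ttm_init(mem);
	if (ret) {
		intel_region_ttm_device_fini(i915);
		return ret;
	}

	/* ... allocate and use objects backed by this region ... */

	/* Tear down in reverse order; _fini flushes any freed objects first. */
	ret = intel_region_ttm_fini(mem);
	if (!ret)
		intel_region_ttm_device_fini(i915);

	return ret;
}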

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A malloced, refcounted sg_table on success, an error pointer on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res,
				  u32 page_alignment)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start,
					      page_alignment);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start,
						     page_alignment);
	}
}
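
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * backend might translate a TTM resource into a refcounted sg_table and walk
 * the resulting entries. PAGE_SIZE alignment is an assumption made for the
 * sketch; real callers derive the alignment from the region or object.
 */
static inline int intel_region_ttm_rsgt_example(struct intel_memory_region *mem,
						struct ttm_resource *res)
{
	struct i915_refct_sgt *rsgt;
	struct scatterlist *sg;
	unsigned int i;
	int chunks = 0;

	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
	if (IS_ERR(rsgt))
		return PTR_ERR(rsgt);

	/* Each sg entry describes one contiguous chunk of the allocation. */
	for_each_sg(rsgt->table.sgl, sg, rsgt->table.nents, i)
		chunks++;

	/* Drop our reference; the table is freed with the last reference. */
	i915_refct_sgt_put(rsgt);

	return chunks;
}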

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @offset: The required start offset in bytes, or I915_BO_INVALID_OFFSET if none.
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_resource_to_rsgt(), and can be freed using
 * intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		place.fpfn = offset >> PAGE_SHIFT;
		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	} else if (mem->io_size && mem->io_size < mem->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place.fpfn = 0;
			place.lpfn = mem->io_size >> PAGE_SHIFT;
		}
	}

	mock_bo.base.size = size;
	mock_bo.bdev = &mem->i915->bdev;

	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;
	if (!ret)
		res->bo = NULL; /* Rather blow up than risk a use-after-free */
	return ret ? ERR_PTR(ret) : res;
}
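
/*
 * Illustrative sketch, not part of the original file: a minimal selftest-style
 * round trip using the helper above. The hypothetical example uses the
 * region's minimum page size and no placement constraints.
 */
static inline int intel_region_ttm_resource_alloc_example(struct intel_memory_region *mem)
{
	struct ttm_resource *res;

	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
					      mem->min_page_size, 0);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* ... convert to an sg_table with intel_region_ttm_resource_to_rsgt() ... */

	intel_region_ttm_resource_free(mem, res);

	return 0;
}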

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	mock_bo.base.size = res->size;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

	man->func->free(man, res);
}