v5.14.15
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"

#include "intel_region_ttm.h"

/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM
 * regions to TTM memory types and resource managers.
 */

/* A zero-initialized driver for now. We don't have a TTM backend yet. */
static struct ttm_device_funcs i915_ttm_bo_driver;

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
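
/*
 * Illustrative only (hypothetical call sites, not part of this file):
 * the device-level init/fini pair brackets the per-region setup below
 * during driver load and unload, roughly:
 *
 *	ret = intel_region_ttm_device_init(i915);
 *	...
 *	ret = intel_region_ttm_init(mem);	// once per TTM-backed region
 *	...
 *	intel_region_ttm_fini(mem);
 *	intel_region_ttm_device_fini(i915);
 */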

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use, should we decide to implement this.
 */
static int intel_region_to_ttm_type(struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK);

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}

static struct ttm_resource *
intel_region_ttm_node_reserve(struct intel_memory_region *mem,
			      resource_size_t offset,
			      resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	/*
	 * Having to use a mock_bo is unfortunate but stems from some
	 * drivers having private managers that insist on knowing what the
	 * allocated memory is intended for, using it to send private
	 * data to the manager. Also, recently the bo has been used to send
	 * alignment info to the manager. Assume that, apart from the latter,
	 * none of the managers we use will ever access the buffer object
	 * members, hoping we can pass the alignment info in
	 * struct ttm_place in the future.
	 */

	place.fpfn = offset >> PAGE_SHIFT;
	place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	mock_bo.base.size = size;
	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;

	return ret ? ERR_PTR(ret) : res;
}

/**
 * intel_region_ttm_node_free - Free a node allocated from a resource manager
 * @mem: The region the node was allocated from.
 * @res: The opaque node representing an allocation.
 */
void intel_region_ttm_node_free(struct intel_memory_region *mem,
				struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;

	man->func->free(man, res);
}

static const struct intel_memory_region_private_ops priv_ops = {
	.reserve = intel_region_ttm_node_reserve,
	.free = intel_region_ttm_node_free,
};

int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = ttm_range_man_init(bdev, mem_type, false,
				 resource_size(&mem->region) >> PAGE_SHIFT);
	if (ret)
		return ret;

	mem->chunk_size = PAGE_SIZE;
	mem->max_order =
		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
	mem->is_range_manager = true;
	mem->priv_ops = &priv_ops;
	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 */
void intel_region_ttm_fini(struct intel_memory_region *mem)
{
	int ret;

	ret = ttm_range_man_fini(&mem->i915->bdev,
				 intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;
}

/**
 * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
 * to an sg_table.
 * @mem: The memory region.
 * @res: The resource manager node obtained from the TTM resource manager.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A malloced sg_table on success, an error pointer on failure.
 */
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
					     struct ttm_resource *res)
{
	struct ttm_range_mgr_node *range_node =
		container_of(res, typeof(*range_node), base);

	GEM_WARN_ON(!mem->is_range_manager);
	return i915_sg_from_mm_node(&range_node->mm_nodes[0],
				    mem->region.start);
}

/**
 * intel_region_ttm_node_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use it if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_node_to_st(), and can be freed using
 * intel_region_ttm_node_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
			    resource_size_t size,
			    unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	/*
	 * We ignore the flags for now, since we're using the range
	 * manager, and contiguous and min page size would be fulfilled
	 * by default if size is min page size aligned.
	 */
	mock_bo.base.size = size;

	if (mem->is_range_manager) {
		if (size >= SZ_1G)
			mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
		else if (size >= SZ_2M)
			mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
		else if (size >= SZ_64K)
			mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
	}

	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;

	return ret ? ERR_PTR(ret) : res;
}
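
/*
 * Illustrative usage sketch (hypothetical helper, not part of
 * intel_region_ttm.c): a caller needing a standalone allocation would
 * pair the helpers above roughly as follows. The function name and the
 * sg_table teardown are assumptions for illustration; the kernel-doc
 * above only states that the table is malloced.
 */
static int example_node_alloc_and_map(struct intel_memory_region *mem)
{
	struct ttm_resource *res;
	struct sg_table *st;

	res = intel_region_ttm_node_alloc(mem, SZ_2M, 0);
	if (IS_ERR(res))
		return PTR_ERR(res);

	st = intel_region_ttm_node_to_st(mem, res);
	if (IS_ERR(st)) {
		intel_region_ttm_node_free(mem, res);
		return PTR_ERR(st);
	}

	/* ... operate on the sg_table ... */

	sg_free_table(st);
	kfree(st);
	intel_region_ttm_node_free(mem, res);
	return 0;
}
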
v6.13.7
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM
 * regions to TTM memory types and resource managers.
 */

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use, should we decide to implement this.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
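
/*
 * Illustrative only (not part of the source file): with the mapping above,
 * a SYSTEM region resolves to TTM_PL_SYSTEM, while LMEM/MOCK instance 0
 * maps to TTM_PL_PRIV, instance 1 to TTM_PL_PRIV + 1, and so on, bounded
 * by TTM_NUM_MEM_TYPES.
 */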

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's an LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
				      resource_size(&mem->region),
				      resource_size(&mem->io),
				      mem->min_page_size, PAGE_SIZE);
	if (ret)
		return ret;

	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		msleep(20);
		drain_workqueue(mem->i915->bdev.wq);
	}

	/* If we leaked objects, don't free the region; that would cause a use-after-free */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A malloced sg_table on success, an error pointer on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res,
				  u32 page_alignment)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start,
					      page_alignment);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start,
						     page_alignment);
	}
}

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @offset: BO offset
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use it if you are not completely sure that's the
 * case. The returned opaque node can be converted to a refcounted sg_table
 * using intel_region_ttm_resource_to_rsgt(), and can be freed using
 * intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
			ret = -E2BIG;
			goto out;
		}
		place.fpfn = offset >> PAGE_SHIFT;
		if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
			ret = -E2BIG;
			goto out;
		}
		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	} else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place.fpfn = 0;
			if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
				ret = -E2BIG;
				goto out;
			}
			place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
		}
	}

	mock_bo.base.size = size;
	mock_bo.bdev = &mem->i915->bdev;

	ret = man->func->alloc(man, &mock_bo, &place, &res);

out:
	if (ret == -ENOSPC)
		ret = -ENXIO;
	if (!ret)
		res->bo = NULL; /* Rather blow up than risk a use-after-free */
	return ret ? ERR_PTR(ret) : res;
}

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	mock_bo.base.size = res->size;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

	man->func->free(man, res);
}
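
/*
 * Illustrative usage sketch (hypothetical helper, not part of
 * intel_region_ttm.c): a selftest-style caller would pair the helpers
 * above roughly as below, assuming CONFIG_DRM_I915_SELFTEST. The function
 * name and the abbreviated error handling are assumptions for
 * illustration.
 */
static int example_resource_alloc_and_map(struct intel_memory_region *mem)
{
	struct i915_refct_sgt *rsgt;
	struct ttm_resource *res;

	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
					      SZ_2M, 0);
	if (IS_ERR(res))
		return PTR_ERR(res);

	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
	if (IS_ERR(rsgt)) {
		intel_region_ttm_resource_free(mem, res);
		return PTR_ERR(rsgt);
	}

	/* ... operate on the sg_table at &rsgt->table ... */

	i915_refct_sgt_put(rsgt);	/* drops the reference; frees on last put */
	intel_region_ttm_resource_free(mem, res);
	return 0;
}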