Loading...
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5#include <drm/ttm/ttm_bo_driver.h>
6#include <drm/ttm/ttm_device.h>
7#include <drm/ttm/ttm_range_manager.h>
8
9#include "i915_drv.h"
10#include "i915_scatterlist.h"
11
12#include "intel_region_ttm.h"
13
14/**
15 * DOC: TTM support structure
16 *
17 * The code in this file deals with setting up memory managers for TTM
18 * LMEM and MOCK regions and converting the output from
 * the managers to struct sg_table, basically providing the mapping from
20 * i915 GEM regions to TTM memory types and resource managers.
21 */
22
/*
 * A zero-initialized driver for now. We don't have a TTM backend yet:
 * ttm_device_init() only requires a valid funcs pointer, so an empty
 * vtable is sufficient until buffer-object support is wired up.
 */
static struct ttm_device_funcs i915_ttm_bo_driver;
25
26/**
27 * intel_region_ttm_device_init - Initialize a TTM device
28 * @dev_priv: Pointer to an i915 device private structure.
29 *
30 * Return: 0 on success, negative error code on failure.
31 */
32int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
33{
34 struct drm_device *drm = &dev_priv->drm;
35
36 return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
37 drm->dev, drm->anon_inode->i_mapping,
38 drm->vma_offset_manager, false, false);
39}
40
/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Tears down the TTM device state set up by
 * intel_region_ttm_device_init(). All regions must have been
 * finalized first.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
49
50/*
51 * Map the i915 memory regions to TTM memory types. We use the
52 * driver-private types for now, reserving TTM_PL_VRAM for stolen
53 * memory and TTM_PL_TT for GGTT use if decided to implement this.
54 */
55static int intel_region_to_ttm_type(struct intel_memory_region *mem)
56{
57 int type;
58
59 GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
60 mem->type != INTEL_MEMORY_MOCK);
61
62 type = mem->instance + TTM_PL_PRIV;
63 GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
64
65 return type;
66}
67
68static struct ttm_resource *
69intel_region_ttm_node_reserve(struct intel_memory_region *mem,
70 resource_size_t offset,
71 resource_size_t size)
72{
73 struct ttm_resource_manager *man = mem->region_private;
74 struct ttm_place place = {};
75 struct ttm_buffer_object mock_bo = {};
76 struct ttm_resource *res;
77 int ret;
78
79 /*
80 * Having to use a mock_bo is unfortunate but stems from some
81 * drivers having private managers that insist to know what the
82 * allocate memory is intended for, using it to send private
83 * data to the manager. Also recently the bo has been used to send
84 * alignment info to the manager. Assume that apart from the latter,
85 * none of the managers we use will ever access the buffer object
86 * members, hoping we can pass the alignment info in the
87 * struct ttm_place in the future.
88 */
89
90 place.fpfn = offset >> PAGE_SHIFT;
91 place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
92 mock_bo.base.size = size;
93 ret = man->func->alloc(man, &mock_bo, &place, &res);
94 if (ret == -ENOSPC)
95 ret = -ENXIO;
96
97 return ret ? ERR_PTR(ret) : res;
98}
99
100/**
101 * intel_region_ttm_node_free - Free a node allocated from a resource manager
102 * @mem: The region the node was allocated from.
103 * @node: The opaque node representing an allocation.
104 */
105void intel_region_ttm_node_free(struct intel_memory_region *mem,
106 struct ttm_resource *res)
107{
108 struct ttm_resource_manager *man = mem->region_private;
109
110 man->func->free(man, res);
111}
112
/* Region-private hooks plugged into the generic intel_memory_region code. */
static const struct intel_memory_region_private_ops priv_ops = {
	.reserve = intel_region_ttm_node_reserve,
	.free = intel_region_ttm_node_free,
};
117
/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * Sets up a TTM range manager covering the region and publishes it
 * through @mem->region_private, along with the chunk-size/order
 * parameters derived from the region size and the region-private
 * reserve/free ops.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = ttm_range_man_init(bdev, mem_type, false,
				 resource_size(&mem->region) >> PAGE_SHIFT);
	if (ret)
		return ret;

	mem->chunk_size = PAGE_SIZE;
	/* Largest power-of-two order that still fits in the region. */
	mem->max_order =
		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
	mem->is_range_manager = true;
	mem->priv_ops = &priv_ops;
	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}
138
139/**
140 * intel_region_ttm_fini - Finalize a TTM region.
141 * @mem: The memory region
142 *
143 * This functions takes down the TTM resource manager associated with the
144 * memory region, and if it was registered with the TTM device,
145 * removes that registration.
146 */
147void intel_region_ttm_fini(struct intel_memory_region *mem)
148{
149 int ret;
150
151 ret = ttm_range_man_fini(&mem->i915->bdev,
152 intel_region_to_ttm_type(mem));
153 GEM_WARN_ON(ret);
154 mem->region_private = NULL;
155}
156
157/**
158 * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
159 * to an sg_table.
160 * @mem: The memory region.
161 * @node: The resource manager node obtained from the TTM resource manager.
162 *
163 * The gem backends typically use sg-tables for operations on the underlying
164 * io_memory. So provide a way for the backends to translate the
165 * nodes they are handed from TTM to sg-tables.
166 *
167 * Return: A malloced sg_table on success, an error pointer on failure.
168 */
169struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
170 struct ttm_resource *res)
171{
172 struct ttm_range_mgr_node *range_node =
173 container_of(res, typeof(*range_node), base);
174
175 GEM_WARN_ON(!mem->is_range_manager);
176 return i915_sg_from_mm_node(&range_node->mm_nodes[0],
177 mem->region.start);
178}
179
180/**
181 * intel_region_ttm_node_alloc - Allocate memory resources from a region
182 * @mem: The memory region,
183 * @size: The requested size in bytes
184 * @flags: Allocation flags
185 *
186 * This functionality is provided only for callers that need to allocate
187 * memory from standalone TTM range managers, without the TTM eviction
188 * functionality. Don't use if you are not completely sure that's the
189 * case. The returned opaque node can be converted to an sg_table using
190 * intel_region_ttm_node_to_st(), and can be freed using
191 * intel_region_ttm_node_free().
192 *
193 * Return: A valid pointer on success, an error pointer on failure.
194 */
195struct ttm_resource *
196intel_region_ttm_node_alloc(struct intel_memory_region *mem,
197 resource_size_t size,
198 unsigned int flags)
199{
200 struct ttm_resource_manager *man = mem->region_private;
201 struct ttm_place place = {};
202 struct ttm_buffer_object mock_bo = {};
203 struct ttm_resource *res;
204 int ret;
205
206 /*
207 * We ignore the flags for now since we're using the range
208 * manager and contigous and min page size would be fulfilled
209 * by default if size is min page size aligned.
210 */
211 mock_bo.base.size = size;
212
213 if (mem->is_range_manager) {
214 if (size >= SZ_1G)
215 mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
216 else if (size >= SZ_2M)
217 mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
218 else if (size >= SZ_64K)
219 mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
220 }
221
222 ret = man->func->alloc(man, &mock_bo, &place, &res);
223 if (ret == -ENOSPC)
224 ret = -ENXIO;
225 return ret ? ERR_PTR(ret) : res;
226}
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5#include <drm/ttm/ttm_bo_driver.h>
6#include <drm/ttm/ttm_device.h>
7#include <drm/ttm/ttm_range_manager.h>
8
9#include "i915_drv.h"
10#include "i915_scatterlist.h"
11#include "i915_ttm_buddy_manager.h"
12
13#include "intel_region_ttm.h"
14
15#include "gem/i915_gem_region.h"
16#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
17/**
18 * DOC: TTM support structure
19 *
20 * The code in this file deals with setting up memory managers for TTM
21 * LMEM and MOCK regions and converting the output from
 * the managers to struct sg_table, basically providing the mapping from
23 * i915 GEM regions to TTM memory types and resource managers.
24 */
25
/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Registers &dev_priv->bdev with the real i915 TTM driver callbacks,
 * reusing the DRM device's anon inode mapping and vma offset manager.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}
40
/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Counterpart of intel_region_ttm_device_init(); all regions must
 * have been finalized before this is called.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
49
50/*
51 * Map the i915 memory regions to TTM memory types. We use the
52 * driver-private types for now, reserving TTM_PL_VRAM for stolen
53 * memory and TTM_PL_TT for GGTT use if decided to implement this.
54 */
55int intel_region_to_ttm_type(const struct intel_memory_region *mem)
56{
57 int type;
58
59 GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
60 mem->type != INTEL_MEMORY_MOCK &&
61 mem->type != INTEL_MEMORY_SYSTEM);
62
63 if (mem->type == INTEL_MEMORY_SYSTEM)
64 return TTM_PL_SYSTEM;
65
66 type = mem->instance + TTM_PL_PRIV;
67 GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
68
69 return type;
70}
71
72/**
73 * intel_region_ttm_init - Initialize a memory region for TTM.
74 * @mem: The region to initialize.
75 *
76 * This function initializes a suitable TTM resource manager for the
77 * region, and if it's a LMEM region type, attaches it to the TTM
78 * device. MOCK regions are NOT attached to the TTM device, since we don't
79 * have one for the mock selftests.
80 *
81 * Return: 0 on success, negative error code on failure.
82 */
83int intel_region_ttm_init(struct intel_memory_region *mem)
84{
85 struct ttm_device *bdev = &mem->i915->bdev;
86 int mem_type = intel_region_to_ttm_type(mem);
87 int ret;
88
89 ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
90 resource_size(&mem->region),
91 mem->io_size,
92 mem->min_page_size, PAGE_SIZE);
93 if (ret)
94 return ret;
95
96 mem->region_private = ttm_manager_type(bdev, mem_type);
97
98 return 0;
99}
100
/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This functions takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 *
 * Return: 0 on success, -EBUSY if objects could not be flushed from the
 * region (the manager is then intentionally leaked), or the error from
 * taking down the buddy manager.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. Bounded retry: 10 x 20ms. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		/* Objects list is protected by the region's lock. */
		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		/* Give delayed TTM frees a chance to run, then re-check. */
		msleep(20);
		flush_delayed_work(&mem->i915->bdev.wq);
	}

	/* If we leaked objects, Don't free the region causing use after free */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}
149
150/**
151 * intel_region_ttm_resource_to_rsgt -
152 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
153 * @mem: The memory region.
154 * @res: The resource manager resource obtained from the TTM resource manager.
155 * @page_alignment: Required page alignment for each sg entry. Power of two.
156 *
157 * The gem backends typically use sg-tables for operations on the underlying
158 * io_memory. So provide a way for the backends to translate the
159 * nodes they are handed from TTM to sg-tables.
160 *
161 * Return: A malloced sg_table on success, an error pointer on failure.
162 */
163struct i915_refct_sgt *
164intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
165 struct ttm_resource *res,
166 u32 page_alignment)
167{
168 if (mem->is_range_manager) {
169 struct ttm_range_mgr_node *range_node =
170 to_ttm_range_mgr_node(res);
171
172 return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
173 mem->region.start,
174 page_alignment);
175 } else {
176 return i915_rsgt_from_buddy_resource(res, mem->region.start,
177 page_alignment);
178 }
179}
180
181#ifdef CONFIG_DRM_I915_SELFTEST
182/**
183 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
184 * @mem: The memory region,
185 * @size: The requested size in bytes
186 * @flags: Allocation flags
187 *
188 * This functionality is provided only for callers that need to allocate
189 * memory from standalone TTM range managers, without the TTM eviction
190 * functionality. Don't use if you are not completely sure that's the
191 * case. The returned opaque node can be converted to an sg_table using
192 * intel_region_ttm_resource_to_st(), and can be freed using
193 * intel_region_ttm_resource_free().
194 *
195 * Return: A valid pointer on success, an error pointer on failure.
196 */
197struct ttm_resource *
198intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
199 resource_size_t offset,
200 resource_size_t size,
201 unsigned int flags)
202{
203 struct ttm_resource_manager *man = mem->region_private;
204 struct ttm_place place = {};
205 struct ttm_buffer_object mock_bo = {};
206 struct ttm_resource *res;
207 int ret;
208
209 if (flags & I915_BO_ALLOC_CONTIGUOUS)
210 place.flags |= TTM_PL_FLAG_CONTIGUOUS;
211 if (offset != I915_BO_INVALID_OFFSET) {
212 place.fpfn = offset >> PAGE_SHIFT;
213 place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
214 } else if (mem->io_size && mem->io_size < mem->total) {
215 if (flags & I915_BO_ALLOC_GPU_ONLY) {
216 place.flags |= TTM_PL_FLAG_TOPDOWN;
217 } else {
218 place.fpfn = 0;
219 place.lpfn = mem->io_size >> PAGE_SHIFT;
220 }
221 }
222
223 mock_bo.base.size = size;
224 mock_bo.bdev = &mem->i915->bdev;
225
226 ret = man->func->alloc(man, &mock_bo, &place, &res);
227 if (ret == -ENOSPC)
228 ret = -ENXIO;
229 if (!ret)
230 res->bo = NULL; /* Rather blow up, then some uaf */
231 return ret ? ERR_PTR(ret) : res;
232}
233
234#endif
235
/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	/*
	 * Resources allocated here carry no real buffer object
	 * (res->bo was cleared at alloc time), so give the manager's
	 * free() callback a minimal stand-in to dereference.
	 */
	mock_bo.base.size = res->size;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

	man->func->free(man, res);
}