Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
v5.14.15
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2019 Intel Corporation
  4 */
  5
  6#include "intel_memory_region.h"
  7#include "i915_drv.h"
  8
/*
 * Static table translating each INTEL_REGION_* id (the array index) into
 * the (class, instance) pair used to identify the backing memory region.
 * Consulted by intel_memory_regions_hw_probe() below.
 */
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};
 30
/*
 * Bookkeeping for one reserved range within a region, so that
 * intel_memory_region_unreserve() can later return it via priv_ops->free().
 */
struct intel_region_reserve {
	struct list_head link;		/* entry in mem->reserved, guarded by mem->mm_lock */
	struct ttm_resource *res;	/* resource handed back by priv_ops->reserve() */
};
 35
 36struct intel_memory_region *
 37intel_memory_region_lookup(struct drm_i915_private *i915,
 38			   u16 class, u16 instance)
 39{
 40	struct intel_memory_region *mr;
 41	int id;
 42
 43	/* XXX: consider maybe converting to an rb tree at some point */
 44	for_each_memory_region(mr, i915, id) {
 45		if (mr->type == class && mr->instance == instance)
 46			return mr;
 47	}
 48
 49	return NULL;
 50}
 51
 52struct intel_memory_region *
 53intel_memory_region_by_type(struct drm_i915_private *i915,
 54			    enum intel_memory_type mem_type)
 55{
 56	struct intel_memory_region *mr;
 57	int id;
 58
 59	for_each_memory_region(mr, i915, id)
 60		if (mr->type == mem_type)
 61			return mr;
 62
 63	return NULL;
 64}
 65
 66/**
 67 * intel_memory_region_unreserve - Unreserve all previously reserved
 68 * ranges
 69 * @mem: The region containing the reserved ranges.
 70 */
 71void intel_memory_region_unreserve(struct intel_memory_region *mem)
 72{
 73	struct intel_region_reserve *reserve, *next;
 
 74
 75	if (!mem->priv_ops || !mem->priv_ops->free)
 76		return;
 
 
 
 
 
 
 77
 
 
 
 
 78	mutex_lock(&mem->mm_lock);
 79	list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
 80		list_del(&reserve->link);
 81		mem->priv_ops->free(mem, reserve->res);
 82		kfree(reserve);
 83	}
 84	mutex_unlock(&mem->mm_lock);
 85}
 86
 87/**
 88 * intel_memory_region_reserve - Reserve a memory range
 89 * @mem: The region for which we want to reserve a range.
 90 * @offset: Start of the range to reserve.
 91 * @size: The size of the range to reserve.
 92 *
 93 * Return: 0 on success, negative error code on failure.
 94 */
 95int intel_memory_region_reserve(struct intel_memory_region *mem,
 96				resource_size_t offset,
 97				resource_size_t size)
 98{
 99	int ret;
100	struct intel_region_reserve *reserve;
101
102	if (!mem->priv_ops || !mem->priv_ops->reserve)
103		return -EINVAL;
 
 
104
105	reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
106	if (!reserve)
107		return -ENOMEM;
108
109	reserve->res = mem->priv_ops->reserve(mem, offset, size);
110	if (IS_ERR(reserve->res)) {
111		ret = PTR_ERR(reserve->res);
112		kfree(reserve);
113		return ret;
 
 
 
 
 
 
114	}
115
 
 
 
 
 
 
 
 
 
 
116	mutex_lock(&mem->mm_lock);
117	list_add_tail(&reserve->link, &mem->reserved);
118	mutex_unlock(&mem->mm_lock);
119
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121}
122
/*
 * intel_memory_region_create - Allocate and initialise a new memory region.
 * @i915: owning device
 * @start, @size: physical range described by the region
 * @min_page_size: minimum allocation granularity for the region
 * @io_start: CPU-visible start of the region's aperture
 * @type, @instance: identity used by intel_memory_region_lookup()
 * @ops: region backend callbacks (ops->init run here, ops->release at destroy)
 *
 * Returns the new region (refcount 1) or an ERR_PTR on failure.
 */
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;	/* nothing allocated yet */
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);
	INIT_LIST_HEAD(&mem->reserved);

	mutex_init(&mem->mm_lock);

	/* Optional backend-specific setup; failure aborts creation. */
	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	/* Refcount initialised last: the region is fully constructed now. */
	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}
170
171void intel_memory_region_set_name(struct intel_memory_region *mem,
172				  const char *fmt, ...)
173{
174	va_list ap;
175
176	va_start(ap, fmt);
177	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
178	va_end(ap);
179}
180
/* kref release callback: final teardown once the last reference is dropped. */
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	/* Return reserved ranges before the backend releases its state. */
	intel_memory_region_unreserve(mem);
	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}
194
/* Take an extra reference on @mem; returns @mem for call chaining. */
struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}
201
/* Drop a reference on @mem; frees it via __intel_memory_region_destroy(). */
void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
206
207/* Global memory region registration -- only slight layer inversions! */
208
/*
 * Probe and register every memory region the hardware advertises.
 * On any setup failure, all previously registered regions are torn down
 * and the error is returned.
 */
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		/* Skip region ids the hardware does not expose. */
		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915, type, instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			/* Unhandled classes (e.g. plain LOCAL) are skipped, not errors. */
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			/*
			 * NOTE(review): the message passes err where
			 * "region(%d)" suggests a region id — confirm intent.
			 */
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}
258
259void intel_memory_regions_driver_release(struct drm_i915_private *i915)
260{
261	int i;
262
263	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
264		struct intel_memory_region *region =
265			fetch_and_zero(&i915->mm.regions[i]);
266
267		if (region)
268			intel_memory_region_put(region);
269	}
270}
271
272#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
273#include "selftests/intel_memory_region.c"
274#include "selftests/mock_region.c"
275#endif
v5.9
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2019 Intel Corporation
  4 */
  5
  6#include "intel_memory_region.h"
  7#include "i915_drv.h"
  8
/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
#define REGION_MAP(type, inst) \
	BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)

/*
 * Encoded (type, instance) bitmask per region id; decoded with
 * MEMORY_TYPE_FROM_REGION()/MEMORY_INSTANCE_FROM_REGION() at probe time.
 */
const u32 intel_region_map[] = {
	[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
	[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};
 18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 19struct intel_memory_region *
 20intel_memory_region_by_type(struct drm_i915_private *i915,
 21			    enum intel_memory_type mem_type)
 22{
 23	struct intel_memory_region *mr;
 24	int id;
 25
 26	for_each_memory_region(mr, i915, id)
 27		if (mr->type == mem_type)
 28			return mr;
 29
 30	return NULL;
 31}
 32
 33static u64
 34intel_memory_region_free_pages(struct intel_memory_region *mem,
 35			       struct list_head *blocks)
 
 
 
 36{
 37	struct i915_buddy_block *block, *on;
 38	u64 size = 0;
 39
 40	list_for_each_entry_safe(block, on, blocks, link) {
 41		size += i915_buddy_block_size(&mem->mm, block);
 42		i915_buddy_free(&mem->mm, block);
 43	}
 44	INIT_LIST_HEAD(blocks);
 45
 46	return size;
 47}
 48
/* Return a list of buddy blocks to @mem, crediting their size back to avail. */
void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}
 57
 58void
 59__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
 
 
 
 
 
 
 
 
 
 60{
 61	struct list_head blocks;
 
 62
 63	INIT_LIST_HEAD(&blocks);
 64	list_add(&block->link, &blocks);
 65	__intel_memory_region_put_pages_buddy(block->private, &blocks);
 66}
 67
/*
 * Allocate @size bytes from @mem's buddy allocator, collecting the
 * resulting blocks on @blocks. @size must be chunk-aligned.
 *
 * I915_ALLOC_MIN_PAGE_SIZE caps the smallest block at mem->min_page_size;
 * I915_ALLOC_CONTIGUOUS rounds @size to a power of two and forces a single
 * block. Returns 0 on success, -E2BIG if the request exceeds the largest
 * order, or -ENXIO when the allocator runs dry (partial allocations are
 * rolled back).
 */
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		/* One block must cover everything, so its order fits all of size. */
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
		return -E2BIG;

	/* Remaining request, measured in allocator chunks. */
	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		/* Largest order that still fits the remaining page count. */
		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		/* Walk down the orders until an allocation succeeds. */
		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		/* Remember the owner so put_block can find its way home. */
		block->private = mem;
		list_add(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	/* Undo any partial allocation before reporting failure. */
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}
132
133struct i915_buddy_block *
134__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
135				      resource_size_t size,
136				      unsigned int flags)
137{
138	struct i915_buddy_block *block;
139	LIST_HEAD(blocks);
140	int ret;
141
142	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
143	if (ret)
144		return ERR_PTR(ret);
145
146	block = list_first_entry(&blocks, typeof(*block), link);
147	list_del_init(&block->link);
148	return block;
149}
150
151int intel_memory_region_init_buddy(struct intel_memory_region *mem)
152{
153	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
154			       PAGE_SIZE);
155}
156
/* Tear down the buddy allocator created by intel_memory_region_init_buddy(). */
void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_fini(&mem->mm);
}
161
/*
 * intel_memory_region_create - Allocate and initialise a new memory region.
 * @i915: owning device
 * @start, @size: physical range described by the region
 * @min_page_size: minimum allocation granularity for the region
 * @io_start: CPU-visible start of the region's aperture
 * @ops: region backend callbacks (ops->init run here, ops->release at destroy)
 *
 * Returns the new region (refcount 1) or an ERR_PTR on failure.
 */
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;	/* nothing allocated yet */

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);

	mutex_init(&mem->mm_lock);

	/* Optional backend-specific setup; failure aborts creation. */
	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	/* Refcount initialised last: the region is fully constructed now. */
	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}
204
/* printf-style setter for the region's fixed-size name buffer. */
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}
214
/* kref release callback: final teardown once the last reference is dropped. */
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	/* Backend cleanup first, then the locks and the struct itself. */
	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}
227
/* Take an extra reference on @mem; returns @mem for call chaining. */
struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}
234
/* Drop a reference on @mem; frees it via __intel_memory_region_destroy(). */
void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
239
240/* Global memory region registration -- only slight layer inversions! */
241
/*
 * Probe and register every memory region the hardware advertises.
 * On any setup failure, all previously registered regions are torn down
 * and the error is returned.
 */
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u32 type;

		/* Skip region ids the hardware does not expose. */
		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
		/*
		 * No default case: an unrecognised type leaves mem as
		 * ERR_PTR(-ENODEV), which fails the probe below.
		 */
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN:
			mem = i915_gem_stolen_setup(i915);
			break;
		case INTEL_MEMORY_LOCAL:
			mem = intel_setup_fake_lmem(i915);
			break;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			/*
			 * NOTE(review): the message passes err where
			 * "region(%d)" suggests a region id — confirm intent.
			 */
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		/* id keeps the raw encoded map entry in this version. */
		mem->id = intel_region_map[i];
		mem->type = type;
		mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}
287
/* Drop the probe-time reference on every registered region and clear the table. */
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}
300
301#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
302#include "selftests/intel_memory_region.c"
303#include "selftests/mock_region.c"
304#endif