// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
#define REGION_MAP(type, inst) \
	BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)

const u32 intel_region_map[] = {
	[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
	[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};
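
/*
 * Illustrative sketch (not in the original source): later in this file the
 * MEMORY_TYPE_FROM_REGION() and MEMORY_INSTANCE_FROM_REGION() helpers are
 * used to recover the fields from an intel_region_map[] entry, e.g.
 *
 *	u32 id = intel_region_map[INTEL_REGION_LMEM];
 *	u32 type = MEMORY_TYPE_FROM_REGION(id); // INTEL_MEMORY_LOCAL
 *	u32 inst = MEMORY_INSTANCE_FROM_REGION(id); // still BIT(inst)-encoded,
 *						    // see the XXX above
 */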

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}
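
/*
 * Example usage (illustrative only, not part of the driver): a region of a
 * given type may be absent on a platform, so callers must handle NULL:
 *
 *	struct intel_memory_region *mr =
 *		intel_memory_region_by_type(i915, INTEL_MEMORY_STOLEN);
 *	if (!mr)
 *		return -ENODEV;
 */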

static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}
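
/*
 * Note (added for clarity): block->private is set to the owning region in
 * __intel_memory_region_get_pages_buddy(), which is what lets the
 * single-block put above find its region without an explicit argument.
 */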

int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	/* Express the minimum page size as a buddy order above chunk_size. */
	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	/* A contiguous allocation must be satisfied by a single block. */
	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		/*
		 * Fall back to progressively smaller orders until the
		 * allocation succeeds or hits the minimum acceptable
		 * block size.
		 */
		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}
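
/*
 * Worked example (illustrative): with a 4K chunk_size and a 64K
 * min_page_size, I915_ALLOC_MIN_PAGE_SIZE yields
 * min_order = ilog2(64K) - ilog2(4K) = 16 - 12 = 4, i.e. every block is
 * at least 2^4 * 4K = 64K. A 192K request (48 pages) is then satisfied by
 * a 128K (order 5) block followed by a 64K (order 4) block, since
 * fls(48) - 1 = 5 and fls(16) - 1 = 4.
 */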

struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}
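
/*
 * Example (illustrative only): taking a single contiguous block and
 * returning it once done. SZ_64K is assumed to satisfy the region's
 * minimum page size:
 *
 *	struct i915_buddy_block *block;
 *
 *	block = __intel_memory_region_get_block_buddy(mem, SZ_64K,
 *						      I915_ALLOC_CONTIGUOUS);
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 *	...
 *	__intel_memory_region_put_block_buddy(block);
 */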

int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_fini(&mem->mm);
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}
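
/*
 * Example (illustrative only; my_region_ops is a hypothetical ops table
 * whose ->init() hook would typically call intel_memory_region_init_buddy()):
 *
 *	mem = intel_memory_region_create(i915, 0, SZ_512M, PAGE_SIZE, 0,
 *					 &my_region_ops);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	intel_memory_region_set_name(mem, "my-region");
 */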

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
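
/*
 * Refcounting sketch (illustrative): intel_memory_region_create() returns
 * with one reference held; each additional user takes and drops its own:
 *
 *	struct intel_memory_region *mr = intel_memory_region_get(mem);
 *	...
 *	intel_memory_region_put(mr);
 */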

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u32 type;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN:
			mem = i915_gem_stolen_setup(i915);
			break;
		case INTEL_MEMORY_LOCAL:
			mem = intel_setup_fake_lmem(i915);
			break;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = intel_region_map[i];
		mem->type = type;
		mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}
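
/*
 * Note (added for clarity): INTEL_MEMORY_LOCAL is probed via
 * intel_setup_fake_lmem() above, i.e. device-local memory is still mocked
 * on top of system memory at this point in the driver.
 */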

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif