v6.13.7
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = get_random_u32_below(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io.start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io.start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}

static resource_size_t random_page(resource_size_t last)
{
	/* Limited to low 44b (16TiB), but should suffice for a spot check */
	return get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (resource_size(&mem->io) < PAGE_SIZE)
		return 0;

	last = resource_size(&mem->io) - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}
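
/*
 * Editor's note (not part of the kernel source): with
 * CONFIG_DRM_I915_DEBUG_GEM the driver spot checks only the first, last
 * and one random page of each region at probe time; setting the real
 * i915 "memtest" module parameter upgrades that to a test of every page,
 * e.g.
 *
 *	modprobe i915 memtest=1
 *
 * or "i915.memtest=1" on the kernel command line.
 */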

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}
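
/*
 * Editor's sketch (hypothetical helper, not in the original file): how a
 * caller might resolve a uAPI memory class/instance pair, e.g. taken from
 * userspace's struct drm_i915_gem_memory_class_instance, to a region.
 */
static struct intel_memory_region *
example_lookup_lmem0(struct drm_i915_private *i915)
{
	/* class INTEL_MEMORY_LOCAL, instance 0 is exposed as "local0" */
	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, 0);
}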

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}
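
/*
 * Editor's sketch (hypothetical helper, not in the original file): region
 * init code could use the reservation above to carve a hole out of a
 * buddy-managed region so the allocator never hands it out. The offset
 * and size here are illustrative only.
 */
static int example_reserve_head(struct intel_memory_region *mem)
{
	return intel_memory_region_reserve(mem, 0, SZ_1M);
}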

void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io.start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

static const char *region_type_str(u16 type)
{
	switch (type) {
	case INTEL_MEMORY_SYSTEM:
		return "system";
	case INTEL_MEMORY_LOCAL:
		return "local";
	case INTEL_MEMORY_STOLEN_LOCAL:
		return "stolen-local";
	case INTEL_MEMORY_STOLEN_SYSTEM:
		return "stolen-system";
	default:
		return "unknown";
	}
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = DEFINE_RES_MEM(start, size);
	mem->io = DEFINE_RES_MEM(io_start, io_size);
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->type = type;
	mem->instance = instance;

	snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
		 region_type_str(type), instance);

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}
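
/*
 * Editor's sketch (not in the original file): the shape of a minimal
 * intel_memory_region_ops user. The init/release bodies are placeholders;
 * a real implementation sets up and tears down the region's backing store.
 */
static int example_region_init(struct intel_memory_region *mem)
{
	return 0; /* e.g. create an allocator covering the range */
}

static int example_region_release(struct intel_memory_region *mem)
{
	return 0; /* undo whatever init did */
}

static const struct intel_memory_region_ops example_region_ops = {
	.init = example_region_init,
	.release = example_region_release,
};

/*
 * A hypothetical caller would then do something like:
 *
 *	mem = intel_memory_region_create(i915, 0, size, PAGE_SIZE,
 *					 io_start, io_size,
 *					 INTEL_MEMORY_LOCAL, 0,
 *					 &example_region_ops);
 */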

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

void intel_memory_region_avail(struct intel_memory_region *mr,
			       u64 *avail, u64 *visible_avail)
{
	if (mr->type == INTEL_MEMORY_LOCAL) {
		i915_ttm_buddy_man_avail(mr->region_private,
					 avail, visible_avail);
		*avail <<= PAGE_SHIFT;
		*visible_avail <<= PAGE_SHIFT;
	} else {
		*avail = mr->total;
		*visible_avail = mr->total;
	}
}
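
/*
 * Editor's sketch (hypothetical helper): reporting free space through the
 * query above. For local memory the buddy manager counts pages, which
 * intel_memory_region_avail() already converts to bytes.
 */
static void example_report_avail(struct intel_memory_region *mr,
				 struct drm_printer *p)
{
	u64 avail, visible_avail;

	intel_memory_region_avail(mr, &avail, &visible_avail);
	drm_printf(p, "%s: %llu bytes free (%llu CPU-visible)\n",
		   mr->uabi_name, avail, visible_avail);
}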

void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, i))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		if (mem) { /* Skip on non-fatal errors */
			mem->id = i;
			i915->mm.regions[i] = mem;
		}
	}

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = i915->mm.regions[i];
		u64 region_size, io_size;

		if (!mem)
			continue;

		region_size = resource_size(&mem->region) >> 20;
		io_size = resource_size(&mem->io) >> 20;

		if (resource_size(&mem->io))
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
				mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
		else
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
				mem->id, mem->name, region_size, &mem->region);
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}
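
/*
 * Editor's sketch (hypothetical helper): walking every probed region with
 * the same for_each_memory_region() iterator the lookup helpers use, here
 * to dump each region's allocator state.
 */
static void example_list_regions(struct drm_i915_private *i915,
				 struct drm_printer *p)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		intel_memory_region_debug(mr, p);
}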

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif
v5.9
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
#define REGION_MAP(type, inst) \
	BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)

const u32 intel_region_map[] = {
	[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
	[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}

int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}
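
/*
 * Editor's sketch (hypothetical helper, v5.9-era API): requesting a
 * physically contiguous 64 KiB allocation. I915_ALLOC_CONTIGUOUS rounds
 * the request up to a power of two and forces a single buddy block, as
 * handled above.
 */
static int example_alloc_contiguous(struct intel_memory_region *mem,
				    struct list_head *blocks)
{
	return __intel_memory_region_get_pages_buddy(mem, SZ_64K,
						     I915_ALLOC_CONTIGUOUS,
						     blocks);
}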

struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}

int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_fini(&mem->mm);
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
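
/*
 * Editor's sketch (hypothetical helper): unlike the v6.x code above, v5.9
 * regions are reference counted, so a holder pins the region for as long
 * as it keeps the pointer.
 */
static void example_borrow_region(struct intel_memory_region *mem)
{
	struct intel_memory_region *ref = intel_memory_region_get(mem);

	/* ... use ref ... */

	intel_memory_region_put(ref);
}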

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u32 type;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN:
			mem = i915_gem_stolen_setup(i915);
			break;
		case INTEL_MEMORY_LOCAL:
			mem = intel_setup_fake_lmem(i915);
			break;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = intel_region_map[i];
		mem->type = type;
		mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif