/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"

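/*
 * Hold the expected MOCS and L3CC tables for the device, plus a pinned
 * scratch page into which the live register values are stored for
 * comparison.
 */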
struct live_mocs {
	struct drm_i915_mocs_table mocs;
	struct drm_i915_mocs_table l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	/* We build large requests to read the registers from the ring */
	ce->ring = __intel_context_ring_size(SZ_16K);

	return ce;
}

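/* Submit the request and wait briefly for completion, returning -ETIME on timeout */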
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

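/* Submit the request and wait until the spinner is confirmed running on the GPU */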
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

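/*
 * Allocate a CPU-cacheable scratch page and pin it into the global GTT,
 * so the GPU can SRM register values into it and the CPU can read them
 * back.
 */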
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

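/*
 * Record the expected mocs tables for this device and map a scratch
 * page to hold the values read back from the hardware.
 */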
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	struct drm_i915_mocs_table table;
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = table;

	arg->scratch = create_scratch(gt);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

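/*
 * Emit one MI_STORE_REGISTER_MEM per register, storing count dwords
 * starting at addr into consecutive slots of the scratch page.
 */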
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

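/*
 * Read back either the single global MOCS table or this engine's
 * private copy, depending on the platform.
 */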
static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

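/*
 * Each LNCFCMOCS register packs a pair of L3CC entries, hence only half
 * as many registers as table entries are read.
 */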
static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

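/* Compare each dword read back against the expected MOCS entry */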
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

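/*
 * Poison the scratch page, SRM the MOCS (and, on the render engine, the
 * L3CC) registers into it from the given context, then verify the
 * values read back against the expected tables.
 */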
static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, &arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, &arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check that the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

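/*
 * Reset the engine while a spinner is executing on it, so the reset
 * path is exercised with an active, non-idle context.
 */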
static int active_engine_reset(struct intel_context *ce,
			       const char *reason)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0)
		err = intel_engine_reset(ce->engine, reason);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

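/*
 * Check the MOCS tables after an idle engine reset, an active engine
 * reset and a full GT reset in turn.
 */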
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	int err;

	err = intel_engine_reset(ce->engine, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	err = active_engine_reset(ce, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check the mocs setup is retained over per-engine and global resets */

	if (!intel_has_reset_engine(gt))
		return 0;

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		intel_engine_pm_get(engine);
		err = __live_mocs_reset(&mocs, ce);
		intel_engine_pm_put(engine);

		intel_context_put(ce);
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

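/* Only run the live MOCS selftests on platforms that define a mocs table */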
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}