/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "gt/intel_gt.h"

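/*
 * Sanity check a forcewake range table: entries must be sorted in ascending
 * order with no overlaps and each entry must have end greater than start.
 * For "watertight" tables, every entry must also begin immediately after
 * the previous one ends, leaving no holes.
 */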
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
				unsigned int num_ranges,
				bool is_watertight)
{
	unsigned int i;
	s32 prev;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		/* Check that the table is watertight */
		if (is_watertight && (prev + 1) != (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the table never goes backwards */
		if (prev >= (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the entry is valid */
		if (ranges->start >= ranges->end) {
			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
			       __func__, i, ranges->start, ranges->end);
			return -EINVAL;
		}

		prev = ranges->end;
	}

	return 0;
}

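/*
 * Check every per-platform shadowed register table: ranges must be sorted
 * in ascending order without overlapping or inverted entries, and each
 * range must start on a dword boundary.
 */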
static int intel_shadow_table_check(void)
{
	struct {
		const struct i915_range *regs;
		unsigned int size;
	} range_lists[] = {
		{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
		{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
		{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
		{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
		{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
		{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
	};
	const struct i915_range *range;
	unsigned int i, j;
	s32 prev;

	for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
		range = range_lists[j].regs;
		for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
			if (range->end < range->start) {
				pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
				       __func__, i, range->start, range->end);
				return -EINVAL;
			}

			if (prev >= (s32)range->start) {
				pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
				       __func__, i, range->start, range->end, prev);
				return -EINVAL;
			}

			if (range->start % 4) {
				pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
				       __func__, i, range->start, range->end);
				return -EINVAL;
			}

			prev = range->end;
		}
	}

	return 0;
}

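/*
 * Mock selftest entry point: validate all of the static forcewake range
 * tables and shadowed register tables without touching any hardware.
 */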
int intel_uncore_mock_selftests(void)
{
	struct {
		const struct intel_forcewake_range *ranges;
		unsigned int num_ranges;
		bool is_watertight;
	} fw[] = {
		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
		{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
		{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
		{ __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true },
		{ __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
	};
	int err, i;

	for (i = 0; i < ARRAY_SIZE(fw); i++) {
		err = intel_fw_table_check(fw[i].ranges,
					   fw[i].num_ranges,
					   fw[i].is_watertight);
		if (err)
			return err;
	}

	err = intel_shadow_table_check();
	if (err)
		return err;

	return 0;
}

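/*
 * Exercise forcewake get/put directly: pick a per-engine register that is
 * expected to read back a non-zero value while its power well is held awake,
 * verify it does so with forcewake held, and then verify it reads back as
 * zero once the forcewake reference is released and the domain powers down.
 */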
static int live_forcewake_ops(void *arg)
{
	static const struct reg {
		const char *name;
		u8 min_graphics_ver;
		u8 max_graphics_ver;
		unsigned long platforms;
		unsigned int offset;
	} registers[] = {
		{
			"RING_START",
			6, 7,
			0x38,
		},
		{
			"RING_MI_MODE",
			8, U8_MAX,
			0x9c,
		}
	};
	const struct reg *r;
	struct intel_gt *gt = arg;
	struct intel_uncore_forcewake_domain *domain;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned int tmp;
	int err = 0;

	GEM_BUG_ON(gt->awake);

	/* vlv/chv with their pcu behave differently wrt reads */
	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
		pr_debug("PCU fakes forcewake badly; skipping\n");
		return 0;
	}

	/*
	 * Not quite as reliable across the gen as one would hope.
	 *
	 * Either our theory of operation is incorrect, or there remain
	 * external parties interfering with the powerwells.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	/* We have to pick carefully to get the exact behaviour we need */
	for (r = registers; r->name; r++)
		if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
			break;
	if (!r->name) {
		pr_debug("Forcewaked register not known for %s; skipping\n",
			 intel_platform_name(INTEL_INFO(gt->i915)->platform));
		return 0;
	}

	wakeref = intel_runtime_pm_get(uncore->rpm);

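	/* Flush any pending forcewake releases so every domain starts idle */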
	for_each_fw_domain(domain, uncore, tmp) {
		smp_store_mb(domain->active, false);
		if (!hrtimer_cancel(&domain->timer))
			continue;

		intel_uncore_fw_release_timer(&domain->timer);
	}

	for_each_engine(engine, gt, id) {
		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
		u32 __iomem *reg = intel_uncore_regs(uncore) + engine->mmio_base + r->offset;
		enum forcewake_domains fw_domains;
		u32 val;

		if (!engine->default_state)
			continue;

		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
							    FW_REG_READ);
		if (!fw_domains)
			continue;

		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			if (!domain->wake_count)
				continue;

			pr_err("fw_domain %s still active, aborting test!\n",
			       intel_uncore_forcewake_domain_to_str(domain->id));
			err = -EINVAL;
			goto out_rpm;
		}

		intel_uncore_forcewake_get(uncore, fw_domains);
		val = readl(reg);
		intel_uncore_forcewake_put(uncore, fw_domains);

		/* Flush the forcewake release (delayed onto a timer) */
		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer))
				intel_uncore_fw_release_timer(&domain->timer);

			preempt_disable();
			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
			preempt_enable();
			if (err) {
				pr_err("Failed to clear fw_domain %s\n",
				       intel_uncore_forcewake_domain_to_str(domain->id));
				goto out_rpm;
			}
		}

		if (!val) {
			pr_err("%s:%s was zero while fw was held!\n",
			       engine->name, r->name);
			err = -EINVAL;
			goto out_rpm;
		}

		/* We then expect the read to return 0 outside of the fw */
		if (wait_for(readl(reg) == 0, 100)) {
			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
			       engine->name, r->name, readl(reg), fw_domains);
			err = -ETIMEDOUT;
			goto out_rpm;
		}
	}

out_rpm:
	intel_runtime_pm_put(uncore->rpm, wakeref);
	return err;
}

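/*
 * With all forcewake domains held, record which register offsets in the low
 * MMIO range can be read raw without tripping the unclaimed access
 * detection; then drop forcewake and check whether raw reads of those same
 * registers are reported as unclaimed once forcewake has been reset.
 */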
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
	struct intel_gt *gt = arg;
	struct intel_uncore *uncore = gt->uncore;
	unsigned long *valid;
	u32 offset;
	int err;

	if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
	    !IS_VALLEYVIEW(gt->i915) &&
	    !IS_CHERRYVIEW(gt->i915))
		return 0;

	/*
	 * This test may lockup the machine or cause GPU hangs afterwards.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	check_for_unclaimed_mmio(uncore);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		intel_uncore_posting_read_fw(uncore, reg);
		if (!check_for_unclaimed_mmio(uncore))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		iosf_mbi_punit_acquire();
		intel_uncore_forcewake_reset(uncore);
		iosf_mbi_punit_release();

		check_for_unclaimed_mmio(uncore);

		intel_uncore_posting_read_fw(uncore, reg);
		if (check_for_unclaimed_mmio(uncore)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	bitmap_free(valid);
	return err;
}

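/*
 * Re-run the range table validation against the forcewake table actually
 * selected for this device at runtime.
 */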
static int live_fw_table(void *arg)
{
	struct intel_gt *gt = arg;

	/* Confirm the table we load is still valid */
	return intel_fw_table_check(gt->uncore->fw_domains_table,
				    gt->uncore->fw_domains_table_entries,
				    GRAPHICS_VER(gt->i915) >= 9);
}

int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_fw_table),
		SUBTEST(live_forcewake_ops),
		SUBTEST(live_forcewake_domains),
	};

	return intel_gt_live_subtests(tests, to_gt(i915));
}