// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <drm/xe_drm.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_pat.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

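/*
 * Allocate a GT with a DRM-managed lifetime and create its ordered
 * workqueue, used below to serialize GT-wide work such as resets.
 */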
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

	return gt;
}

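/*
 * Flag GuC submission as disabled so that the next (re)start goes through
 * the full enable path again.
 */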
void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize() is called here, the driver will not
	 * reload on TGL
	 */
	gt->uc.guc.submission_state.enabled = false;
}

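/* drmm release action registered by xe_gt_init() */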
static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	destroy_workqueue(gt->ordered_wq);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
}

static void gt_reset_worker(struct work_struct *w);

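/*
 * Submit an empty batch on @q and wait up to a second for it to complete.
 * Used during default LRC recording to make the hardware schedule (and
 * therefore save/restore) the contexts involved.
 */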
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from the encoded value to the type-safe MCR register; only
 * valid when reg.mcr is true.
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

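/*
 * Emit a batch that loads @q's LRC save/restore workaround registers via
 * MI_LOAD_REGISTER_IMM and then emits the engine's default state
 * instructions, so the values are captured in the recorded default LRC.
 */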
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_lrc_size(gt_to_xe(gt), q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register when its current value
			 * isn't needed: masked registers carry the mask in
			 * their upper 16 bits, and clr_bits == ~0 (for which
			 * clr_bits + 1 wraps to 0) means the value is fully
			 * overwritten by set_bits.
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
			else
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

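/*
 * Record a default ("golden") LRC image per engine class: run a workaround
 * batch on a fresh queue, switch the hardware to a second queue with a nop
 * batch so the first context gets saved, run one more nop on the original
 * queue to capture any indirect workarounds, then copy the context image
 * out of the LRC BO.
 */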
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_lrc_size(xe, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0].bo->vmap,
				   xe_lrc_pphwsp_offset(&q->lrc[0]),
				   xe_lrc_size(xe, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

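/*
 * Early GT init: read topology and set up MCR steering under forcewake,
 * then build the GT-level workaround and tuning lists.
 */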
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_debug_printer(prefix);

	xe_pat_dump(gt, &p);
}

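/*
 * Init steps that only require the GT forcewake domain: PAT, GGTT/LMTT on
 * the primary GT, early GuC/HuC setup and the engine-related sysfs entries.
 */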
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	xe_pat_init(gt);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	err = xe_uc_init(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Raise GT freq to speed up HuC/GuC load */
	xe_guc_pc_init_early(&gt->uc.guc.pc);

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_sysfs_init(&gt->gtidle);

	/* XXX: Fake that we pull the engine mask from hwconfig blob */
	gt->info.engine_mask = gt->info.__engine_mask;

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		drm_warn(&gt_to_xe(gt)->drm,
			 "failed to register engines sysfs directory, err: %d\n",
			 err);

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	xe_gt_ccs_mode_sysfs_init(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

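/*
 * Init steps that require all forcewake domains: implicit steering
 * defaults, MOCS, hw engines, post-hwconfig GuC init, the USM batch-buffer
 * pool and migration context on the primary GT, and finally loading and
 * starting the firmwares.
 */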
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it doesn't block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

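/*
 * Main per-GT init entry point: set up fence IRQs, ring ops, TLB
 * invalidation and pagefault handling, then run the two forcewake-domain
 * init stages above and register gt_fini() for cleanup.
 */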
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	xe_gt_sysfs_init(gt);

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	xe_gt_freq_init(gt);

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return err;

	return 0;
}

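/*
 * Trigger a full GT reset through GDRST and wait up to 5ms for the
 * hardware to clear the bit, bracketed by the Wa_14015076503 GSC
 * workaround.
 */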
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

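/*
 * Re-apply everything a GT reset wipes: PAT, steering defaults, GT-level
 * save/restore registers, WOPCM, ring enables, GuC/HuC reload, MOCS and
 * the per-engine register and whitelist programming.
 */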
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	xe_pat_init(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	return 0;
}

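/*
 * Full GT reset sequence: quiesce the GuC and pagefault handling, reset
 * the hardware, then restart it. Only supported with GuC submission; on
 * failure the device is flagged for an FLR on driver fini.
 */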
static int gt_reset(struct xe_gt *gt)
{
	int err;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	err = xe_uc_stop(&gt->uc);
	if (err)
		goto err_out;

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(err);

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
	xe_device_mem_access_put(gt_to_xe(gt));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	gt_to_xe(gt)->needs_flr_on_fini = true;

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

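/*
 * Queue a GT reset on the GT's ordered workqueue, unless one is already in
 * flight.
 */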
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

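/*
 * First step of suspend: tell the GuC to stop taking new work while the
 * device is still fully powered.
 */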
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	xe_device_mem_access_get(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_device_mem_access_put(gt_to_xe(gt));
}

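/*
 * Suspend the GT: sanitize software state and suspend the GuC/HuC under
 * full forcewake.
 */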
int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_info(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

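/*
 * Resume the GT by replaying the same restart sequence used after a GT
 * reset.
 */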
int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_info(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

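/*
 * Look up a hw engine on @gt by class and instance. With @logical set,
 * @instance is matched against the logical instance number instead of the
 * physical one.
 */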
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		    (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

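/*
 * Return any engine on @gt sharing a reset domain with @class: render and
 * compute are treated as one domain, every other class only matches
 * itself.
 */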
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							 enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}