   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2022 Intel Corporation
   4 */
   5
   6#include "xe_gt.h"
   7
   8#include <linux/minmax.h>
   9
  10#include <drm/drm_managed.h>
  11#include <uapi/drm/xe_drm.h>
  12
  13#include <generated/xe_wa_oob.h>
  14
  15#include "instructions/xe_gfxpipe_commands.h"
  16#include "instructions/xe_mi_commands.h"
  17#include "regs/xe_gt_regs.h"
  18#include "xe_assert.h"
  19#include "xe_bb.h"
  20#include "xe_bo.h"
  21#include "xe_device.h"
  22#include "xe_exec_queue.h"
  23#include "xe_execlist.h"
  24#include "xe_force_wake.h"
  25#include "xe_ggtt.h"
  26#include "xe_gsc.h"
  27#include "xe_gt_ccs_mode.h"
  28#include "xe_gt_clock.h"
  29#include "xe_gt_freq.h"
  30#include "xe_gt_idle.h"
  31#include "xe_gt_mcr.h"
  32#include "xe_gt_pagefault.h"
  33#include "xe_gt_printk.h"
  34#include "xe_gt_sriov_pf.h"
  35#include "xe_gt_sysfs.h"
  36#include "xe_gt_tlb_invalidation.h"
  37#include "xe_gt_topology.h"
  38#include "xe_guc_exec_queue_types.h"
  39#include "xe_guc_pc.h"
  40#include "xe_hw_fence.h"
  41#include "xe_hw_engine_class_sysfs.h"
  42#include "xe_irq.h"
  43#include "xe_lmtt.h"
  44#include "xe_lrc.h"
  45#include "xe_map.h"
  46#include "xe_migrate.h"
  47#include "xe_mmio.h"
  48#include "xe_pat.h"
  49#include "xe_pm.h"
  50#include "xe_mocs.h"
  51#include "xe_reg_sr.h"
  52#include "xe_ring_ops.h"
  53#include "xe_sa.h"
  54#include "xe_sched_job.h"
  55#include "xe_sriov.h"
  56#include "xe_tuning.h"
  57#include "xe_uc.h"
  58#include "xe_uc_fw.h"
  59#include "xe_vm.h"
  60#include "xe_wa.h"
  61#include "xe_wopcm.h"
  62
  63static void gt_fini(struct drm_device *drm, void *arg)
  64{
  65	struct xe_gt *gt = arg;
  66
  67	destroy_workqueue(gt->ordered_wq);
  68}
  69
  70struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
  71{
  72	struct xe_gt *gt;
  73	int err;
  74
  75	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
  76	if (!gt)
  77		return ERR_PTR(-ENOMEM);
  78
  79	gt->tile = tile;
  80	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
  81						 WQ_MEM_RECLAIM);
  82
  83	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
  84	if (err)
  85		return ERR_PTR(err);
  86
  87	return gt;
  88}
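
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a caller on the tile init path might allocate its GT roughly like
 * this.  The allocation is drmm-managed, so there is no matching free;
 * gt_fini() runs automatically when the drm_device is released.  The
 * tile->primary_gt field is an assumption of this sketch.
 */
static int __maybe_unused example_tile_alloc_gt(struct xe_tile *tile)
{
	struct xe_gt *gt = xe_gt_alloc(tile);

	if (IS_ERR(gt))
		return PTR_ERR(gt);

	tile->primary_gt = gt;	/* assumed field on struct xe_tile */

	return 0;
}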
  89
  90void xe_gt_sanitize(struct xe_gt *gt)
  91{
  92	/*
  93	 * FIXME: if xe_uc_sanitize() is called here, the driver will not
  94	 * reload on TGL
  95	 */
  96	gt->uc.guc.submission_state.enabled = false;
  97}
  98
  99static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
 100{
 101	unsigned int fw_ref;
 102	u32 reg;
 103
 104	if (!XE_WA(gt, 16023588340))
 105		return;
 106
 107	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 108	if (!fw_ref)
 109		return;
 110
 111	if (!xe_gt_is_media_type(gt)) {
 112		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
 113		reg |= CG_DIS_CNTLBUS;
 114		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
 115	}
 116
 117	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
 118	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 119}
 120
 121static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
 122{
 123	unsigned int fw_ref;
 124	u32 reg;
 125
 126	if (!XE_WA(gt, 16023588340))
 127		return;
 128
 129	if (xe_gt_is_media_type(gt))
 130		return;
 131
 132	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 133	if (!fw_ref)
 134		return;
 135
 136	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
 137	reg &= ~CG_DIS_CNTLBUS;
 138	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
 139
 140	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 141}
 142
 143/**
 144 * xe_gt_remove() - Clean up the GT structures before driver removal
 145 * @gt: the GT object
 146 *
 147 * This function should only act on objects/structures that must be cleaned
 148 * up before the driver removal callback completes and therefore can't be
 149 * deferred to a drmm action.
 150 */
 151void xe_gt_remove(struct xe_gt *gt)
 152{
 153	int i;
 154
 155	xe_uc_remove(&gt->uc);
 156
 157	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 158		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
 159
 160	xe_gt_disable_host_l2_vram(gt);
 161}
 162
 163static void gt_reset_worker(struct work_struct *w);
 164
 165static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
 166{
 167	struct xe_sched_job *job;
 168	struct xe_bb *bb;
 169	struct dma_fence *fence;
 170	long timeout;
 171
 172	bb = xe_bb_new(gt, 4, false);
 173	if (IS_ERR(bb))
 174		return PTR_ERR(bb);
 175
 176	job = xe_bb_create_job(q, bb);
 177	if (IS_ERR(job)) {
 178		xe_bb_free(bb, NULL);
 179		return PTR_ERR(job);
 180	}
 181
 182	xe_sched_job_arm(job);
 183	fence = dma_fence_get(&job->drm.s_fence->finished);
 184	xe_sched_job_push(job);
 185
 186	timeout = dma_fence_wait_timeout(fence, false, HZ);
 187	dma_fence_put(fence);
 188	xe_bb_free(bb, NULL);
 189	if (timeout < 0)
 190		return timeout;
 191	else if (!timeout)
 192		return -ETIME;
 193
 194	return 0;
 195}
 196
 197/*
 198 * Convert back from the encoded value to the type-safe form; only to be
 199 * used when reg.mcr is true.
 200 */
 201static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
 202{
 203	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
 204}
 205
 206static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
 207{
 208	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
 209	struct xe_reg_sr_entry *entry;
 210	unsigned long idx;
 211	struct xe_sched_job *job;
 212	struct xe_bb *bb;
 213	struct dma_fence *fence;
 214	long timeout;
 215	int count = 0;
 216
 217	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
 218		/* Big enough to emit all of the context's 3DSTATE */
 219		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
 220	else
 221		/* Just pick a large BB size */
 222		bb = xe_bb_new(gt, SZ_4K, false);
 223
 224	if (IS_ERR(bb))
 225		return PTR_ERR(bb);
 226
 227	xa_for_each(&sr->xa, idx, entry)
 228		++count;
 229
 230	if (count) {
 231		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
 232
 233		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
 234
 235		xa_for_each(&sr->xa, idx, entry) {
 236			struct xe_reg reg = entry->reg;
 237			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
 238			u32 val;
 239
 240			/*
 241			 * Skip reading the register if it's not really needed
 242			 */
 243			if (reg.masked)
 244				val = entry->clr_bits << 16;
 245			else if (entry->clr_bits + 1)
 246				val = (reg.mcr ?
 247				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
 248				       xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
 249			else
 250				val = 0;
 251
 252			val |= entry->set_bits;
 253
 254			bb->cs[bb->len++] = reg.addr;
 255			bb->cs[bb->len++] = val;
 256			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
 257		}
 258	}
 259
 260	xe_lrc_emit_hwe_state_instructions(q, bb);
 261
 262	job = xe_bb_create_job(q, bb);
 263	if (IS_ERR(job)) {
 264		xe_bb_free(bb, NULL);
 265		return PTR_ERR(job);
 266	}
 267
 268	xe_sched_job_arm(job);
 269	fence = dma_fence_get(&job->drm.s_fence->finished);
 270	xe_sched_job_push(job);
 271
 272	timeout = dma_fence_wait_timeout(fence, false, HZ);
 273	dma_fence_put(fence);
 274	xe_bb_free(bb, NULL);
 275	if (timeout < 0)
 276		return timeout;
 277	else if (!timeout)
 278		return -ETIME;
 279
 280	return 0;
 281}
 282
 283int xe_gt_record_default_lrcs(struct xe_gt *gt)
 284{
 285	struct xe_device *xe = gt_to_xe(gt);
 286	struct xe_hw_engine *hwe;
 287	enum xe_hw_engine_id id;
 288	int err = 0;
 289
 290	for_each_hw_engine(hwe, gt, id) {
 291		struct xe_exec_queue *q, *nop_q;
 292		void *default_lrc;
 293
 294		if (gt->default_lrc[hwe->class])
 295			continue;
 296
 297		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
 298		xe_wa_process_lrc(hwe);
 299		xe_hw_engine_setup_default_lrc_state(hwe);
 300		xe_tuning_process_lrc(hwe);
 301
 302		default_lrc = drmm_kzalloc(&xe->drm,
 303					   xe_gt_lrc_size(gt, hwe->class),
 304					   GFP_KERNEL);
 305		if (!default_lrc)
 306			return -ENOMEM;
 307
 308		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
 309					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
 310		if (IS_ERR(q)) {
 311			err = PTR_ERR(q);
 312			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
 313				  hwe->name, q);
 314			return err;
 315		}
 316
 317		/* Prime golden LRC with known good state */
 318		err = emit_wa_job(gt, q);
 319		if (err) {
 320			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
 321				  hwe->name, ERR_PTR(err), q->guc->id);
 322			goto put_exec_queue;
 323		}
 324
 325		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
 326					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
 327		if (IS_ERR(nop_q)) {
 328			err = PTR_ERR(nop_q);
 329			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
 330				  hwe->name, nop_q);
 331			goto put_exec_queue;
 332		}
 333
 334		/* Switch to different LRC */
 335		err = emit_nop_job(gt, nop_q);
 336		if (err) {
 337			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
 338				  hwe->name, ERR_PTR(err), nop_q->guc->id);
 339			goto put_nop_q;
 340		}
 341
 342		/* Reload golden LRC to record the effect of any indirect W/A */
 343		err = emit_nop_job(gt, q);
 344		if (err) {
 345			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
 346				  hwe->name, ERR_PTR(err), q->guc->id);
 347			goto put_nop_q;
 348		}
 349
 350		xe_map_memcpy_from(xe, default_lrc,
 351				   &q->lrc[0]->bo->vmap,
 352				   xe_lrc_pphwsp_offset(q->lrc[0]),
 353				   xe_gt_lrc_size(gt, hwe->class));
 354
 355		gt->default_lrc[hwe->class] = default_lrc;
 356put_nop_q:
 357		xe_exec_queue_put(nop_q);
 358put_exec_queue:
 359		xe_exec_queue_put(q);
 360		if (err)
 361			break;
 362	}
 363
 364	return err;
 365}
 366
 367int xe_gt_init_early(struct xe_gt *gt)
 368{
 369	int err;
 370
 371	if (IS_SRIOV_PF(gt_to_xe(gt))) {
 372		err = xe_gt_sriov_pf_init_early(gt);
 373		if (err)
 374			return err;
 375	}
 376
 377	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
 378
 379	err = xe_wa_init(gt);
 380	if (err)
 381		return err;
 382
 383	xe_wa_process_oob(gt);
 384
 385	xe_force_wake_init_gt(gt, gt_to_fw(gt));
 386	spin_lock_init(&gt->global_invl_lock);
 387
 388	err = xe_gt_tlb_invalidation_init_early(gt);
 389	if (err)
 390		return err;
 391
 392	return 0;
 393}
 394
 395static void dump_pat_on_error(struct xe_gt *gt)
 396{
 397	struct drm_printer p;
 398	char prefix[32];
 399
 400	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
 401	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);
 402
 403	xe_pat_dump(gt, &p);
 404}
 405
 406static int gt_fw_domain_init(struct xe_gt *gt)
 407{
 408	unsigned int fw_ref;
 409	int err, i;
 410
 411	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 412	if (!fw_ref) {
 413		err = -ETIMEDOUT;
 414		goto err_hw_fence_irq;
 415	}
 416
 417	if (!xe_gt_is_media_type(gt)) {
 418		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
 419		if (err)
 420			goto err_force_wake;
 421		if (IS_SRIOV_PF(gt_to_xe(gt)))
 422			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
 423	}
 424
 425	/* Enable per hw engine IRQs */
 426	xe_irq_enable_hwe(gt);
 427
 428	/* Rerun MCR init as we now have hw engine list */
 429	xe_gt_mcr_init(gt);
 430
 431	err = xe_hw_engines_init_early(gt);
 432	if (err)
 433		goto err_force_wake;
 434
 435	err = xe_hw_engine_class_sysfs_init(gt);
 436	if (err)
 437		goto err_force_wake;
 438
 439	/* Initialize CCS mode sysfs after early initialization of HW engines */
 440	err = xe_gt_ccs_mode_sysfs_init(gt);
 441	if (err)
 442		goto err_force_wake;
 443
 444	/*
 445	 * Stash hardware-reported version.  Since this register does not exist
 446	 * on pre-MTL platforms, reading it there will (correctly) return 0.
 447	 */
 448	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
 449
 450	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 451	return 0;
 452
 453err_force_wake:
 454	dump_pat_on_error(gt);
 455	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 456err_hw_fence_irq:
 457	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 458		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
 459
 460	return err;
 461}
 462
 463static int all_fw_domain_init(struct xe_gt *gt)
 464{
 465	unsigned int fw_ref;
 466	int err, i;
 467
 468	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 469	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
 470		err = -ETIMEDOUT;
 471		goto err_force_wake;
 472	}
 473
 474	xe_gt_mcr_set_implicit_defaults(gt);
 475	xe_wa_process_gt(gt);
 476	xe_tuning_process_gt(gt);
 477	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
 478
 479	err = xe_gt_clock_init(gt);
 480	if (err)
 481		goto err_force_wake;
 482
 483	xe_mocs_init(gt);
 484	err = xe_execlist_init(gt);
 485	if (err)
 486		goto err_force_wake;
 487
 488	err = xe_hw_engines_init(gt);
 489	if (err)
 490		goto err_force_wake;
 491
 492	err = xe_uc_init_post_hwconfig(&gt->uc);
 493	if (err)
 494		goto err_force_wake;
 495
 496	if (!xe_gt_is_media_type(gt)) {
 497		/*
 498		 * USM has its own SA pool so it doesn't block behind user operations
 499		 */
 500		if (gt_to_xe(gt)->info.has_usm) {
 501			struct xe_device *xe = gt_to_xe(gt);
 502
 503			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
 504								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
 505			if (IS_ERR(gt->usm.bb_pool)) {
 506				err = PTR_ERR(gt->usm.bb_pool);
 507				goto err_force_wake;
 508			}
 509		}
 510	}
 511
 512	if (!xe_gt_is_media_type(gt)) {
 513		struct xe_tile *tile = gt_to_tile(gt);
 514
 515		tile->migrate = xe_migrate_init(tile);
 516		if (IS_ERR(tile->migrate)) {
 517			err = PTR_ERR(tile->migrate);
 518			goto err_force_wake;
 519		}
 520	}
 521
 522	err = xe_uc_init_hw(&gt->uc);
 523	if (err)
 524		goto err_force_wake;
 525
 526	/* Configure default CCS mode of 1 engine with all resources */
 527	if (xe_gt_ccs_mode_enabled(gt)) {
 528		gt->ccs_mode = 1;
 529		xe_gt_apply_ccs_mode(gt);
 530	}
 531
 532	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
 533		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
 534
 535	if (IS_SRIOV_PF(gt_to_xe(gt))) {
 536		xe_gt_sriov_pf_init(gt);
 537		xe_gt_sriov_pf_init_hw(gt);
 538	}
 539
 540	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 541
 542	return 0;
 543
 544err_force_wake:
 545	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 546	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 547		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
 548
 549	return err;
 550}
 551
 552/*
 553 * Initialize enough of the GT to be able to load the GuC, obtain the hwconfig
 554 * and enable CTB communication.
 555 */
 556int xe_gt_init_hwconfig(struct xe_gt *gt)
 557{
 558	unsigned int fw_ref;
 559	int err;
 560
 561	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 562	if (!fw_ref)
 563		return -ETIMEDOUT;
 564
 565	xe_gt_mcr_init_early(gt);
 566	xe_pat_init(gt);
 567
 568	err = xe_uc_init(&gt->uc);
 569	if (err)
 570		goto out_fw;
 571
 572	err = xe_uc_init_hwconfig(&gt->uc);
 573	if (err)
 574		goto out_fw;
 575
 576	xe_gt_topology_init(gt);
 577	xe_gt_mcr_init(gt);
 578	xe_gt_enable_host_l2_vram(gt);
 579
 580out_fw:
 581	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 582	return err;
 583}
 584
 585int xe_gt_init(struct xe_gt *gt)
 586{
 587	int err;
 588	int i;
 589
 590	INIT_WORK(&gt->reset.worker, gt_reset_worker);
 591
 592	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
 593		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
 594		xe_hw_fence_irq_init(&gt->fence_irq[i]);
 595	}
 596
 597	err = xe_gt_pagefault_init(gt);
 598	if (err)
 599		return err;
 600
 601	xe_mocs_init_early(gt);
 602
 603	err = xe_gt_sysfs_init(gt);
 604	if (err)
 605		return err;
 606
 607	err = gt_fw_domain_init(gt);
 608	if (err)
 609		return err;
 610
 611	err = xe_gt_idle_init(&gt->gtidle);
 612	if (err)
 613		return err;
 614
 615	err = xe_gt_freq_init(gt);
 616	if (err)
 617		return err;
 618
 619	xe_force_wake_init_engines(gt, gt_to_fw(gt));
 620
 621	err = all_fw_domain_init(gt);
 622	if (err)
 623		return err;
 624
 625	xe_gt_record_user_engines(gt);
 626
 627	return 0;
 628}
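
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the probe path is assumed to stage GT bring-up roughly as below: early
 * init first, then just enough init to fetch the hwconfig from the GuC,
 * then the full init once the rest of the device is ready.  The exact call
 * sites and ordering in the real driver live outside this file.
 */
static int __maybe_unused example_gt_bringup(struct xe_gt *gt)
{
	int err;

	err = xe_gt_init_early(gt);
	if (err)
		return err;

	err = xe_gt_init_hwconfig(gt);
	if (err)
		return err;

	return xe_gt_init(gt);
}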
 629
 630/**
 631 * xe_gt_mmio_init() - Initialize GT's MMIO access
 632 * @gt: the GT object
 633 *
 634 * Initialize GT's MMIO accessor, which will be used to access registers inside
 635 * this GT.
 636 */
 637void xe_gt_mmio_init(struct xe_gt *gt)
 638{
 639	struct xe_tile *tile = gt_to_tile(gt);
 640
 641	gt->mmio.regs = tile->mmio.regs;
 642	gt->mmio.regs_size = tile->mmio.regs_size;
 643	gt->mmio.tile = tile;
 644
 645	if (gt->info.type == XE_GT_TYPE_MEDIA) {
 646		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
 647		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
 648	}
 649
 650	if (IS_SRIOV_VF(gt_to_xe(gt)))
 651		gt->mmio.sriov_vf_gt = gt;
 652}
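
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * once xe_gt_mmio_init() has run, register accesses go through the per-GT
 * accessor, which applies adj_offset to media-GT registers below adj_limit.
 * GMD_ID is used here only because it already appears above; any GT
 * register would do.
 */
static u32 __maybe_unused example_read_gmdid(struct xe_gt *gt)
{
	return xe_mmio_read32(&gt->mmio, GMD_ID);
}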
 653
 654void xe_gt_record_user_engines(struct xe_gt *gt)
 655{
 656	struct xe_hw_engine *hwe;
 657	enum xe_hw_engine_id id;
 658
 659	gt->user_engines.mask = 0;
 660	memset(gt->user_engines.instances_per_class, 0,
 661	       sizeof(gt->user_engines.instances_per_class));
 662
 663	for_each_hw_engine(hwe, gt, id) {
 664		if (xe_hw_engine_is_reserved(hwe))
 665			continue;
 666
 667		gt->user_engines.mask |= BIT_ULL(id);
 668		gt->user_engines.instances_per_class[hwe->class]++;
 669	}
 670
 671	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
 672		     == gt->info.engine_mask);
 673}
 674
 675static int do_gt_reset(struct xe_gt *gt)
 676{
 677	int err;
 678
 679	xe_gsc_wa_14015076503(gt, true);
 680
 681	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
 682	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
 683	if (err)
 684		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
 685			  ERR_PTR(err));
 686
 687	xe_gsc_wa_14015076503(gt, false);
 688
 689	return err;
 690}
 691
 692static int vf_gt_restart(struct xe_gt *gt)
 693{
 694	int err;
 695
 696	err = xe_uc_sanitize_reset(&gt->uc);
 697	if (err)
 698		return err;
 699
 700	err = xe_uc_init_hw(&gt->uc);
 701	if (err)
 702		return err;
 703
 704	err = xe_uc_start(&gt->uc);
 705	if (err)
 706		return err;
 707
 708	return 0;
 709}
 710
 711static int do_gt_restart(struct xe_gt *gt)
 712{
 713	struct xe_hw_engine *hwe;
 714	enum xe_hw_engine_id id;
 715	int err;
 716
 717	if (IS_SRIOV_VF(gt_to_xe(gt)))
 718		return vf_gt_restart(gt);
 719
 720	xe_pat_init(gt);
 721
 722	xe_gt_enable_host_l2_vram(gt);
 723
 724	xe_gt_mcr_set_implicit_defaults(gt);
 725	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
 726
 727	err = xe_wopcm_init(&gt->uc.wopcm);
 728	if (err)
 729		return err;
 730
 731	for_each_hw_engine(hwe, gt, id)
 732		xe_hw_engine_enable_ring(hwe);
 733
 734	err = xe_uc_sanitize_reset(&gt->uc);
 735	if (err)
 736		return err;
 737
 738	err = xe_uc_init_hw(&gt->uc);
 739	if (err)
 740		return err;
 741
 742	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
 743		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
 744
 745	if (IS_SRIOV_PF(gt_to_xe(gt)))
 746		xe_gt_sriov_pf_init_hw(gt);
 747
 748	xe_mocs_init(gt);
 749	err = xe_uc_start(&gt->uc);
 750	if (err)
 751		return err;
 752
 753	for_each_hw_engine(hwe, gt, id) {
 754		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
 755		xe_reg_sr_apply_whitelist(hwe);
 756	}
 757
 758	/* Get CCS mode in sync between sw/hw */
 759	xe_gt_apply_ccs_mode(gt);
 760
 761	/* Restore GT freq to expected values */
 762	xe_gt_sanitize_freq(gt);
 763
 764	if (IS_SRIOV_PF(gt_to_xe(gt)))
 765		xe_gt_sriov_pf_restart(gt);
 766
 767	return 0;
 768}
 769
 770static int gt_reset(struct xe_gt *gt)
 771{
 772	unsigned int fw_ref;
 773	int err;
 774
 775	if (xe_device_wedged(gt_to_xe(gt)))
 776		return -ECANCELED;
 777
 778	/* We only support GT resets with GuC submission */
 779	if (!xe_device_uc_enabled(gt_to_xe(gt)))
 780		return -ENODEV;
 781
 782	xe_gt_info(gt, "reset started\n");
 783
 784	xe_pm_runtime_get(gt_to_xe(gt));
 785
 786	if (xe_fault_inject_gt_reset()) {
 787		err = -ECANCELED;
 788		goto err_fail;
 789	}
 790
 791	xe_gt_sanitize(gt);
 792
 793	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 794	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
 795		err = -ETIMEDOUT;
 796		goto err_out;
 797	}
 798
 799	xe_uc_gucrc_disable(&gt->uc);
 800	xe_uc_stop_prepare(&gt->uc);
 801	xe_gt_pagefault_reset(gt);
 802
 803	xe_uc_stop(&gt->uc);
 804
 805	xe_gt_tlb_invalidation_reset(gt);
 806
 807	err = do_gt_reset(gt);
 808	if (err)
 809		goto err_out;
 810
 811	err = do_gt_restart(gt);
 812	if (err)
 813		goto err_out;
 814
 815	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 816	xe_pm_runtime_put(gt_to_xe(gt));
 817
 818	xe_gt_info(gt, "reset done\n");
 819
 820	return 0;
 821
 822err_out:
 823	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 824	XE_WARN_ON(xe_uc_start(&gt->uc));
 825err_fail:
 826	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
 827
 828	xe_device_declare_wedged(gt_to_xe(gt));
 829	xe_pm_runtime_put(gt_to_xe(gt));
 830
 831	return err;
 832}
 833
 834static void gt_reset_worker(struct work_struct *w)
 835{
 836	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
 837
 838	gt_reset(gt);
 839}
 840
 841void xe_gt_reset_async(struct xe_gt *gt)
 842{
 843	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));
 844
 845	/* Don't do a reset while one is already in flight */
 846	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
 847		return;
 848
 849	xe_gt_info(gt, "reset queued\n");
 850	queue_work(gt->ordered_wq, &gt->reset.worker);
 851}
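
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * an error handler, e.g. a hypothetical hang detector, would request a full
 * GT reset asynchronously via xe_gt_reset_async() rather than resetting
 * inline; the actual reset then runs from gt_reset_worker() on the GT's
 * ordered workqueue.
 */
static void __maybe_unused example_handle_hang(struct xe_gt *gt)
{
	xe_gt_err(gt, "hang detected, requesting GT reset\n");
	xe_gt_reset_async(gt);
}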
 852
 853void xe_gt_suspend_prepare(struct xe_gt *gt)
 854{
 855	unsigned int fw_ref;
 856
 857	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 858
 859	xe_uc_stop_prepare(&gt->uc);
 860
 861	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 862}
 863
 864int xe_gt_suspend(struct xe_gt *gt)
 865{
 866	unsigned int fw_ref;
 867	int err;
 868
 869	xe_gt_dbg(gt, "suspending\n");
 870	xe_gt_sanitize(gt);
 871
 872	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 873	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
 874		goto err_msg;
 875
 876	err = xe_uc_suspend(&gt->uc);
 877	if (err)
 878		goto err_force_wake;
 879
 880	xe_gt_idle_disable_pg(gt);
 881
 882	xe_gt_disable_host_l2_vram(gt);
 883
 884	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 885	xe_gt_dbg(gt, "suspended\n");
 886
 887	return 0;
 888
 889err_msg:
 890	err = -ETIMEDOUT;
 891err_force_wake:
 892	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 893	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
 894
 895	return err;
 896}
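
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a device-level suspend path would typically call xe_gt_suspend_prepare()
 * and then xe_gt_suspend() for each GT.  The for_each_gt() iterator is
 * assumed to be provided by the device layer.
 */
static int __maybe_unused example_suspend_all_gts(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			return err;
	}

	return 0;
}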
 897
 898void xe_gt_shutdown(struct xe_gt *gt)
 899{
 900	unsigned int fw_ref;
 901
 902	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 903	do_gt_reset(gt);
 904	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 905}
 906
 907/**
 908 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 909 * @gt: the GT object
 910 *
 911 * Called after driver init/GSC load completes to restore GT frequencies if we
 912 * limited them for any WAs.
 913 */
 914int xe_gt_sanitize_freq(struct xe_gt *gt)
 915{
 916	int ret = 0;
 917
 918	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
 919	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
 920	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
 921	    XE_WA(gt, 22019338487))
 922		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
 923
 924	return ret;
 925}
 926
 927int xe_gt_resume(struct xe_gt *gt)
 928{
 929	unsigned int fw_ref;
 930	int err;
 931
 932	xe_gt_dbg(gt, "resuming\n");
 933	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 934	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
 935		goto err_msg;
 936
 937	err = do_gt_restart(gt);
 938	if (err)
 939		goto err_force_wake;
 940
 941	xe_gt_idle_enable_pg(gt);
 942
 943	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 944	xe_gt_dbg(gt, "resumed\n");
 945
 946	return 0;
 947
 948err_msg:
 949	err = -ETIMEDOUT;
 950err_force_wake:
 951	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 952	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
 953
 954	return err;
 955}
 956
 957struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
 958				     enum xe_engine_class class,
 959				     u16 instance, bool logical)
 960{
 961	struct xe_hw_engine *hwe;
 962	enum xe_hw_engine_id id;
 963
 964	for_each_hw_engine(hwe, gt, id)
 965		if (hwe->class == class &&
 966		    ((!logical && hwe->instance == instance) ||
 967		    (logical && hwe->logical_instance == instance)))
 968			return hwe;
 969
 970	return NULL;
 971}
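
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * look up logical render instance 0 on this GT; a NULL return means the
 * engine is absent or fused off.
 */
static struct xe_hw_engine * __maybe_unused example_find_rcs0(struct xe_gt *gt)
{
	return xe_gt_hw_engine(gt, XE_ENGINE_CLASS_RENDER, 0, true);
}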
 972
 973struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
 974							 enum xe_engine_class class)
 975{
 976	struct xe_hw_engine *hwe;
 977	enum xe_hw_engine_id id;
 978
 979	for_each_hw_engine(hwe, gt, id) {
 980		switch (class) {
 981		case XE_ENGINE_CLASS_RENDER:
 982		case XE_ENGINE_CLASS_COMPUTE:
 983			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
 984			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
 985				return hwe;
 986			break;
 987		default:
 988			if (hwe->class == class)
 989				return hwe;
 990		}
 991	}
 992
 993	return NULL;
 994}
 995
 996struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
 997{
 998	struct xe_hw_engine *hwe;
 999	enum xe_hw_engine_id id;
1000
1001	for_each_hw_engine(hwe, gt, id)
1002		return hwe;
1003
1004	return NULL;
1005}
1006
1007/**
1008 * xe_gt_declare_wedged() - Declare GT wedged
1009 * @gt: the GT object
1010 *
1011 * Wedge the GT, which stops all submission, saves the desired debug state,
1012 * and cleans up anything which could time out.
1013 */
1014void xe_gt_declare_wedged(struct xe_gt *gt)
1015{
1016	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
1017
1018	xe_uc_declare_wedged(&gt->uc);
1019	xe_gt_tlb_invalidation_reset(gt);
1020}