// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_guc_ads.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sriov.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr = xe_bo_ggtt_addr(bo);

	/*
	 * GuC-addressable GGTT space sits between the WOPCM (reserved at the
	 * bottom of the GGTT) and GUC_GGTT_TOP; addresses above GUC_GGTT_TOP
	 * don't map through the GTT.
	 */
	xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);

	return addr;
}

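/*
 * Build the GUC_CTL_DEBUG init param: non-verbose log levels disable GuC
 * logging entirely, while verbose levels are translated into the firmware's
 * verbosity field.
 */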
static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;

	if (!guc_to_xe(guc)->info.skip_guc_pc)
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

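/*
 * Build the GUC_CTL_LOG_PARAMS init param: the GGTT page offset of the log
 * buffer plus the sizes of its crash/debug/capture sections. Section sizes
 * are encoded in either 1M or 4K units; the LOG_UNIT/CAPTURE_UNIT macros
 * below pick the unit at compile time and set the matching ALLOC_UNITS flag.
 */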
static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
			(GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
		 GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (XE_WA(gt, 22011391025))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if (XE_WA(gt, 18020744125) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_WA(gt, 1509372804))
		flags |= GUC_WA_RENDER_RST_RC6_EXIT;

	if (XE_WA(gt, 14018913170))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_print_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

static void guc_init_params(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
}

static void guc_fini_hw(void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_gt *gt = guc_to_gt(guc);
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

/**
 * xe_guc_comm_init_early - early initialization of GuC communication
 * @guc: the &xe_guc to initialize
 *
 * Must be called prior to first MMIO communication with GuC firmware.
 */
void xe_guc_comm_init_early(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;
}

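/*
 * On dGFX, the GuC objects are initially allocated in system memory,
 * presumably because they are created before VRAM is ready for use; once
 * the hwconfig stage is reached, move the firmware, log, ADS and CT buffers
 * into VRAM. Integrated parts keep them in system memory.
 */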
static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
{
	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	if (!IS_DGFX(guc_to_xe(guc)))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
	if (ret)
		return ret;

	return 0;
}

static int vf_guc_init(struct xe_guc *guc)
{
	int err;

	xe_guc_comm_init_early(guc);

	err = xe_guc_ct_init(&guc->ct);
	if (err)
		return err;

	err = xe_guc_relay_init(&guc->relay);
	if (err)
		return err;

	return 0;
}

int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	if (IS_SRIOV_VF(xe)) {
		ret = vf_guc_init(guc);
		if (ret)
			goto out;
		return 0;
	}

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_capture_init(guc);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_relay_init(&guc->relay);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	xe_guc_comm_init_early(guc);

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int err;

	err = xe_guc_submit_init(guc, xe_gt_sriov_vf_guc_ids(guc_to_gt(guc)));
	if (err)
		return err;

	/* XXX xe_guc_db_mgr_init not needed for now */

	return 0;
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_init_post_hwconfig(guc);

	ret = xe_guc_realloc_post_hwconfig(guc);
	if (ret)
		return ret;

	guc_init_params_post_hwconfig(guc);

	ret = xe_guc_submit_init(guc, ~0);
	if (ret)
		return ret;

	ret = xe_guc_db_mgr_init(&guc->dbm, ~0);
	if (ret)
		return ret;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		return ret;

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	xe_guc_ads_populate_post_load(&guc->ads);
	guc->submission_state.enabled = true;

	return 0;
}

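/*
 * Reset the GuC by asserting GRDOM_GUC in GDRST and waiting for the hardware
 * to clear it again; afterwards the MIA core is expected to report as being
 * in reset via GUC_STATUS. VFs go through the SR-IOV bootstrap flow instead.
 */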
int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_bootstrap(gt);

	xe_mmio_write32(mmio, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(mmio, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
			  guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:

	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	/* Make sure GuC receives ARAT interrupts */
	xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}

/*
 * Hand the RSA signature to the hardware: keys larger than 256 bytes are
 * passed by GGTT address, smaller ones are copied directly into the
 * UOS_RSA_SCRATCH registers.
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

/*
 * Check a previously read GuC status register (GUC_STATUS) looking for
 * known terminal states (either completion or failure) of either the
 * microkernel status field or the boot ROM status field. Returns +1 for
 * successful completion, -1 for failure and 0 for any intermediate state.
 */
static int guc_load_done(u32 status)
{
	u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, status);
	u32 br_val = REG_FIELD_GET(GS_BOOTROM_MASK, status);

	switch (uk_val) {
	case XE_GUC_LOAD_STATUS_READY:
		return 1;

	case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
	case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
	case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
	case XE_GUC_LOAD_STATUS_DPC_ERROR:
	case XE_GUC_LOAD_STATUS_EXCEPTION:
	case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
	case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
		return -1;
	}

	switch (br_val) {
	case XE_BOOTROM_STATUS_NO_KEY_FOUND:
	case XE_BOOTROM_STATUS_RSA_FAILED:
	case XE_BOOTROM_STATUS_PAVPC_FAILED:
	case XE_BOOTROM_STATUS_WOPCM_FAILED:
	case XE_BOOTROM_STATUS_LOADLOC_FAILED:
	case XE_BOOTROM_STATUS_JUMP_FAILED:
	case XE_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
	case XE_BOOTROM_STATUS_MPUMAP_INCORRECT:
	case XE_BOOTROM_STATUS_EXCEPTION:
	case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
		return -1;
	}

	return 0;
}

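/*
 * Helper for the log messages below: fold the currently requested frequency
 * and any read error into a single value (frequency on success, negative
 * error code otherwise).
 */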
static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
{
	u32 freq;
	int ret = xe_guc_pc_get_cur_freq(guc_pc, &freq);

	return ret ? ret : freq;
}

/*
 * Wait for the GuC to start up.
 *
 * Measurements indicate this should take no more than 20ms (assuming the GT
 * clock is at maximum frequency). However, thermal throttling and other issues
 * can prevent the clock from hitting max and thus make the load take
 * significantly longer. Allow up to 200ms as a safety margin for real world
 * worst case situations.
 *
 * However, bugs anywhere from KMD to GuC to PCODE to fan failure in a CI farm
 * can lead to even longer times. E.g. if the GT is clamped to minimum frequency
 * then the load times can be in the seconds range. So the timeout is increased
 * for debug builds to ensure that problems can be correctly analysed. For
 * release builds, the timeout is kept short so that users don't wait forever to
 * find out that there is a problem. In either case, if the load took longer
 * than is reasonable even with some 'sensible' throttling, then flag a warning
 * because something is not right.
 *
 * Note that there is a limit on how long an individual usleep_range() can wait
 * for, hence longer waits require wrapping a shorter wait in a loop.
 *
 * Note that the only reason an end user should hit the shorter timeout is in
 * case of extreme thermal throttling. And a system that is that hot during
 * boot is probably dead anyway!
 */
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define GUC_LOAD_RETRY_LIMIT	20
#else
#define GUC_LOAD_RETRY_LIMIT	3
#endif
#define GUC_LOAD_TIME_WARN_MS      200

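/*
 * GUC_LOAD_RETRY_LIMIT is effectively the total load timeout in seconds:
 * guc_wait_ucode() below bails out once the accumulated wait time reaches
 * GUC_LOAD_RETRY_LIMIT * 1000 ms.
 */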
static void guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
	ktime_t before, after, delta;
	int load_done;
	u32 status = 0;
	int count = 0;
	u64 delta_ms;
	u32 before_freq;

	before_freq = xe_guc_pc_get_act_freq(guc_pc);
	before = ktime_get();
	/*
	 * Note, can't use any kind of timing information from the call to xe_mmio_wait.
	 * It could return a thousand intermediate stages at random times. Instead, must
	 * manually track the total time taken and locally implement the timeout.
	 */
	do {
		u32 last_status = status & (GS_UKERNEL_MASK | GS_BOOTROM_MASK);
		int ret;

		/*
		 * Wait for any change (intermediate or terminal) in the status register.
		 * Note, the return value is a don't care. The only failure code is timeout
		 * but the timeouts need to be accumulated over all the intermediate partial
		 * timeouts rather than allowing a huge timeout each time. So basically, need
		 * to treat a timeout no different to a value change.
		 */
		ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
					 last_status, 1000 * 1000, &status, false);
		if (ret < 0)
			count++;
		after = ktime_get();
		delta = ktime_sub(after, before);
		delta_ms = ktime_to_ms(delta);

		load_done = guc_load_done(status);
		if (load_done != 0)
			break;

		if (delta_ms >= (GUC_LOAD_RETRY_LIMIT * 1000))
			break;

		xe_gt_dbg(gt, "load still in progress, timeouts = %d, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
			  count, xe_guc_pc_get_act_freq(guc_pc),
			  guc_pc_get_cur_freq(guc_pc), status,
			  REG_FIELD_GET(GS_BOOTROM_MASK, status),
			  REG_FIELD_GET(GS_UKERNEL_MASK, status));
	} while (1);

	if (load_done != 1) {
		u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
		u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);

		xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz), done = %d\n",
			  status, delta_ms, xe_guc_pc_get_act_freq(guc_pc),
			  guc_pc_get_cur_freq(guc_pc), load_done);
		xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
			  REG_FIELD_GET(GS_MIA_IN_RESET, status),
			  bootrom, ukernel,
			  REG_FIELD_GET(GS_MIA_MASK, status),
			  REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

		switch (bootrom) {
		case XE_BOOTROM_STATUS_NO_KEY_FOUND:
			xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
				  xe_mmio_read32(mmio, GUC_HEADER_INFO));
			break;

		case XE_BOOTROM_STATUS_RSA_FAILED:
			xe_gt_err(gt, "firmware signature verification failed\n");
			break;

		case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
			xe_gt_err(gt, "firmware production part check failure\n");
			break;
		}

		switch (ukernel) {
		case XE_GUC_LOAD_STATUS_EXCEPTION:
			xe_gt_err(gt, "firmware exception. EIP: %#x\n",
				  xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
			break;

		case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
			xe_gt_err(gt, "illegal register in save/restore workaround list\n");
			break;

		case XE_GUC_LOAD_STATUS_HWCONFIG_START:
			xe_gt_err(gt, "still extracting hwconfig table.\n");
			break;
		}

		xe_device_declare_wedged(gt_to_xe(gt));
	} else if (delta_ms > GUC_LOAD_TIME_WARN_MS) {
		xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n",
			   delta_ms, status, count);
		xe_gt_warn(gt, "excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			   xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
			   before_freq, xe_gt_throttle_get_limit_reasons(gt));
	} else {
		xe_gt_dbg(gt, "init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X, timeouts = %d\n",
			  delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
			  before_freq, status, count);
	}
}

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	/* Raise GT freq to speed up HuC/GuC load */
	xe_guc_pc_raise_unslice(&guc->pc);

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;
	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	guc_wait_ucode(guc);

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return 0	/* FIXME: ret, don't want to stop load currently */;
}

static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	ret = xe_gt_sriov_vf_bootstrap(gt);
	if (ret)
		return ret;

	ret = xe_gt_sriov_vf_query_config(gt);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	ret = xe_gt_sriov_vf_connect(gt);
	if (ret)
		return ret;

	ret = xe_gt_sriov_vf_query_runtime(gt);
	if (ret)
		return ret;

	return 0;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return vf_guc_min_load_for_hwconfig(guc);

	xe_guc_ads_populate_minimal(&guc->ads);

	xe_guc_pc_init_early(&guc->pc);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_upload(struct xe_guc *guc)
{
	xe_guc_ads_populate(&guc->ads);

	return __xe_guc_upload(guc);
}

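/*
 * Before CT communication is up, the GuC can post early crash-dump or
 * exception notifications through SOFT_SCRATCH(15); read and clear the
 * register and shout if anything was flagged.
 */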
static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		xe_gt_err(gt, "Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		xe_gt_err(gt, "Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
}

int xe_guc_enable_communication(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	int err;

	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
		struct xe_gt *gt = guc_to_gt(guc);
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_memirq_init_guc(&tile->memirq, guc);
		if (err)
			return err;
	} else {
		guc_enable_irq(guc);
	}

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

int xe_guc_suspend(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};
	int ret;

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use default value in the meantime.
	 */
	xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
}

int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

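/*
 * Send a HXG request to the GuC over MMIO and wait for its response. This
 * path is only legal while CT communication is down (see the assert below):
 * the request dwords are written into the VF_SW_FLAG (or MED_VF_SW_FLAG)
 * scratch registers, the GuC is poked via xe_guc_notify(), and the reply is
 * then polled out of the same registers.
 */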
int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_mmio *mmio = &gt->mmio;
	u32 header, reply;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in a critical data path, so a simple if/else on GT type is fine */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(mmio, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &reply, false);
	if (ret) {
timeout:
		xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
			  request[0], reply);
		return ret;
	}

	header = xe_mmio_read32(mmio, reply_reg);
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask anymore.
		 * To spot the right change in the reply, take advantage of the
		 * fact that the SUCCESS and FAILURE response types differ only
		 * by a single bit while all their other bits are set, so those
		 * common bits can be used as the new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
				     1000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret)) {
			if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
			    GUC_HXG_TYPE_NO_RESPONSE_BUSY)
				goto proto;
			goto timeout;
		}
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
			  request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
			  request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
			  request[0], header);
		return -EPROTO;
	}

	/* Just copy the entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(mmio, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}

int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

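/*
 * Push a single KLV (key/length/value) into the GuC's self-configuration,
 * over MMIO since this runs before CT is available. Per the checks below,
 * the GuC is expected to report exactly one accepted KLV in DATA0 of the
 * response; zero means the key was rejected.
 */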
static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);
}

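/*
 * Bring the GuC software state back to a pre-load state: sanitize the
 * firmware status, tear down CT communication and mark submission as
 * disabled until the next (re)load.
 */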
void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_sanitize(&guc->fw);
	xe_guc_ct_disable(&guc->ct);
	guc->submission_state.enabled = false;
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	if (!IS_SRIOV_VF(guc_to_xe(guc))) {
		int err;

		err = xe_guc_pc_stop(&guc->pc);
		xe_gt_WARN(guc_to_gt(guc), err, "Failed to stop GuC PC: %pe\n",
			   ERR_PTR(err));
	}
}

void xe_guc_stop(struct xe_guc *guc)
{
	xe_guc_ct_stop(&guc->ct);

	xe_guc_submit_stop(guc);
}

int xe_guc_start(struct xe_guc *guc)
{
	if (!IS_SRIOV_VF(guc_to_xe(guc))) {
		int err;

		err = xe_guc_pc_start(&guc->pc);
		xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n",
			   ERR_PTR(err));
	}

	return xe_guc_submit_start(guc);
}

void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	unsigned int fw_ref;
	u32 status;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	status = xe_mmio_read32(&gt->mmio, GUC_STATUS);

	drm_printf(p, "\nGuC status 0x%08x:\n", status);
	drm_printf(p, "\tBootrom status = 0x%x\n",
		   REG_FIELD_GET(GS_BOOTROM_MASK, status));
	drm_printf(p, "\tuKernel status = 0x%x\n",
		   REG_FIELD_GET(GS_UKERNEL_MASK, status));
	drm_printf(p, "\tMIA Core status = 0x%x\n",
		   REG_FIELD_GET(GS_MIA_MASK, status));
	drm_printf(p, "\tLog level = %d\n",
		   xe_guc_log_get_level(&guc->log));

	drm_puts(p, "\nScratch registers:\n");
	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
		drm_printf(p, "\t%2d: \t0x%x\n",
			   i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	drm_puts(p, "\n");
	xe_guc_ct_print(&guc->ct, p, false);

	drm_puts(p, "\n");
	xe_guc_submit_print(guc, p);
}

/**
 * xe_guc_declare_wedged() - Declare GuC wedged
 * @guc: the GuC object
 *
 * Wedge the GuC which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_guc_declare_wedged(struct xe_guc *guc)
{
	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);

	xe_guc_reset_prepare(guc);
	xe_guc_ct_stop(&guc->ct);
	xe_guc_submit_wedge(guc);
}