// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

#define TILE_FUSE_ENABLE_BOTH        0x0
#define TILE_SKU_BOTH_MTL            0x3630

/* Work point configuration values */
#define CONFIG_1_TILE                0x01
#define CONFIG_2_TILE                0x02
#define PLL_RATIO_5_3                0x01
#define PLL_RATIO_4_3                0x02
#define WP_CONFIG(tile, ratio)       (((tile) << 8) | (ratio))
#define WP_CONFIG_1_TILE_5_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_1_TILE_4_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_2_TILE_5_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_2_TILE_4_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_0_TILE_PLL_OFF     WP_CONFIG(0, 0)
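
/*
 * Example: WP_CONFIG() packs the tile configuration into bits 15:8 and the
 * PLL ratio into bits 7:0, so WP_CONFIG_2_TILE_4_3_RATIO expands to
 * (0x02 << 8) | 0x02 == 0x0202.
 */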

#define PLL_REF_CLK_FREQ	     (50 * 1000000)
#define PLL_SIMULATION_FREQ	     (10 * 1000000)
#define PLL_PROF_CLK_FREQ	     (38400 * 1000)
#define PLL_DEFAULT_EPP_VALUE	     0x80

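/*
 * Writing the TIM_SAFE_ENABLE magic to the TIM_SAFE register presumably
 * unlocks the timer block for the write that follows;
 * ivpu_hw_37xx_wdt_disable() below re-arms it before every watchdog write.
 */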
#define TIM_SAFE_ENABLE		     0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE     0xffffffff

#define TIMEOUT_US		     (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US		     (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US		     (5 * USEC_PER_MSEC)

#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

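/*
 * The ICB status/clear/enable registers form a 64-bit pair with the ICB_*_1
 * bank in the upper 32 bits; the combined mask below matches the 64-bit
 * REGV_WR64() writes to VPU_37XX_HOST_SS_ICB_CLEAR_0 and ICB_ENABLE_0.
 */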
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)

#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))

#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
			       (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))

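/*
 * LOCAL_INT_MASK is a mask register, so a 0 bit means "enabled": the enable
 * value below unmasks only the IRQs in BUTTRESS_IRQ_MASK, while all-ones
 * masks everything (see ivpu_hw_37xx_irq_enable() and _disable()).
 */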
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)

#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
	vdev->wa.punit_disabled = false;
	vdev->wa.clear_runtime_mem = false;
	vdev->wa.d3hot_after_power_off = true;

	REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
	if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
		/* Writing 1s does not clear the interrupt status register */
		vdev->wa.interrupt_clear_with_0 = true;
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
	}

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(d3hot_after_power_off);
	IVPU_PRINT_WA(interrupt_clear_with_0);
}

static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
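	/* All timeout values below appear to be in milliseconds (driver convention). */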
	vdev->timeout.boot = 1000;
	vdev->timeout.jsm = 500;
	vdev->timeout.tdr = 2000;
	vdev->timeout.reschedule_suspend = 10;
	vdev->timeout.autosuspend = 10;
	vdev->timeout.d0i3_entry_msg = 5;
}

static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

/*
 * Send a KMD-initiated work point change: wait for any previous request to
 * drain, program the payload registers, then set SEND and wait for the
 * hardware to clear it.
 */
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
			     u16 target_ratio, u16 config)
{
	int ret;
	u32 val;

	ret = ivpu_pll_wait_for_cmd_send(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);

	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);

	ret = ivpu_pll_wait_for_cmd_send(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);

	return ret;
}

static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
{
	u32 exp_val = enable ? 0x1 : 0x0;

	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}

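/*
 * The PLL ratio limits come from fuses; the ivpu_pll_min_ratio and
 * ivpu_pll_max_ratio module parameters can only narrow the fused range.
 * Illustrative example: with fuse_min_ratio 0x10, fuse_max_ratio 0x60 and
 * ivpu_pll_max_ratio 0x20, the effective range is [0x10, 0x20], and
 * pn_ratio is then clamped into that range.
 */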
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
	fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
	fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);

	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
	hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}

static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}

static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u16 target_ratio;
	u16 config;
	int ret;

	if (IVPU_WA(punit_disabled)) {
		ivpu_dbg(vdev, PM, "Skipping PLL request\n");
		return 0;
	}

	if (enable) {
		target_ratio = hw->pll.pn_ratio;
		config = hw->config;
	} else {
		target_ratio = 0;
		config = 0;
	}

	ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
		 config, target_ratio);

	ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
	if (ret) {
		ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
		return ret;
	}

	ret = ivpu_pll_wait_for_lock(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for PLL lock\n");
		return ret;
	}

	if (enable) {
		ret = ivpu_pll_wait_for_status_ready(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
			return ret;
		}

		ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
			return ret;
		}
	}

	return 0;
}

static int ivpu_pll_enable(struct ivpu_device *vdev)
{
	return ivpu_pll_drive(vdev, true);
}

static int ivpu_pll_disable(struct ivpu_device *vdev)
{
	return ivpu_pll_drive(vdev, false);
}

static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
	u32 val = 0;

	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}

static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}

static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}

static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);

	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
		return -EIO;

	return 0;
}

static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
{
	ivpu_boot_host_ss_rst_clr_assert(vdev);

	return ivpu_boot_noc_qreqn_check(vdev, 0x0);
}

static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}

static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);

	ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}

static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
	return ivpu_boot_host_ss_axi_drive(vdev, true);
}

static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
	if (enable) {
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}
	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);

	ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}

static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
	return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}

static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}

static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}

static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
			     exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}

static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}

static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}

static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
	int ret;

	ivpu_boot_pwr_island_trickle_drive(vdev, true);
	ivpu_boot_pwr_island_drive(vdev, true);

	ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for power island status\n");
		return ret;
	}

	ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
		return ret;
	}

	ivpu_boot_host_ss_clk_drive(vdev, true);
	ivpu_boot_pwr_island_isolation_drive(vdev, false);
	ivpu_boot_host_ss_rst_drive(vdev, true);
	ivpu_boot_dpu_active_drive(vdev, true);

	return ret;
}

static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}

static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}

static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
	u32 val;

	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

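	/*
	 * The firmware entry point is presumably 512-byte aligned: storing it
	 * shifted right by 9 leaves the low bits of LOADING_ADDRESS_LO free
	 * for control flags such as DONE.
	 */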
	val = vdev->fw->entry_point >> 9;
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
}

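/*
 * D0i3 transitions are a three-step handshake: wait for any in-flight
 * transition to finish (INPROGRESS == 0), flip the I3 request bit, then
 * poll INPROGRESS again to confirm completion.
 */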
static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);

	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

	return ret;
}

static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
	hw->sku = TILE_SKU_BOTH_MTL;
	hw->config = WP_CONFIG_2_TILE_4_3_RATIO;

	ivpu_pll_init_frequency_ratios(vdev);

	ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
	ivpu_hw_init_range(&hw->ranges.user,   0xc0000000, 255 * SZ_1M);
	ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
	ivpu_hw_init_range(&hw->ranges.dma,   0x200000000, SZ_8G);

	vdev->platform = IVPU_PLATFORM_SILICON;
	ivpu_hw_wa_init(vdev);
	ivpu_hw_timeouts_init(vdev);

	return 0;
}

static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
		return ret;
	}

	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
	val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}

static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
	int ret = 0;

	if (ivpu_hw_37xx_ip_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU\n");
		ret = -EIO;
	}

	if (ivpu_pll_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable PLL\n");
		ret = -EIO;
	}

	return ret;
}

static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_boot_d0i3_drive(vdev, true);
	if (ret)
		ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

	udelay(5); /* VPU requires 5 us to complete the transition */

	return ret;
}

static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_boot_d0i3_drive(vdev, false);
	if (ret)
		ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

	return ret;
}

static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
	int ret;

	/* PLL requests may fail when powering down, so issue WP 0 here */
	ret = ivpu_pll_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);

	ret = ivpu_hw_37xx_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = ivpu_pll_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	/*
	 * The control circuitry for the vpu_idle indication logic powers up
	 * active. To avoid a spurious low power mode signal from the LRT
	 * during bring-up, the KMD disables the circuitry before bringing up
	 * the Main Power island.
	 */
	ivpu_boot_vpu_idle_gen_disable(vdev);

	ret = ivpu_boot_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	ret = ivpu_boot_host_ss_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}

static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
	ivpu_boot_no_snoop_enable(vdev);
	ivpu_boot_tbu_mmu_enable(vdev);
	ivpu_boot_soc_cpu_boot(vdev);

	return 0;
}

static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
	u32 val;

	if (IVPU_WA(punit_disabled))
		return true;

	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
	return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
	       REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}

static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev)
{
	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}

static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
}

static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
	int ret = 0;

	ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);

	if (!ivpu_hw_37xx_is_idle(vdev))
		ivpu_warn(vdev, "VPU not idle during power down\n");

	if (ivpu_hw_37xx_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset VPU\n");
		ret = -EIO;
	}

	if (ivpu_hw_37xx_d0i3_enable(vdev)) {
		ivpu_err(vdev, "Failed to enter D0I3\n");
		ret = -EIO;
	}

	return ret;
}

static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}

static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev)
{
	return PLL_PROF_CLK_FREQ;
}

static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	/* The profiling frequency is a debug feature and is not adjustable on VPU 37XX. */
}

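/*
 * Worked example (illustrative): with ratio 0x50 (80) and a 4/3-ratio
 * config, pll_clock = 50 MHz * 80 = 4000 MHz and the CPU clock is
 * 4000 MHz * 2 / 4 = 2000 MHz; with a 5/3-ratio config it would be
 * 4000 MHz * 2 / 5 = 1600 MHz.
 */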
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
	u32 cpu_clock;

	if ((config & 0xff) == PLL_RATIO_4_3)
		cpu_clock = pll_clock * 2 / 4;
	else
		cpu_clock = pll_clock * 2 / 5;

	return cpu_clock;
}

/* Register indirect accesses */
static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
	u32 pll_curr_ratio;

	pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
	pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;

	if (!ivpu_is_silicon(vdev))
		return PLL_SIMULATION_FREQ;

	return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}

static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}

static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}

static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}

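/*
 * Doorbell registers are laid out at a fixed stride: REGV_WR32I() indexes
 * from DOORBELL_0 by db_id, so ringing doorbell db_id writes the SET bit at
 * DOORBELL_0 + db_id * reg_stride (assumption based on the macro arguments).
 */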
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}

static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}

static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
	u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}

static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
	REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}

static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}

static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}

static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}

static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}

static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}

/* Handler for IRQs from VPU core (irqV) */
static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;

	if (!status)
		return false;

	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev, wake_thread);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		ivpu_hw_37xx_irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		ivpu_hw_37xx_irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		ivpu_hw_37xx_irq_noc_firewall_handler(vdev);

	return true;
}

/* Handler for IRQs from Buttress core (irqB) */
static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
			 REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
		REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
		REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	if (IVPU_WA(interrupt_clear_with_0))
		/*
		 * Writing 1 triggers an interrupt, so a read-modify-write is
		 * not possible; clear the local interrupt status by writing 0
		 * to all bits instead.
		 */
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
	else
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}

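/*
 * Top-level (hard) IRQ handler: the global interrupt mask is raised while
 * both sources are dispatched and dropped afterwards, so any interrupt
 * still pending at the source generates a fresh MSI edge instead of being
 * lost.
 */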
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
	struct ivpu_device *vdev = ptr;
	bool irqv_handled, irqb_handled, wake_thread = false;

	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);

	irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread);
	irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq);

	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);

	if (wake_thread)
		return IRQ_WAKE_THREAD;
	if (irqv_handled || irqb_handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}

static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
	u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
	u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

	if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
		ivpu_err(vdev, "WDT MSS timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
		ivpu_err(vdev, "WDT NCE timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
		ivpu_err(vdev, "NOC Firewall irq detected\n");

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
	}
}

const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
	.info_init = ivpu_hw_37xx_info_init,
	.power_up = ivpu_hw_37xx_power_up,
	.is_idle = ivpu_hw_37xx_is_idle,
	.wait_for_idle = ivpu_hw_37xx_wait_for_idle,
	.power_down = ivpu_hw_37xx_power_down,
	.reset = ivpu_hw_37xx_reset,
	.boot_fw = ivpu_hw_37xx_boot_fw,
	.wdt_disable = ivpu_hw_37xx_wdt_disable,
	.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
	.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
	.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
	.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
	.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
	.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
	.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
	.reg_db_set = ivpu_hw_37xx_reg_db_set,
	.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
	.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
	.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
	.irq_clear = ivpu_hw_37xx_irq_clear,
	.irq_enable = ivpu_hw_37xx_irq_enable,
	.irq_disable = ivpu_hw_37xx_irq_disable,
	.irq_handler = ivpu_hw_37xx_irq_handler,
};