/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL	0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK		0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0

/* for Vega20/Arcturus register offset change */
#define	mmROM_INDEX_VG20				0x00e4
#define	mmROM_INDEX_VG20_BASE_IDX			0
#define	mmROM_DATA_VG20					0x00e5
#define	mmROM_DATA_VG20_BASE_IDX			0

/*
 * Indirect register accessors
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u64 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* read low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);

	/* read high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	r |= ((u64)RREG32(data) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* write low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, (u32)(v & 0xffffffffULL));
	(void)RREG32(data);

	/* write high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	WREG32(data, (u32)(v >> 32));
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
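
/*
 * Illustrative sketch (not part of the driver): the accessors above all
 * follow the same index/data pattern -- take the aperture lock, post the
 * target offset to the index register, read the index back to make sure
 * the write has landed, then touch the data register.  A hypothetical
 * generic helper built on that discipline could look like this:
 */
#if 0
static u32 soc15_example_indirect_rreg(struct amdgpu_device *adev,
				       u32 index_reg, u32 data_reg, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(index_reg, reg);		/* select the indirect register */
	(void)RREG32(index_reg);	/* read back to post the write */
	r = RREG32(data_reg);		/* fetch the selected value */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
#endif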

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
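
/*
 * Usage sketch (illustrative fragment, me/pipe/queue are placeholders):
 * callers typically select a specific me/pipe/queue/vmid, program the
 * per-queue registers, then restore the default selection; the real
 * callers in gfx_v9_0.c do this while holding adev->srbm_mutex.
 */
#if 0
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me, pipe, queue, 0);
	/* ... program per-queue registers here ... */
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
#endif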

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
		break;
	default:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
		break;
	}

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}
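
/*
 * Usage sketch (hypothetical caller): the destination buffer belongs to
 * the caller, and the routine copies the ROM dword-by-dword through the
 * index/data pair selected above, so lengths are rounded up to 4 bytes.
 */
#if 0
	u8 header[256];

	if (soc15_read_bios_from_rom(adev, header, sizeof(header)))
		DRM_INFO("vbios header read, signature 0x%02x%02x\n",
			 header[0], header[1]);
#endif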

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
			reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
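
/*
 * Usage sketch (illustrative): reading a whitelisted register through the
 * checked interface above; se_num/sh_num of 0xffffffff means "broadcast",
 * i.e. no GRBM index selection is performed.
 */
#if 0
	u32 grbm_status;

	if (!soc15_read_register(adev, 0xffffffff, 0xffffffff,
				 SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS),
				 &grbm_status))
		DRM_DEBUG("GRBM_STATUS = 0x%08x\n", grbm_status);
#endif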

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
					     const struct soc15_reg_golden *regs,
					     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
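
/*
 * Example (hypothetical values, illustrative fragment): a golden-register
 * table as consumed by soc15_program_register_sequence().  The
 * SOC15_REG_GOLDEN_VALUE() helper from soc15.h fills in the
 * hwip/instance/segment fields; an and_mask of 0xffffffff makes the entry
 * a plain write of or_mask, per the fast path above.
 */
#if 0
static const struct soc15_reg_golden example_golden_regs[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffffffff, 0x2a114042),
};

	soc15_program_register_sequence(adev, example_golden_regs,
					ARRAY_SIZE(example_golden_regs));
#endif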

static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid the NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		return amdgpu_dpm_mode2_reset(adev);
	default:
		return soc15_asic_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do IP discovery here for Renoir,
		 * as it doesn't support SRIOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}

int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
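
/*
 * Usage sketch (illustrative): the common device code picks this function
 * per ASIC during early init, roughly as in the hypothetical fragment
 * below (see amdgpu_device_ip_early_init() in amdgpu_device.c).
 */
#if 0
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	/* ... other ASIC families ... */
	}
#endif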

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
{
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
		return;
	/* read back HDP RAS counter to reset it to 0 */
	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
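
/*
 * Usage sketch (illustrative fragment): because the counters above are
 * shadowed across a one-second msleep() window, the returned totals
 * approximate per-second rates.
 */
#if 0
	uint64_t rx_msgs, tx_posted;

	soc15_get_pcie_usage(adev, &rx_msgs, &tx_posted);
	DRM_DEBUG("PCIe: ~%llu msgs/s received, ~%llu posted requests/s sent\n",
		  rx_msgs, tx_posted);
#endif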

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs.  Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
};

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->apu_flags |= AMD_APU_IS_RENOIR;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				 AMD_CG_SUPPORT_GFX_MGLS |
				 AMD_CG_SUPPORT_GFX_3D_CGCG |
				 AMD_CG_SUPPORT_GFX_3D_CGLS |
				 AMD_CG_SUPPORT_GFX_CGCG |
				 AMD_CG_SUPPORT_GFX_CGLS |
				 AMD_CG_SUPPORT_GFX_CP_LS |
				 AMD_CG_SUPPORT_MC_MGCG |
				 AMD_CG_SUPPORT_MC_LS |
				 AMD_CG_SUPPORT_SDMA_MGCG |
				 AMD_CG_SUPPORT_SDMA_LS |
				 AMD_CG_SUPPORT_BIF_LS |
				 AMD_CG_SUPPORT_HDP_LS |
				 AMD_CG_SUPPORT_ROM_MGCG |
				 AMD_CG_SUPPORT_VCN_MGCG |
				 AMD_CG_SUPPORT_JPEG_MGCG |
				 AMD_CG_SUPPORT_IH_CG |
				 AMD_CG_SUPPORT_ATHUB_LS |
				 AMD_CG_SUPPORT_ATHUB_MGCG |
				 AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				 AMD_PG_SUPPORT_VCN |
				 AMD_PG_SUPPORT_JPEG |
				 AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x91;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->asic_funcs &&
	    adev->asic_funcs->reset_hdp_ras_error_count)
		adev->asic_funcs->reset_hdp_ras_error_count(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* SDMA/IH doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes that are not in
	 * the SDMA/IH/MM/ACV ranges are routed to CP, so we need to
	 * init the SDMA/IH/MM/ACV doorbell ranges prior to the CP IP
	 * block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
1447static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
1448{
1449	uint32_t def, data;
1450
1451	if (adev->asic_type == CHIP_VEGA20 ||
1452		adev->asic_type == CHIP_ARCTURUS) {
1453		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
1454
1455		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1456			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1457				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1458				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1459				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
1460		else
1461			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1462				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1463				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1464				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
1465
1466		if (def != data)
1467			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
1468	} else {
1469		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1470
1471		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1472			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1473		else
1474			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1475
1476		if (def != data)
1477			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1478	}
1479}
1480
1481static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1482{
1483	uint32_t def, data;
1484
1485	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1486
1487	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1488		data &= ~(0x01000000 |
1489			  0x02000000 |
1490			  0x04000000 |
1491			  0x08000000 |
1492			  0x10000000 |
1493			  0x20000000 |
1494			  0x40000000 |
1495			  0x80000000);
1496	else
1497		data |= (0x01000000 |
1498			 0x02000000 |
1499			 0x04000000 |
1500			 0x08000000 |
1501			 0x10000000 |
1502			 0x20000000 |
1503			 0x40000000 |
1504			 0x80000000);
1505
1506	if (def != data)
1507		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1508}
1509
1510static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1511{
1512	uint32_t def, data;
1513
1514	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1515
1516	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1517		data |= 1;
1518	else
1519		data &= ~1;
1520
1521	if (def != data)
1522		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1523}
1524
1525static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1526						       bool enable)
1527{
1528	uint32_t def, data;
1529
1530	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1531
1532	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1533		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1534			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1535	else
1536		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1537			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1538
1539	if (def != data)
1540		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
1541}
1542
1543static int soc15_common_set_clockgating_state(void *handle,
1544					    enum amd_clockgating_state state)
1545{
1546	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1547
1548	if (amdgpu_sriov_vf(adev))
1549		return 0;
1550
1551	switch (adev->asic_type) {
1552	case CHIP_VEGA10:
1553	case CHIP_VEGA12:
1554	case CHIP_VEGA20:
1555		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1556				state == AMD_CG_STATE_GATE);
1557		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1558				state == AMD_CG_STATE_GATE);
1559		soc15_update_hdp_light_sleep(adev,
1560				state == AMD_CG_STATE_GATE);
1561		soc15_update_drm_clock_gating(adev,
1562				state == AMD_CG_STATE_GATE);
1563		soc15_update_drm_light_sleep(adev,
1564				state == AMD_CG_STATE_GATE);
1565		soc15_update_rom_medium_grain_clock_gating(adev,
1566				state == AMD_CG_STATE_GATE);
1567		adev->df.funcs->update_medium_grain_clock_gating(adev,
1568				state == AMD_CG_STATE_GATE);
1569		break;
1570	case CHIP_RAVEN:
1571	case CHIP_RENOIR:
1572		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1573				state == AMD_CG_STATE_GATE);
1574		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1575				state == AMD_CG_STATE_GATE);
1576		soc15_update_hdp_light_sleep(adev,
1577				state == AMD_CG_STATE_GATE);
1578		soc15_update_drm_clock_gating(adev,
1579				state == AMD_CG_STATE_GATE);
1580		soc15_update_drm_light_sleep(adev,
1581				state == AMD_CG_STATE_GATE);
1582		soc15_update_rom_medium_grain_clock_gating(adev,
1583				state == AMD_CG_STATE_GATE);
1584		break;
1585	case CHIP_ARCTURUS:
1586		soc15_update_hdp_light_sleep(adev,
1587				state == AMD_CG_STATE_GATE);
1588		break;
1589	default:
1590		break;
1591	}
1592	return 0;
1593}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};
v5.4
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <linux/pci.h>
  27
  28#include "amdgpu.h"
  29#include "amdgpu_atombios.h"
  30#include "amdgpu_ih.h"
  31#include "amdgpu_uvd.h"
  32#include "amdgpu_vce.h"
  33#include "amdgpu_ucode.h"
  34#include "amdgpu_psp.h"
  35#include "atom.h"
  36#include "amd_pcie.h"
  37
  38#include "uvd/uvd_7_0_offset.h"
  39#include "gc/gc_9_0_offset.h"
  40#include "gc/gc_9_0_sh_mask.h"
  41#include "sdma0/sdma0_4_0_offset.h"
  42#include "sdma1/sdma1_4_0_offset.h"
  43#include "hdp/hdp_4_0_offset.h"
  44#include "hdp/hdp_4_0_sh_mask.h"
  45#include "smuio/smuio_9_0_offset.h"
  46#include "smuio/smuio_9_0_sh_mask.h"
  47#include "nbio/nbio_7_0_default.h"
  48#include "nbio/nbio_7_0_offset.h"
  49#include "nbio/nbio_7_0_sh_mask.h"
  50#include "nbio/nbio_7_0_smn.h"
  51#include "mp/mp_9_0_offset.h"
  52
  53#include "soc15.h"
  54#include "soc15_common.h"
  55#include "gfx_v9_0.h"
  56#include "gmc_v9_0.h"
  57#include "gfxhub_v1_0.h"
  58#include "mmhub_v1_0.h"
  59#include "df_v1_7.h"
  60#include "df_v3_6.h"
  61#include "vega10_ih.h"
  62#include "sdma_v4_0.h"
  63#include "uvd_v7_0.h"
  64#include "vce_v4_0.h"
  65#include "vcn_v1_0.h"
  66#include "vcn_v2_0.h"
  67#include "vcn_v2_5.h"
  68#include "dce_virtual.h"
  69#include "mxgpu_ai.h"
  70#include "amdgpu_smu.h"
  71#include "amdgpu_ras.h"
  72#include "amdgpu_xgmi.h"
  73#include <uapi/linux/kfd_ioctl.h>
  74
  75#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
  76#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
  77#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
  78#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
  79
  80/* for Vega20 register name change */
  81#define mmHDP_MEM_POWER_CTRL	0x00d4
  82#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
  83#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
  84#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
  85#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK		0x00020000L
  86#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
  87/*
  88 * Indirect registers accessor
  89 */
  90static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
  91{
  92	unsigned long flags, address, data;
  93	u32 r;
  94	address = adev->nbio_funcs->get_pcie_index_offset(adev);
  95	data = adev->nbio_funcs->get_pcie_data_offset(adev);
  96
  97	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  98	WREG32(address, reg);
  99	(void)RREG32(address);
 100	r = RREG32(data);
 101	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 102	return r;
 103}
 104
 105static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 106{
 107	unsigned long flags, address, data;
 108
 109	address = adev->nbio_funcs->get_pcie_index_offset(adev);
 110	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 111
 112	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 113	WREG32(address, reg);
 114	(void)RREG32(address);
 115	WREG32(data, v);
 116	(void)RREG32(data);
 117	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 118}
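/*
 * The two accessors above are the classic index/data pair: write the
 * offset to the index register, read it back so the write is posted,
 * then touch the data register, all under pcie_idx_lock. A minimal
 * caller sketch (soc15_pcie_rmw() is hypothetical, not part of this
 * file):
 *
 *	static void soc15_pcie_rmw(struct amdgpu_device *adev, u32 reg,
 *				   u32 clr, u32 set)
 *	{
 *		u32 v = adev->pcie_rreg(adev, reg);
 *
 *		v = (v & ~clr) | set;
 *		adev->pcie_wreg(adev, reg, v);
 *	}
 */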
 119
 120static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 121{
 122	unsigned long flags, address, data;
 123	u64 r;
 124	address = adev->nbio_funcs->get_pcie_index_offset(adev);
 125	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 126
 127	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 128	/* read low 32 bit */
 129	WREG32(address, reg);
 130	(void)RREG32(address);
 131	r = RREG32(data);
 132
 133	/* read high 32 bit*/
 134	WREG32(address, reg + 4);
 135	(void)RREG32(address);
 136	r |= ((u64)RREG32(data) << 32);
 137	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 138	return r;
 139}
 140
 141static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 142{
 143	unsigned long flags, address, data;
 144
 145	address = adev->nbio_funcs->get_pcie_index_offset(adev);
 146	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 147
 148	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 149	/* write low 32 bit */
 150	WREG32(address, reg);
 151	(void)RREG32(address);
 152	WREG32(data, (u32)(v & 0xffffffffULL));
 153	(void)RREG32(data);
 154
 155	/* write high 32 bit */
 156	WREG32(address, reg + 4);
 157	(void)RREG32(address);
 158	WREG32(data, (u32)(v >> 32));
 159	(void)RREG32(data);
 160	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 161}
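/*
 * The 64-bit variants access both 32-bit halves while holding
 * pcie_idx_lock for the whole operation, so a concurrent accessor
 * cannot interleave between the low and high words. A hypothetical
 * caller sketch:
 *
 *	u64 v = adev->pcie_rreg64(adev, reg);
 *
 * Two separate pcie_rreg() calls would drop the lock between the
 * halves and could observe a torn 64-bit value.
 */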
 162
 163static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
 164{
 165	unsigned long flags, address, data;
 166	u32 r;
 167
 168	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
 169	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
 170
 171	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
 172	WREG32(address, ((reg) & 0x1ff));
 173	r = RREG32(data);
 174	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
 175	return r;
 176}
 177
 178static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 179{
 180	unsigned long flags, address, data;
 181
 182	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
 183	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
 184
 185	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
 186	WREG32(address, ((reg) & 0x1ff));
 187	WREG32(data, (v));
 188	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
 189}
 190
 191static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
 192{
 193	unsigned long flags, address, data;
 194	u32 r;
 195
 196	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 197	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 198
 199	spin_lock_irqsave(&adev->didt_idx_lock, flags);
 200	WREG32(address, (reg));
 201	r = RREG32(data);
 202	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 203	return r;
 204}
 205
 206static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 207{
 208	unsigned long flags, address, data;
 209
 210	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 211	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 212
 213	spin_lock_irqsave(&adev->didt_idx_lock, flags);
 214	WREG32(address, (reg));
 215	WREG32(data, (v));
 216	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 217}
 218
 219static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
 220{
 221	unsigned long flags;
 222	u32 r;
 223
 224	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
 225	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
 226	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
 227	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
 228	return r;
 229}
 230
 231static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 232{
 233	unsigned long flags;
 234
 235	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
 236	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
 237	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
 238	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
 239}
 240
 241static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
 242{
 243	unsigned long flags;
 244	u32 r;
 245
 246	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
 247	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
 248	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
 249	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
 250	return r;
 251}
 252
 253static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 254{
 255	unsigned long flags;
 256
 257	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
 258	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
 259	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
 260	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
 261}
 262
 263static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 264{
 265	return adev->nbio_funcs->get_memsize(adev);
 266}
 267
 268static u32 soc15_get_xclk(struct amdgpu_device *adev)
 269{
 270	return adev->clock.spll.reference_freq;
 271}
 272
 273
 274void soc15_grbm_select(struct amdgpu_device *adev,
 275		     u32 me, u32 pipe, u32 queue, u32 vmid)
 276{
 277	u32 grbm_gfx_cntl = 0;
 278	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
 279	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
 280	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
 281	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
 282
 283	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
 284}
 285
 286static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
 287{
 288	/* todo */
 289}
 290
 291static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
 292{
 293	/* todo */
 294	return false;
 295}
 296
 297static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 298				     u8 *bios, u32 length_bytes)
 299{
 300	u32 *dw_ptr;
 301	u32 i, length_dw;
 302
 303	if (bios == NULL)
 304		return false;
 305	if (length_bytes == 0)
 306		return false;
 307	/* APU vbios image is part of sbios image */
 308	if (adev->flags & AMD_IS_APU)
 309		return false;
 310
 311	dw_ptr = (u32 *)bios;
 312	length_dw = ALIGN(length_bytes, 4) / 4;
 313
 314	/* set rom index to 0 */
 315	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
 316	/* read out the rom data */
 317	for (i = 0; i < length_dw; i++)
 318		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
 319
 320	return true;
 321}
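/*
 * Usage sketch for the hook above, with illustrative names (the real
 * caller lives in amdgpu_bios.c). The buffer must hold ALIGN(len, 4)
 * bytes because the loop above always copies whole dwords:
 *
 *	u8 *bios = kzalloc(ALIGN(len, 4), GFP_KERNEL);
 *
 *	if (bios && amdgpu_asic_read_bios_from_rom(adev, bios, len))
 *		...parse the ATOM BIOS tables...
 */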
 322
 323static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
 324	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
 325	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
 326	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
 327	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
 328	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
 329	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
 330	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
 331	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
 332	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
 333	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
 334	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
 335	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
 336	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
 337	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
 338	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
 339	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
 340	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
 341	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
 342	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
 343};
 344
 345static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 346					 u32 sh_num, u32 reg_offset)
 347{
 348	uint32_t val;
 349
 350	mutex_lock(&adev->grbm_idx_mutex);
 351	if (se_num != 0xffffffff || sh_num != 0xffffffff)
 352		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 353
 354	val = RREG32(reg_offset);
 355
 356	if (se_num != 0xffffffff || sh_num != 0xffffffff)
 357		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 358	mutex_unlock(&adev->grbm_idx_mutex);
 359	return val;
 360}
 361
 362static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
 363					 bool indexed, u32 se_num,
 364					 u32 sh_num, u32 reg_offset)
 365{
 366	if (indexed) {
 367		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
 368	} else {
 369		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
 370			return adev->gfx.config.gb_addr_config;
 371		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
 372			return adev->gfx.config.db_debug2;
 373		return RREG32(reg_offset);
 374	}
 375}
 376
 377static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 378			    u32 sh_num, u32 reg_offset, u32 *value)
 379{
 380	uint32_t i;
 381	struct soc15_allowed_register_entry *en;
 382
 383	*value = 0;
 384	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
 385		en = &soc15_allowed_read_registers[i];
 386		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
 387					+ en->reg_offset))
 388			continue;
 389
 390		*value = soc15_get_register_value(adev,
 391						  soc15_allowed_read_registers[i].grbm_indexed,
 392						  se_num, sh_num, reg_offset);
 393		return 0;
 394	}
 395	return -EINVAL;
 396}
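/*
 * soc15_read_register() backs the userspace register-read query; only
 * offsets listed in soc15_allowed_read_registers are served, anything
 * else returns -EINVAL. In-kernel usage sketch:
 *
 *	u32 val;
 *
 *	if (!soc15_read_register(adev, 0xffffffff, 0xffffffff,
 *				 SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS),
 *				 &val))
 *		...val now holds GRBM_STATUS...
 *
 * se_num/sh_num of 0xffffffff mean "no banking": no GRBM select is
 * programmed around the read.
 */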
 397
 398
 399/**
 400 * soc15_program_register_sequence - program an array of registers.
 401 *
 402 * @adev: amdgpu_device pointer
 403 * @regs: pointer to the register array
 404 * @array_size: size of the register array
 405 *
 406 * Programs an array of registers with AND and OR masks.
 407 * This is a helper for setting golden registers.
 408 */
 409
 410void soc15_program_register_sequence(struct amdgpu_device *adev,
 411					     const struct soc15_reg_golden *regs,
 412					     const u32 array_size)
 413{
 414	const struct soc15_reg_golden *entry;
 415	u32 tmp, reg;
 416	int i;
 417
 418	for (i = 0; i < array_size; ++i) {
 419		entry = &regs[i];
 420		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
 421
 422		if (entry->and_mask == 0xffffffff) {
 423			tmp = entry->or_mask;
 424		} else {
 425			tmp = RREG32(reg);
 426			tmp &= ~(entry->and_mask);
 427			tmp |= (entry->or_mask & entry->and_mask);
 428		}
 429
 430		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
 431			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
 432			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
 433			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
 434			WREG32_RLC(reg, tmp);
 435		else
 436			WREG32(reg, tmp);
 437
 438	}
 439
 440}
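/*
 * A minimal, hypothetical golden-register table for the helper above
 * (the mask/value pair is illustrative only):
 *
 *	static const struct soc15_reg_golden fake_golden[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2,
 *				       0x0000000f, 0x00000004),
 *	};
 *
 *	soc15_program_register_sequence(adev, fake_golden,
 *					ARRAY_SIZE(fake_golden));
 *
 * and_mask selects which bits are replaced and or_mask supplies the
 * new bits; and_mask == 0xffffffff skips the read and writes or_mask
 * directly.
 */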
 441
 442static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
 443{
 444	u32 i;
 445	int ret = 0;
 446
 447	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 448
 449	dev_info(adev->dev, "GPU mode1 reset\n");
 450
 451	/* disable BM */
 452	pci_clear_master(adev->pdev);
 453
 454	pci_save_state(adev->pdev);
 455
 456	ret = psp_gpu_reset(adev);
 457	if (ret)
 458		dev_err(adev->dev, "GPU mode1 reset failed\n");
 459
 460	pci_restore_state(adev->pdev);
 461
 462	/* wait for asic to come out of reset */
 463	for (i = 0; i < adev->usec_timeout; i++) {
 464		u32 memsize = adev->nbio_funcs->get_memsize(adev);
 465
 466		if (memsize != 0xffffffff)
 467			break;
 468		udelay(1);
 469	}
 470
 471	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 472
 473	return ret;
 474}
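/*
 * Note: the memsize poll above works because the config-memsize
 * register reads back as all ones (0xffffffff) while the ASIC is still
 * in reset; the first sane value signals that reset has completed, so
 * no explicit "reset done" interrupt is needed here.
 */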
 475
 476static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
 477{
 478	void *pp_handle = adev->powerplay.pp_handle;
 479	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 480
 481	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
 482		*cap = false;
 483		return -ENOENT;
 484	}
 485
 486	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
 487}
 488
 489static int soc15_asic_baco_reset(struct amdgpu_device *adev)
 490{
 491	void *pp_handle = adev->powerplay.pp_handle;
 492	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 493
 494	if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
 495		return -ENOENT;
 496
 497	/* enter BACO state */
 498	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
 499		return -EIO;
 500
 501	/* exit BACO state */
 502	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
 503		return -EIO;
 504
 505	dev_info(adev->dev, "GPU BACO reset\n");
 506
 507	adev->in_baco_reset = 1;
 508
 509	return 0;
 510}
 511
 512static int soc15_mode2_reset(struct amdgpu_device *adev)
 513{
 514	if (!adev->powerplay.pp_funcs ||
 515	    !adev->powerplay.pp_funcs->asic_reset_mode_2)
 516		return -ENOENT;
 517
 518	return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
 519}
 520
 521static enum amd_reset_method
 522soc15_asic_reset_method(struct amdgpu_device *adev)
 523{
 524	bool baco_reset;
 525
 526	switch (adev->asic_type) {
 527	case CHIP_RAVEN:
 528		return AMD_RESET_METHOD_MODE2;
 529	case CHIP_VEGA10:
 530	case CHIP_VEGA12:
 531		soc15_asic_get_baco_capability(adev, &baco_reset);
 532		break;
 533	case CHIP_VEGA20:
 534		if (adev->psp.sos_fw_version >= 0x80067)
 535			soc15_asic_get_baco_capability(adev, &baco_reset);
 536		else
 537			baco_reset = false;
 538		if (baco_reset) {
 539			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
 540			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 541
 542			if (hive || (ras && ras->supported))
 543				baco_reset = false;
 544		}
 545		break;
 546	default:
 547		baco_reset = false;
 548		break;
 549	}
 550
 551	if (baco_reset)
 552		return AMD_RESET_METHOD_BACO;
 553	else
 554		return AMD_RESET_METHOD_MODE1;
 555}
 556
 557static int soc15_asic_reset(struct amdgpu_device *adev)
 558{
 559	switch (soc15_asic_reset_method(adev)) {
 560	case AMD_RESET_METHOD_BACO:
 561		if (!adev->in_suspend)
 562			amdgpu_inc_vram_lost(adev);
 563		return soc15_asic_baco_reset(adev);
 564	case AMD_RESET_METHOD_MODE2:
 565		return soc15_mode2_reset(adev);
 566	default:
 567		if (!adev->in_suspend)
 568			amdgpu_inc_vram_lost(adev);
 569		return soc15_asic_mode1_reset(adev);
 570	}
 571}
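/*
 * Note: BACO and mode1 take down the whole ASIC, so VRAM contents are
 * treated as lost and amdgpu_inc_vram_lost() bumps the counter used to
 * invalidate userspace contexts after recovery. Mode2 on Raven is a
 * lighter, engine-level reset (and APU "VRAM" is carved out of system
 * memory), hence no vram-lost bump on that path.
 */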
 572
 573/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 574			u32 cntl_reg, u32 status_reg)
 575{
 576	return 0;
 577}*/
 578
 579static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 580{
 581	/*int r;
 582
 583	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
 584	if (r)
 585		return r;
 586
 587	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
 588	*/
 589	return 0;
 590}
 591
 592static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 593{
 594	/* todo */
 595
 596	return 0;
 597}
 598
 599static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
 600{
 601	if (pci_is_root_bus(adev->pdev->bus))
 602		return;
 603
 604	if (amdgpu_pcie_gen2 == 0)
 605		return;
 606
 607	if (adev->flags & AMD_IS_APU)
 608		return;
 609
 610	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 611					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 612		return;
 613
 614	/* todo */
 615}
 616
 617static void soc15_program_aspm(struct amdgpu_device *adev)
 618{
 619
 620	if (amdgpu_aspm == 0)
 621		return;
 622
 623	/* todo */
 624}
 625
 626static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
 627					   bool enable)
 628{
 629	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
 630	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
 631}
 632
 633static const struct amdgpu_ip_block_version vega10_common_ip_block =
 634{
 635	.type = AMD_IP_BLOCK_TYPE_COMMON,
 636	.major = 2,
 637	.minor = 0,
 638	.rev = 0,
 639	.funcs = &soc15_common_ip_funcs,
 640};
 641
 642static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 643{
 644	return adev->nbio_funcs->get_rev_id(adev);
 645}
 646
 647int soc15_set_ip_blocks(struct amdgpu_device *adev)
 648{
 649	/* Set IP register base before any HW register access */
 650	switch (adev->asic_type) {
 651	case CHIP_VEGA10:
 652	case CHIP_VEGA12:
 653	case CHIP_RAVEN:
 654	case CHIP_RENOIR:
 655		vega10_reg_base_init(adev);
 656		break;
 657	case CHIP_VEGA20:
 658		vega20_reg_base_init(adev);
 659		break;
 660	case CHIP_ARCTURUS:
 661		arct_reg_base_init(adev);
 662		break;
 663	default:
 664		return -EINVAL;
 665	}
 666
 667	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
 668		adev->gmc.xgmi.supported = true;
 669
 670	if (adev->flags & AMD_IS_APU)
 671		adev->nbio_funcs = &nbio_v7_0_funcs;
 672	else if (adev->asic_type == CHIP_VEGA20 ||
 673		adev->asic_type == CHIP_ARCTURUS)
 674		adev->nbio_funcs = &nbio_v7_4_funcs;
 675	else
 676		adev->nbio_funcs = &nbio_v6_1_funcs;
 677
 678	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
 679		adev->df_funcs = &df_v3_6_funcs;
 680	else
 681		adev->df_funcs = &df_v1_7_funcs;
 682
 683	adev->rev_id = soc15_get_rev_id(adev);
 684	adev->nbio_funcs->detect_hw_virt(adev);
 685
 686	if (amdgpu_sriov_vf(adev))
 687		adev->virt.ops = &xgpu_ai_virt_ops;
 688
 689	switch (adev->asic_type) {
 690	case CHIP_VEGA10:
 691	case CHIP_VEGA12:
 692	case CHIP_VEGA20:
 693		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 694		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 695
 696		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
 697		if (amdgpu_sriov_vf(adev)) {
 698			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
 699				if (adev->asic_type == CHIP_VEGA20)
 700					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 701				else
 702					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 703			}
 704			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 705		} else {
 706			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 707			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
 708				if (adev->asic_type == CHIP_VEGA20)
 709					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 710				else
 711					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 712			}
 713		}
 714		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 715		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 716		if (!amdgpu_sriov_vf(adev)) {
 717			if (is_support_sw_smu(adev))
 718				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 719			else
 720				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 721		}
 722		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 723			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 724#if defined(CONFIG_DRM_AMD_DC)
 725		else if (amdgpu_device_has_dc_support(adev))
 726			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 727#endif
 728		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
 729			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
 730			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
 731		}
 732		break;
 733	case CHIP_RAVEN:
 734		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 735		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 736		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 737		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
 738			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
 739		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 740		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 741		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 742		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 743			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 744#if defined(CONFIG_DRM_AMD_DC)
 745		else if (amdgpu_device_has_dc_support(adev))
 746			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 747#endif
 748		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
 749		break;
 750	case CHIP_ARCTURUS:
 751		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 752		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 753		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 754		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 755			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 756		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 757		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 758		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 759		amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
 760		break;
 761	case CHIP_RENOIR:
 762		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 763		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 764		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 765		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
 766			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
 767		if (is_support_sw_smu(adev))
 768			amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
 769		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 770		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 771		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 772			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 773#if defined(CONFIG_DRM_AMD_DC)
 774		else if (amdgpu_device_has_dc_support(adev))
 775			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 776#endif
 777		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 778		break;
 779	default:
 780		return -EINVAL;
 781	}
 782
 783	return 0;
 784}
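/*
 * Note: the add order above is also the init order. COMMON and GMC
 * come first so register bases and memory are usable, IH/PSP precede
 * the firmware-consuming engines, and display is added before the
 * media blocks. Treat this as a reading aid; the exact ordering
 * constraints are internal to the driver.
 */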
 785
 786static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 787{
 788	adev->nbio_funcs->hdp_flush(adev, ring);
 789}
 790
 791static void soc15_invalidate_hdp(struct amdgpu_device *adev,
 792				 struct amdgpu_ring *ring)
 793{
 794	if (!ring || !ring->funcs->emit_wreg)
 795		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
 796	else
 797		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
 798			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 799}
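/*
 * flush_hdp() pushes CPU writes through the HDP write cache before the
 * GPU reads them; invalidate_hdp() drops stale lines from the HDP read
 * cache after the GPU has written memory the CPU is about to read.
 * Usage sketch via the amdgpu_asic_* wrappers (a non-NULL ring emits
 * the register write on that ring instead of via MMIO):
 *
 *	...CPU fills a buffer the GPU will consume...
 *	amdgpu_asic_flush_hdp(adev, NULL);
 *
 *	...GPU has written results the CPU will read...
 *	amdgpu_asic_invalidate_hdp(adev, NULL);
 */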
 800
 801static bool soc15_need_full_reset(struct amdgpu_device *adev)
 802{
 803	/* change this when we implement soft reset */
 804	return true;
 805}
 806static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
 807				 uint64_t *count1)
 808{
 809	uint32_t perfctr = 0;
 810	uint64_t cnt0_of, cnt1_of;
 811	int tmp;
 812
 813	/* This reports 0 on APUs, so return to avoid writing/reading registers
 814	 * that may or may not be different from their GPU counterparts
 815	 */
 816	if (adev->flags & AMD_IS_APU)
 817		return;
 818
 819	/* Set the 2 events that we wish to watch, defined above */
 820	/* Reg 40 is # received msgs */
 821	/* Reg 104 is # of posted requests sent */
 822	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
 823	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
 824
 825	/* Write to enable desired perf counters */
 826	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
 827	/* Zero out and enable the perf counters
 828	 * Write 0x5:
 829	 * Bit 0 = Start all counters(1)
 830	 * Bit 2 = Global counter reset enable(1)
 831	 */
 832	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
 833
 834	msleep(1000);
 835
 836	/* Load the shadow and disable the perf counters
 837	 * Write 0x2:
 838	 * Bit 0 = Stop counters(0)
 839	 * Bit 1 = Load the shadow counters(1)
 840	 */
 841	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
 842
 843	/* Read register values to get any >32bit overflow */
 844	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
 845	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
 846	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
 847
 848	/* Get the values and add the overflow */
 849	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
 850	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
 851}
 852
 853static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
 854				 uint64_t *count1)
 855{
 856	uint32_t perfctr = 0;
 857	uint64_t cnt0_of, cnt1_of;
 858	int tmp;
 859
 860	/* This reports 0 on APUs, so return to avoid writing/reading registers
 861	 * that may or may not be different from their GPU counterparts
 862	 */
 863	if (adev->flags & AMD_IS_APU)
 864		return;
 865
 866	/* Set the 2 events that we wish to watch, defined above */
 867	/* Reg 40 is # received msgs */
 868	/* Reg 108 is # of posted requests sent on VG20 */
 869	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
 870				EVENT0_SEL, 40);
 871	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
 872				EVENT1_SEL, 108);
 873
 874	/* Write to enable desired perf counters */
 875	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
 876	/* Zero out and enable the perf counters
 877	 * Write 0x5:
 878	 * Bit 0 = Start all counters(1)
 879	 * Bit 2 = Global counter reset enable(1)
 880	 */
 881	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
 882
 883	msleep(1000);
 884
 885	/* Load the shadow and disable the perf counters
 886	 * Write 0x2:
 887	 * Bit 0 = Stop counters(0)
 888	 * Bit 1 = Load the shadow counters(1)
 889	 */
 890	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
 891
 892	/* Read register values to get any >32bit overflow */
 893	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
 894	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
 895	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
 896
 897	/* Get the values and add the overflow */
 898	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
 899	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
 900}
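/*
 * Both variants above back the pcie_bw sysfs query. Consumer-side
 * sketch (names illustrative):
 *
 *	uint64_t rx_msgs, tx_reqs;
 *
 *	adev->asic_funcs->get_pcie_usage(adev, &rx_msgs, &tx_reqs);
 *
 * rx_msgs counts messages received and tx_reqs counts posted requests
 * sent during the one-second sampling window; bytes moved can be
 * estimated by multiplying each count by the link's maximum payload
 * size (pcie_get_mps()).
 */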
 901
 902static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
 903{
 904	u32 sol_reg;
 905
 906	/* Just return false for soc15 GPUs.  Reset does not seem to
 907	 * be necessary.
 908	 */
 909	if (!amdgpu_passthrough(adev))
 910		return false;
 911
 912	if (adev->flags & AMD_IS_APU)
 913		return false;
 914
 915	/* Check sOS sign of life register to confirm sys driver and sOS
 916	 * have already been loaded.
 917	 */
 918	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
 919	if (sol_reg)
 920		return true;
 921
 922	return false;
 923}
 924
 925static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
 926{
 927	uint64_t nak_r, nak_g;
 928
 929	/* Get the number of NAKs received and generated */
 930	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
 931	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
 932
 933	/* Add the total number of NAKs, i.e the number of replays */
 934	return (nak_r + nak_g);
 935}
 936
 937static const struct amdgpu_asic_funcs soc15_asic_funcs =
 938{
 939	.read_disabled_bios = &soc15_read_disabled_bios,
 940	.read_bios_from_rom = &soc15_read_bios_from_rom,
 941	.read_register = &soc15_read_register,
 942	.reset = &soc15_asic_reset,
 943	.reset_method = &soc15_asic_reset_method,
 944	.set_vga_state = &soc15_vga_set_state,
 945	.get_xclk = &soc15_get_xclk,
 946	.set_uvd_clocks = &soc15_set_uvd_clocks,
 947	.set_vce_clocks = &soc15_set_vce_clocks,
 948	.get_config_memsize = &soc15_get_config_memsize,
 949	.flush_hdp = &soc15_flush_hdp,
 950	.invalidate_hdp = &soc15_invalidate_hdp,
 951	.need_full_reset = &soc15_need_full_reset,
 952	.init_doorbell_index = &vega10_doorbell_index_init,
 953	.get_pcie_usage = &soc15_get_pcie_usage,
 954	.need_reset_on_init = &soc15_need_reset_on_init,
 955	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
 956};
 957
 958static const struct amdgpu_asic_funcs vega20_asic_funcs =
 959{
 960	.read_disabled_bios = &soc15_read_disabled_bios,
 961	.read_bios_from_rom = &soc15_read_bios_from_rom,
 962	.read_register = &soc15_read_register,
 963	.reset = &soc15_asic_reset,
 964	.set_vga_state = &soc15_vga_set_state,
 965	.get_xclk = &soc15_get_xclk,
 966	.set_uvd_clocks = &soc15_set_uvd_clocks,
 967	.set_vce_clocks = &soc15_set_vce_clocks,
 968	.get_config_memsize = &soc15_get_config_memsize,
 969	.flush_hdp = &soc15_flush_hdp,
 970	.invalidate_hdp = &soc15_invalidate_hdp,
 971	.need_full_reset = &soc15_need_full_reset,
 972	.init_doorbell_index = &vega20_doorbell_index_init,
 973	.get_pcie_usage = &vega20_get_pcie_usage,
 974	.need_reset_on_init = &soc15_need_reset_on_init,
 975	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
 976	.reset_method = &soc15_asic_reset_method
 977};
 978
 979static int soc15_common_early_init(void *handle)
 980{
 981#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
 982	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 983
 984	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
 985	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
 986	adev->smc_rreg = NULL;
 987	adev->smc_wreg = NULL;
 988	adev->pcie_rreg = &soc15_pcie_rreg;
 989	adev->pcie_wreg = &soc15_pcie_wreg;
 990	adev->pcie_rreg64 = &soc15_pcie_rreg64;
 991	adev->pcie_wreg64 = &soc15_pcie_wreg64;
 992	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
 993	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
 994	adev->didt_rreg = &soc15_didt_rreg;
 995	adev->didt_wreg = &soc15_didt_wreg;
 996	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
 997	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
 998	adev->se_cac_rreg = &soc15_se_cac_rreg;
 999	adev->se_cac_wreg = &soc15_se_cac_wreg;
1000
1001
1002	adev->external_rev_id = 0xFF;
1003	switch (adev->asic_type) {
1004	case CHIP_VEGA10:
1005		adev->asic_funcs = &soc15_asic_funcs;
1006		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1007			AMD_CG_SUPPORT_GFX_MGLS |
1008			AMD_CG_SUPPORT_GFX_RLC_LS |
1009			AMD_CG_SUPPORT_GFX_CP_LS |
1010			AMD_CG_SUPPORT_GFX_3D_CGCG |
1011			AMD_CG_SUPPORT_GFX_3D_CGLS |
1012			AMD_CG_SUPPORT_GFX_CGCG |
1013			AMD_CG_SUPPORT_GFX_CGLS |
1014			AMD_CG_SUPPORT_BIF_MGCG |
1015			AMD_CG_SUPPORT_BIF_LS |
1016			AMD_CG_SUPPORT_HDP_LS |
1017			AMD_CG_SUPPORT_DRM_MGCG |
1018			AMD_CG_SUPPORT_DRM_LS |
1019			AMD_CG_SUPPORT_ROM_MGCG |
1020			AMD_CG_SUPPORT_DF_MGCG |
1021			AMD_CG_SUPPORT_SDMA_MGCG |
1022			AMD_CG_SUPPORT_SDMA_LS |
1023			AMD_CG_SUPPORT_MC_MGCG |
1024			AMD_CG_SUPPORT_MC_LS;
1025		adev->pg_flags = 0;
1026		adev->external_rev_id = 0x1;
1027		break;
1028	case CHIP_VEGA12:
1029		adev->asic_funcs = &soc15_asic_funcs;
1030		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1031			AMD_CG_SUPPORT_GFX_MGLS |
1032			AMD_CG_SUPPORT_GFX_CGCG |
1033			AMD_CG_SUPPORT_GFX_CGLS |
1034			AMD_CG_SUPPORT_GFX_3D_CGCG |
1035			AMD_CG_SUPPORT_GFX_3D_CGLS |
1036			AMD_CG_SUPPORT_GFX_CP_LS |
1037			AMD_CG_SUPPORT_MC_LS |
1038			AMD_CG_SUPPORT_MC_MGCG |
1039			AMD_CG_SUPPORT_SDMA_MGCG |
1040			AMD_CG_SUPPORT_SDMA_LS |
1041			AMD_CG_SUPPORT_BIF_MGCG |
1042			AMD_CG_SUPPORT_BIF_LS |
1043			AMD_CG_SUPPORT_HDP_MGCG |
1044			AMD_CG_SUPPORT_HDP_LS |
1045			AMD_CG_SUPPORT_ROM_MGCG |
1046			AMD_CG_SUPPORT_VCE_MGCG |
1047			AMD_CG_SUPPORT_UVD_MGCG;
1048		adev->pg_flags = 0;
1049		adev->external_rev_id = adev->rev_id + 0x14;
1050		break;
1051	case CHIP_VEGA20:
1052		adev->asic_funcs = &vega20_asic_funcs;
1053		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1054			AMD_CG_SUPPORT_GFX_MGLS |
1055			AMD_CG_SUPPORT_GFX_CGCG |
1056			AMD_CG_SUPPORT_GFX_CGLS |
1057			AMD_CG_SUPPORT_GFX_3D_CGCG |
1058			AMD_CG_SUPPORT_GFX_3D_CGLS |
1059			AMD_CG_SUPPORT_GFX_CP_LS |
1060			AMD_CG_SUPPORT_MC_LS |
1061			AMD_CG_SUPPORT_MC_MGCG |
1062			AMD_CG_SUPPORT_SDMA_MGCG |
1063			AMD_CG_SUPPORT_SDMA_LS |
1064			AMD_CG_SUPPORT_BIF_MGCG |
1065			AMD_CG_SUPPORT_BIF_LS |
1066			AMD_CG_SUPPORT_HDP_MGCG |
1067			AMD_CG_SUPPORT_HDP_LS |
1068			AMD_CG_SUPPORT_ROM_MGCG |
1069			AMD_CG_SUPPORT_VCE_MGCG |
1070			AMD_CG_SUPPORT_UVD_MGCG;
1071		adev->pg_flags = 0;
1072		adev->external_rev_id = adev->rev_id + 0x28;
1073		break;
1074	case CHIP_RAVEN:
1075		adev->asic_funcs = &soc15_asic_funcs;
1076		if (adev->rev_id >= 0x8)
1077			adev->external_rev_id = adev->rev_id + 0x79;
1078		else if (adev->pdev->device == 0x15d8)
1079			adev->external_rev_id = adev->rev_id + 0x41;
1080		else if (adev->rev_id == 1)
1081			adev->external_rev_id = adev->rev_id + 0x20;
1082		else
1083			adev->external_rev_id = adev->rev_id + 0x01;
1084
1085		if (adev->rev_id >= 0x8) {
1086			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1087				AMD_CG_SUPPORT_GFX_MGLS |
1088				AMD_CG_SUPPORT_GFX_CP_LS |
1089				AMD_CG_SUPPORT_GFX_3D_CGCG |
1090				AMD_CG_SUPPORT_GFX_3D_CGLS |
1091				AMD_CG_SUPPORT_GFX_CGCG |
1092				AMD_CG_SUPPORT_GFX_CGLS |
1093				AMD_CG_SUPPORT_BIF_LS |
1094				AMD_CG_SUPPORT_HDP_LS |
1095				AMD_CG_SUPPORT_ROM_MGCG |
1096				AMD_CG_SUPPORT_MC_MGCG |
1097				AMD_CG_SUPPORT_MC_LS |
1098				AMD_CG_SUPPORT_SDMA_MGCG |
1099				AMD_CG_SUPPORT_SDMA_LS |
1100				AMD_CG_SUPPORT_VCN_MGCG;
1101
1102			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1103		} else if (adev->pdev->device == 0x15d8) {
1104			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1105				AMD_CG_SUPPORT_GFX_MGLS |
1106				AMD_CG_SUPPORT_GFX_CP_LS |
1107				AMD_CG_SUPPORT_GFX_3D_CGCG |
1108				AMD_CG_SUPPORT_GFX_3D_CGLS |
1109				AMD_CG_SUPPORT_GFX_CGCG |
1110				AMD_CG_SUPPORT_GFX_CGLS |
1111				AMD_CG_SUPPORT_BIF_LS |
1112				AMD_CG_SUPPORT_HDP_LS |
1113				AMD_CG_SUPPORT_ROM_MGCG |
1114				AMD_CG_SUPPORT_MC_MGCG |
1115				AMD_CG_SUPPORT_MC_LS |
1116				AMD_CG_SUPPORT_SDMA_MGCG |
1117				AMD_CG_SUPPORT_SDMA_LS;
1118
1119			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1120				AMD_PG_SUPPORT_MMHUB |
1121				AMD_PG_SUPPORT_VCN |
1122				AMD_PG_SUPPORT_VCN_DPG;
1123		} else {
1124			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1125				AMD_CG_SUPPORT_GFX_MGLS |
1126				AMD_CG_SUPPORT_GFX_RLC_LS |
1127				AMD_CG_SUPPORT_GFX_CP_LS |
1128				AMD_CG_SUPPORT_GFX_3D_CGCG |
1129				AMD_CG_SUPPORT_GFX_3D_CGLS |
1130				AMD_CG_SUPPORT_GFX_CGCG |
1131				AMD_CG_SUPPORT_GFX_CGLS |
1132				AMD_CG_SUPPORT_BIF_MGCG |
1133				AMD_CG_SUPPORT_BIF_LS |
1134				AMD_CG_SUPPORT_HDP_MGCG |
1135				AMD_CG_SUPPORT_HDP_LS |
1136				AMD_CG_SUPPORT_DRM_MGCG |
1137				AMD_CG_SUPPORT_DRM_LS |
1138				AMD_CG_SUPPORT_ROM_MGCG |
1139				AMD_CG_SUPPORT_MC_MGCG |
1140				AMD_CG_SUPPORT_MC_LS |
1141				AMD_CG_SUPPORT_SDMA_MGCG |
1142				AMD_CG_SUPPORT_SDMA_LS |
1143				AMD_CG_SUPPORT_VCN_MGCG;
1144
1145			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1146		}
1147		break;
1148	case CHIP_ARCTURUS:
1149		adev->asic_funcs = &vega20_asic_funcs;
1150		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1151			AMD_CG_SUPPORT_GFX_MGLS |
1152			AMD_CG_SUPPORT_GFX_CGCG |
1153			AMD_CG_SUPPORT_GFX_CGLS |
1154			AMD_CG_SUPPORT_GFX_CP_LS |
1155			AMD_CG_SUPPORT_HDP_MGCG |
1156			AMD_CG_SUPPORT_HDP_LS |
1157			AMD_CG_SUPPORT_SDMA_MGCG |
1158			AMD_CG_SUPPORT_SDMA_LS |
1159			AMD_CG_SUPPORT_MC_MGCG |
1160			AMD_CG_SUPPORT_MC_LS;
1161		adev->pg_flags = 0;
1162		adev->external_rev_id = adev->rev_id + 0x32;
1163		break;
1164	case CHIP_RENOIR:
1165		adev->asic_funcs = &soc15_asic_funcs;
1166		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1167				 AMD_CG_SUPPORT_GFX_MGLS |
1168				 AMD_CG_SUPPORT_GFX_3D_CGCG |
1169				 AMD_CG_SUPPORT_GFX_3D_CGLS |
1170				 AMD_CG_SUPPORT_GFX_CGCG |
1171				 AMD_CG_SUPPORT_GFX_CGLS |
1172				 AMD_CG_SUPPORT_GFX_CP_LS |
1173				 AMD_CG_SUPPORT_MC_MGCG |
1174				 AMD_CG_SUPPORT_MC_LS |
1175				 AMD_CG_SUPPORT_SDMA_MGCG |
1176				 AMD_CG_SUPPORT_SDMA_LS |
1177				 AMD_CG_SUPPORT_BIF_LS |
1178				 AMD_CG_SUPPORT_HDP_LS |
1179				 AMD_CG_SUPPORT_ROM_MGCG |
1180				 AMD_CG_SUPPORT_VCN_MGCG |
1181				 AMD_CG_SUPPORT_IH_CG |
1182				 AMD_CG_SUPPORT_ATHUB_LS |
1183				 AMD_CG_SUPPORT_ATHUB_MGCG |
1184				 AMD_CG_SUPPORT_DF_MGCG;
1185		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1186				 AMD_PG_SUPPORT_VCN |
1187				 AMD_PG_SUPPORT_VCN_DPG;
1188		adev->external_rev_id = adev->rev_id + 0x91;
1189		break;
1190	default:
1191		/* FIXME: not supported yet */
1192		return -EINVAL;
1193	}
1194
1195	if (amdgpu_sriov_vf(adev)) {
1196		amdgpu_virt_init_setting(adev);
1197		xgpu_ai_mailbox_set_irq_funcs(adev);
1198	}
1199
1200	return 0;
1201}
1202
1203static int soc15_common_late_init(void *handle)
1204{
1205	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1206
1207	if (amdgpu_sriov_vf(adev))
1208		xgpu_ai_mailbox_get_irq(adev);
1209
1210	return 0;
1211}
1212
1213static int soc15_common_sw_init(void *handle)
1214{
1215	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1216
1217	if (amdgpu_sriov_vf(adev))
1218		xgpu_ai_mailbox_add_irq_id(adev);
1219
1220	adev->df_funcs->sw_init(adev);
1221
1222	return 0;
1223}
1224
1225static int soc15_common_sw_fini(void *handle)
1226{
1227	return 0;
1228}
1229
1230static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1231{
1232	int i;
1233	struct amdgpu_ring *ring;
1234
1235	/* sdma/ih doorbell ranges are programmed by the hypervisor */
1236	if (!amdgpu_sriov_vf(adev)) {
1237		for (i = 0; i < adev->sdma.num_instances; i++) {
1238			ring = &adev->sdma.instance[i].ring;
1239			adev->nbio_funcs->sdma_doorbell_range(adev, i,
1240				ring->use_doorbell, ring->doorbell_index,
1241				adev->doorbell_index.sdma_doorbell_range);
1242		}
1243
1244		adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1245						adev->irq.ih.doorbell_index);
1246	}
1247}
1248
1249static int soc15_common_hw_init(void *handle)
1250{
1251	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1252
1253	/* enable pcie gen2/3 link */
1254	soc15_pcie_gen3_enable(adev);
1255	/* enable aspm */
1256	soc15_program_aspm(adev);
1257	/* setup nbio registers */
1258	adev->nbio_funcs->init_registers(adev);
1259	/* remap HDP registers to a hole in mmio space,
1260	 * in order to expose those registers
1261	 * to process space
1262	 */
1263	if (adev->nbio_funcs->remap_hdp_registers)
1264		adev->nbio_funcs->remap_hdp_registers(adev);
1265
1266	/* enable the doorbell aperture */
1267	soc15_enable_doorbell_aperture(adev, true);
1268	/* HW doorbell routing policy: doorbell writes not in the
1269	 * SDMA/IH/MM/ACV ranges will be routed to CP, so we need to
1270	 * init the SDMA/IH/MM/ACV doorbell ranges prior to CP IP
1271	 * block init and ring test.
1272	 */
1273	soc15_doorbell_range_init(adev);
1274
1275	return 0;
1276}
1277
1278static int soc15_common_hw_fini(void *handle)
1279{
1280	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1281
1282	/* disable the doorbell aperture */
1283	soc15_enable_doorbell_aperture(adev, false);
1284	if (amdgpu_sriov_vf(adev))
1285		xgpu_ai_mailbox_put_irq(adev);
1286
1287	return 0;
1288}
1289
1290static int soc15_common_suspend(void *handle)
1291{
1292	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1293
1294	return soc15_common_hw_fini(adev);
1295}
1296
1297static int soc15_common_resume(void *handle)
1298{
1299	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1300
1301	return soc15_common_hw_init(adev);
1302}
1303
1304static bool soc15_common_is_idle(void *handle)
1305{
1306	return true;
1307}
1308
1309static int soc15_common_wait_for_idle(void *handle)
1310{
1311	return 0;
1312}
1313
1314static int soc15_common_soft_reset(void *handle)
1315{
1316	return 0;
1317}
1318
1319static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
1320{
1321	uint32_t def, data;
1322
1323	if (adev->asic_type == CHIP_VEGA20 ||
1324		adev->asic_type == CHIP_ARCTURUS) {
1325		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
1326
1327		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1328			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1329				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1330				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1331				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
1332		else
1333			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1334				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1335				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1336				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
1337
1338		if (def != data)
1339			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
1340	} else {
1341		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1342
1343		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1344			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1345		else
1346			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1347
1348		if (def != data)
1349			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1350	}
1351}
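/*
 * The "def = data = RREG32(...); ...; if (def != data) WREG32(...)"
 * shape used here and in the helpers below is a read-modify-write that
 * skips the MMIO write when no bit actually changed. Distilled:
 *
 *	def = data = RREG32(reg);
 *	data = enable ? (data | mask) : (data & ~mask);
 *	if (def != data)
 *		WREG32(reg, data);
 */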
1352
1353static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1354{
1355	uint32_t def, data;
1356
1357	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1358
1359	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1360		data &= ~(0x01000000 |
1361			  0x02000000 |
1362			  0x04000000 |
1363			  0x08000000 |
1364			  0x10000000 |
1365			  0x20000000 |
1366			  0x40000000 |
1367			  0x80000000);
1368	else
1369		data |= (0x01000000 |
1370			 0x02000000 |
1371			 0x04000000 |
1372			 0x08000000 |
1373			 0x10000000 |
1374			 0x20000000 |
1375			 0x40000000 |
1376			 0x80000000);
1377
1378	if (def != data)
1379		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1380}
1381
1382static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1383{
1384	uint32_t def, data;
1385
1386	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1387
1388	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1389		data |= 1;
1390	else
1391		data &= ~1;
1392
1393	if (def != data)
1394		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1395}
1396
1397static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1398						       bool enable)
1399{
1400	uint32_t def, data;
1401
1402	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1403
1404	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1405		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1406			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1407	else
1408		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1409			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1410
1411	if (def != data)
1412		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
1413}
1414
1415static int soc15_common_set_clockgating_state(void *handle,
1416					    enum amd_clockgating_state state)
1417{
1418	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1419
1420	if (amdgpu_sriov_vf(adev))
1421		return 0;
1422
1423	switch (adev->asic_type) {
1424	case CHIP_VEGA10:
1425	case CHIP_VEGA12:
1426	case CHIP_VEGA20:
1427		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
1428				state == AMD_CG_STATE_GATE);
1429		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
1430				state == AMD_CG_STATE_GATE);
1431		soc15_update_hdp_light_sleep(adev,
1432				state == AMD_CG_STATE_GATE);
1433		soc15_update_drm_clock_gating(adev,
1434				state == AMD_CG_STATE_GATE);
1435		soc15_update_drm_light_sleep(adev,
1436				state == AMD_CG_STATE_GATE);
1437		soc15_update_rom_medium_grain_clock_gating(adev,
1438				state == AMD_CG_STATE_GATE);
1439		adev->df_funcs->update_medium_grain_clock_gating(adev,
1440				state == AMD_CG_STATE_GATE);
1441		break;
1442	case CHIP_RAVEN:
1443	case CHIP_RENOIR:
1444		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
1445				state == AMD_CG_STATE_GATE);
1446		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
1447				state == AMD_CG_STATE_GATE);
1448		soc15_update_hdp_light_sleep(adev,
1449				state == AMD_CG_STATE_GATE);
1450		soc15_update_drm_clock_gating(adev,
1451				state == AMD_CG_STATE_GATE);
1452		soc15_update_drm_light_sleep(adev,
1453				state == AMD_CG_STATE_GATE);
1454		soc15_update_rom_medium_grain_clock_gating(adev,
1455				state == AMD_CG_STATE_GATE);
1456		break;
1457	case CHIP_ARCTURUS:
1458		soc15_update_hdp_light_sleep(adev,
1459				state == AMD_CG_STATE_GATE);
1460		break;
1461	default:
1462		break;
1463	}
1464	return 0;
1465}
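/*
 * This hook is not called directly; the core dispatches it per IP
 * block. Sketch of the call-site shape:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev,
 *			AMD_IP_BLOCK_TYPE_COMMON,
 *			AMD_CG_STATE_GATE);
 */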
1466
1467static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
1468{
1469	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1470	int data;
1471
1472	if (amdgpu_sriov_vf(adev))
1473		*flags = 0;
1474
1475	adev->nbio_funcs->get_clockgating_state(adev, flags);
1476
1477	/* AMD_CG_SUPPORT_HDP_LS */
1478	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1479	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1480		*flags |= AMD_CG_SUPPORT_HDP_LS;
1481
1482	/* AMD_CG_SUPPORT_DRM_MGCG */
1483	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1484	if (!(data & 0x01000000))
1485		*flags |= AMD_CG_SUPPORT_DRM_MGCG;
1486
1487	/* AMD_CG_SUPPORT_DRM_LS */
1488	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1489	if (data & 0x1)
1490		*flags |= AMD_CG_SUPPORT_DRM_LS;
1491
1492	/* AMD_CG_SUPPORT_ROM_MGCG */
1493	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1494	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1495		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
1496
1497	adev->df_funcs->get_clockgating_state(adev, flags);
1498}
1499
1500static int soc15_common_set_powergating_state(void *handle,
1501					    enum amd_powergating_state state)
1502{
1503	/* todo */
1504	return 0;
1505}
1506
1507const struct amd_ip_funcs soc15_common_ip_funcs = {
1508	.name = "soc15_common",
1509	.early_init = soc15_common_early_init,
1510	.late_init = soc15_common_late_init,
1511	.sw_init = soc15_common_sw_init,
1512	.sw_fini = soc15_common_sw_fini,
1513	.hw_init = soc15_common_hw_init,
1514	.hw_fini = soc15_common_hw_fini,
1515	.suspend = soc15_common_suspend,
1516	.resume = soc15_common_resume,
1517	.is_idle = soc15_common_is_idle,
1518	.wait_for_idle = soc15_common_wait_for_idle,
1519	.soft_reset = soc15_common_soft_reset,
1520	.set_clockgating_state = soc15_common_set_clockgating_state,
1521	.set_powergating_state = soc15_common_set_powergating_state,
1522	.get_clockgating_state = soc15_common_get_clockgating_state,
1523};