/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");

/*
 * Indirect register accessors
 */
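/*
 * All of the accessors below follow the same index/data pattern: the
 * register offset is written to an INDEX register and the payload is then
 * read from or written to the matching DATA register, with a per-bus
 * spinlock keeping the two-step sequence atomic against concurrent
 * callers.  The throwaway RREG32() calls in the PCIE accessors are
 * read-backs to make sure the preceding write has landed before the next
 * access is issued.
 */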
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, reg);
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, reg);
	WREG32(mmSMC_IND_DATA_11, v);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* Carrizo SMC register offsets, from smu_8_0_d.h (not otherwise included here) */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, reg);
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, reg);
	WREG32(mmMP0PUB_IND_DATA, v);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
	WREG32(mmUVD_CTX_DATA, v);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);
	WREG32(mmDIDT_IND_DATA, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, reg);
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, reg);
	WREG32(mmGC_CAC_IND_DATA, v);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

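/*
 * Golden register settings.  Each table below is a sequence of
 * {register, and_mask, or_value} triplets as consumed by
 * amdgpu_program_register_sequence(): the bits in and_mask are cleared
 * from the current register value and or_value is OR'ed in; an and_mask
 * of 0xffffffff overwrites the register outright.
 */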
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

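/*
 * Read the vbios image through the SMC indirect ROM interface: ROM_INDEX
 * is reset to 0 and ROM_DATA then steps through the image one dword per
 * read, which is why the index register is only programmed once below.
 */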
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);

	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

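/*
 * Registers userspace is allowed to read through the AMDGPU_INFO ioctl.
 * Each entry is {reg_offset, untouched, grbm_indexed}: "untouched"
 * registers report 0 instead of being read, and "grbm_indexed" registers
 * are resolved through vi_get_register_value() below, either from the
 * cached gfx config or under GRBM SE/SH indexing.
 */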
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

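/*
 * While the ASIC is going through a PCI config reset it drops off the bus
 * and MMIO reads return all ones, so polling mmCONFIG_MEMSIZE for anything
 * other than 0xffffffff tells us the device is responding again.
 */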
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the ASIC via the PCI config reset sequence, flagging the
 * engine as hung in the ATOM BIOS scratch registers while the reset
 * is in flight.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};
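
/*
 * These are the ASIC-level callbacks the amdgpu core dispatches through
 * the amdgpu_asic_*() wrappers; vi_common_early_init() below installs
 * this table in adev->asic_funcs.
 */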

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

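/*
 * The vi_update_*() clockgating helpers below share one read-modify-write
 * pattern: read the register, set or clear the enable bits according to
 * the requested state and the ASIC's cg_flags, and write the result back
 * only if it actually changed, sparing needless traffic on the indirect
 * register buses.
 */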
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

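/*
 * On Tonga and Polaris the SMU owns system-level clockgating, so instead
 * of programming the registers directly the driver encodes a (group,
 * block, supported features, requested state) message with PP_CG_MSG_ID()
 * and hands it to powerplay via amd_set_clockgating_by_smu().
 */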
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}