/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ring.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#include "clearstate_vi.h"

#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "smu/smu_7_1_3_d.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define GFX8_NUM_GFX_RINGS     1
#define GFX8_MEC_HPD_SIZE 4096

#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003

#define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)					((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)					((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x)				((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x)					((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)					((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)					((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)				((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)					((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L

/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD    1
#define CLE_BPM_SERDES_CMD    0

/* BPM Register Address */
enum {
	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX
};

#define RLC_FormatDirectRegListLength        14

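/* Firmware images requested at driver load for each supported VI-family ASIC */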
MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");

MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");

MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");

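/* Per-VMID GDS base/size, GWS and OA register offsets (VMIDs 0-15) */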
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};

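/* Golden register tables: triplets of { register offset, AND mask, OR value }
 * applied via amdgpu_device_program_register_sequence().
 */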
static const u32 golden_settings_tonga_a11[] =
{
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 tonga_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_vegam_a11[] =
{
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 vegam_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris11_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x07180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris10_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 fiji_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_iceland_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};

static const u32 iceland_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};

static const u32 cz_golden_settings_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};

static const u32 cz_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};

static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};

static const char * const sq_edc_source_names[] = {
	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);

#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK                    0x0000007fL
#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT                  0x00000000L

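/* Apply the per-ASIC "golden" register settings defined in the tables above */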
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
							iceland_golden_common_all,
							ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
							fiji_golden_common_all,
							ARRAY_SIZE(fiji_golden_common_all));
		break;

	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
							tonga_golden_common_all,
							ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_vegam_a11,
							ARRAY_SIZE(golden_settings_vegam_a11));
		amdgpu_device_program_register_sequence(adev,
							vegam_golden_common_all,
							ARRAY_SIZE(vegam_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris11_golden_common_all,
							ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris10_golden_common_all,
							ARRAY_SIZE(polaris10_golden_common_all));
		data = RREG32_SMC(ixCG_ACLK_CNTL);
		data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK;
		data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT;
		WREG32_SMC(ixCG_ACLK_CNTL, data);
		if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_common_all,
							ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_common_all,
							ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}

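/* Set up the pool of CP scratch registers used by the ring test below */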
static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

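/* Sanity-check the ring: write a token to a scratch register through the
 * ring and poll until the CP has executed the write (or we time out).
 */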
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

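/* Sanity-check IB submission: schedule a small indirect buffer that writes
 * a token to a writeback slot, then wait on its fence and verify the value.
 */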
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

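/* Drop all CP/RLC firmware references; Stoney and Topaz carry no MEC2 image */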
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ))
		release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

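/* Fetch and validate the PFP/ME/CE/RLC/MEC firmware for the detected ASIC.
 * Polaris parts first try the newer *_2.bin images and fall back to the
 * original names if those are absent.
 */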
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_VEGAM:
		chip_name = "vegam";
		break;
	default:
		BUG();
	}

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
			err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
			err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
			err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	/*
	 * Support for MCBP/Virtualization in combination with chained IBs was
	 * formally released with feature version 46.
	 */
	if (adev->gfx.ce_feature_version >= 46 &&
	    adev->gfx.pfp_feature_version >= 46) {
		adev->virt.chained_ib_support = true;
		DRM_INFO("Chained IB support enabled!\n");
	} else
		adev->virt.chained_ib_support = false;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);

	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
					adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);

	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] =	le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
			err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ)) {
		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
			if (err == -ENOENT) {
				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
				err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
			}
		} else {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		}
		if (!err) {
			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
			if (err)
				goto out;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
			adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
			adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
		} else {
			err = 0;
			adev->gfx.mec2_fw = NULL;
		}
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
	info->fw = adev->gfx.pfp_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
	info->fw = adev->gfx.me_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
	info->fw = adev->gfx.ce_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
	info->fw = adev->gfx.rlc_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
	info->fw = adev->gfx.mec_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	/* we also need to account for the JT (jump table) */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);

	if (amdgpu_sriov_vf(adev)) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
	}

	if (adev->gfx.mec2_fw) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		info->fw = adev->gfx.mec2_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx8: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

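/* Emit the clear-state buffer (CSB) contents from the RLC cs_data tables
 * into @buffer: preamble control, context control, the SECT_CONTEXT
 * register extents, raster config, and a final CLEAR_STATE packet.
 */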
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

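/* Carrizo carries one extra CP jump table entry compared to the other VI parts */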
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_CARRIZO)
		return 5;
	else
		return 4;
}

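/* RLC init: set up the clear-state data and CSB, allocate the CP jump
 * table / GDS backup object on the Carrizo/Stoney APUs, and program the
 * default SPM VMID.
 */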
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = vi_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

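/* Free the MEC HPD EOP buffer object allocated in gfx_v8_0_mec_init() */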
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}

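/* Allocate and zero one HPD EOP region per acquired compute ring */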
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);

	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

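/* Hand-assembled GFX8 compute shaders and their dispatch register
 * settings, used to load every VGPR/SGPR with known values.
 */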
1375static const u32 vgpr_init_compute_shader[] =
1376{
1377	0x7e000209, 0x7e020208,
1378	0x7e040207, 0x7e060206,
1379	0x7e080205, 0x7e0a0204,
1380	0x7e0c0203, 0x7e0e0202,
1381	0x7e100201, 0x7e120200,
1382	0x7e140209, 0x7e160208,
1383	0x7e180207, 0x7e1a0206,
1384	0x7e1c0205, 0x7e1e0204,
1385	0x7e200203, 0x7e220202,
1386	0x7e240201, 0x7e260200,
1387	0x7e280209, 0x7e2a0208,
1388	0x7e2c0207, 0x7e2e0206,
1389	0x7e300205, 0x7e320204,
1390	0x7e340203, 0x7e360202,
1391	0x7e380201, 0x7e3a0200,
1392	0x7e3c0209, 0x7e3e0208,
1393	0x7e400207, 0x7e420206,
1394	0x7e440205, 0x7e460204,
1395	0x7e480203, 0x7e4a0202,
1396	0x7e4c0201, 0x7e4e0200,
1397	0x7e500209, 0x7e520208,
1398	0x7e540207, 0x7e560206,
1399	0x7e580205, 0x7e5a0204,
1400	0x7e5c0203, 0x7e5e0202,
1401	0x7e600201, 0x7e620200,
1402	0x7e640209, 0x7e660208,
1403	0x7e680207, 0x7e6a0206,
1404	0x7e6c0205, 0x7e6e0204,
1405	0x7e700203, 0x7e720202,
1406	0x7e740201, 0x7e760200,
1407	0x7e780209, 0x7e7a0208,
1408	0x7e7c0207, 0x7e7e0206,
1409	0xbf8a0000, 0xbf810000,
1410};
1411
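/*
 * Companion shader for the SGPR file: s_mov_b32 chains (SOP1 0xbexxxxxx
 * encodings) touching the allocated SGPRs, again ending in s_barrier and
 * s_endpgm (the trailing zero dword is padding).
 */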
1412static const u32 sgpr_init_compute_shader[] =
1413{
1414	0xbe8a0100, 0xbe8c0102,
1415	0xbe8e0104, 0xbe900106,
1416	0xbe920108, 0xbe940100,
1417	0xbe960102, 0xbe980104,
1418	0xbe9a0106, 0xbe9c0108,
1419	0xbe9e0100, 0xbea00102,
1420	0xbea20104, 0xbea40106,
1421	0xbea60108, 0xbea80100,
1422	0xbeaa0102, 0xbeac0104,
1423	0xbeae0106, 0xbeb00108,
1424	0xbeb20100, 0xbeb40102,
1425	0xbeb60104, 0xbeb80106,
1426	0xbeba0108, 0xbebc0100,
1427	0xbebe0102, 0xbec00104,
1428	0xbec20106, 0xbec40108,
1429	0xbec60100, 0xbec80102,
1430	0xbee60004, 0xbee70005,
1431	0xbeea0006, 0xbeeb0007,
1432	0xbee80008, 0xbee90009,
1433	0xbefc0000, 0xbf8a0000,
1434	0xbf810000, 0x00000000,
1435};
1436
1437static const u32 vgpr_init_regs[] =
1438{
1439	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1440	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1441	mmCOMPUTE_NUM_THREAD_X, 256*4,
1442	mmCOMPUTE_NUM_THREAD_Y, 1,
1443	mmCOMPUTE_NUM_THREAD_Z, 1,
1444	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1445	mmCOMPUTE_PGM_RSRC2, 20,
1446	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1447	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1448	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1449	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1450	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1451	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1452	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1453	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1454	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1455	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1456};
1457
1458static const u32 sgpr1_init_regs[] =
1459{
1460	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1461	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1462	mmCOMPUTE_NUM_THREAD_X, 256*5,
1463	mmCOMPUTE_NUM_THREAD_Y, 1,
1464	mmCOMPUTE_NUM_THREAD_Z, 1,
1465	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1466	mmCOMPUTE_PGM_RSRC2, 20,
1467	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1468	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1469	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1470	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1471	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1472	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1473	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1474	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1475	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1476	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1477};
1478
1479static const u32 sgpr2_init_regs[] =
1480{
1481	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
1482	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1483	mmCOMPUTE_NUM_THREAD_X, 256*5,
1484	mmCOMPUTE_NUM_THREAD_Y, 1,
1485	mmCOMPUTE_NUM_THREAD_Z, 1,
1486	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1487	mmCOMPUTE_PGM_RSRC2, 20,
1488	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1489	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1490	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1491	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1492	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1493	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1494	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1495	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1496	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1497	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1498};
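/*
 * The three init_regs tables above differ chiefly in
 * COMPUTE_STATIC_THREAD_MGMT_SE0: the VGPR pass enables every CU
 * (0xffffffff), while the two SGPR passes target the low (0x0f) and high
 * (0xf0) CU groups so that together they cover the whole shader engine.
 */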
1499
1500static const u32 sec_ded_counter_registers[] =
1501{
1502	mmCPC_EDC_ATC_CNT,
1503	mmCPC_EDC_SCRATCH_CNT,
1504	mmCPC_EDC_UCODE_CNT,
1505	mmCPF_EDC_ATC_CNT,
1506	mmCPF_EDC_ROQ_CNT,
1507	mmCPF_EDC_TAG_CNT,
1508	mmCPG_EDC_ATC_CNT,
1509	mmCPG_EDC_DMA_CNT,
1510	mmCPG_EDC_TAG_CNT,
1511	mmDC_EDC_CSINVOC_CNT,
1512	mmDC_EDC_RESTORE_CNT,
1513	mmDC_EDC_STATE_CNT,
1514	mmGDS_EDC_CNT,
1515	mmGDS_EDC_GRBM_CNT,
1516	mmGDS_EDC_OA_DED,
1517	mmSPI_EDC_CNT,
1518	mmSQC_ATC_EDC_GATCL1_CNT,
1519	mmSQC_EDC_CNT,
1520	mmSQ_EDC_DED_CNT,
1521	mmSQ_EDC_INFO,
1522	mmSQ_EDC_SEC_CNT,
1523	mmTCC_EDC_CNT,
1524	mmTCP_ATC_EDC_GATCL1_CNT,
1525	mmTCP_EDC_CNT,
1526	mmTD_EDC_CNT
1527};
1528
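/*
 * EDC GPR workaround: dispatch the shaders above so that every VGPR and
 * SGPR is written once, putting the ECC-protected register memories into
 * a known state, then enable DED reporting and read the EDC counters
 * back to clear them.
 */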
1529static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1530{
1531	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1532	struct amdgpu_ib ib;
1533	struct dma_fence *f = NULL;
1534	int r, i;
1535	u32 tmp;
1536	unsigned total_size, vgpr_offset, sgpr_offset;
1537	u64 gpu_addr;
1538
1539	/* only supported on CZ */
1540	if (adev->asic_type != CHIP_CARRIZO)
1541		return 0;
1542
1543	/* bail if the compute ring is not ready */
1544	if (!ring->sched.ready)
1545		return 0;
1546
1547	tmp = RREG32(mmGB_EDC_MODE);
1548	WREG32(mmGB_EDC_MODE, 0);
1549
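	/*
	 * IB sizing: each SET_SH_REG register pair costs 3 dwords (header,
	 * offset, value), the PGM_LO/HI write 4, DISPATCH_DIRECT 5 and the
	 * CS partial flush EVENT_WRITE 2; the trailing *4 converts dwords
	 * to bytes.
	 */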
1550	total_size =
1551		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1552	total_size +=
1553		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1554	total_size +=
1555		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1556	total_size = ALIGN(total_size, 256);
1557	vgpr_offset = total_size;
1558	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1559	sgpr_offset = total_size;
1560	total_size += sizeof(sgpr_init_compute_shader);
1561
1562	/* allocate an indirect buffer to put the commands in */
1563	memset(&ib, 0, sizeof(ib));
1564	r = amdgpu_ib_get(adev, NULL, total_size,
1565					AMDGPU_IB_POOL_DIRECT, &ib);
1566	if (r) {
1567		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1568		return r;
1569	}
1570
1571	/* load the compute shaders */
1572	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1573		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1574
1575	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1576		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1577
1578	/* init the ib length to 0 */
1579	ib.length_dw = 0;
1580
1581	/* VGPR */
1582	/* write the register state for the compute dispatch */
1583	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1584		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1585		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1586		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1587	}
1588	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1589	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1590	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1591	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1592	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1593	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1594
1595	/* write dispatch packet */
1596	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1597	ib.ptr[ib.length_dw++] = 8; /* x */
1598	ib.ptr[ib.length_dw++] = 1; /* y */
1599	ib.ptr[ib.length_dw++] = 1; /* z */
1600	ib.ptr[ib.length_dw++] =
1601		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1602
1603	/* write CS partial flush packet */
1604	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1605	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1606
1607	/* SGPR1 */
1608	/* write the register state for the compute dispatch */
1609	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1610		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1611		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1612		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1613	}
1614	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1615	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1616	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1617	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1618	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1619	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1620
1621	/* write dispatch packet */
1622	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1623	ib.ptr[ib.length_dw++] = 8; /* x */
1624	ib.ptr[ib.length_dw++] = 1; /* y */
1625	ib.ptr[ib.length_dw++] = 1; /* z */
1626	ib.ptr[ib.length_dw++] =
1627		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1628
1629	/* write CS partial flush packet */
1630	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1631	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1632
1633	/* SGPR2 */
1634	/* write the register state for the compute dispatch */
1635	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1636		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1637		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1638		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1639	}
1640	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1641	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1642	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1643	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1644	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1645	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1646
1647	/* write dispatch packet */
1648	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1649	ib.ptr[ib.length_dw++] = 8; /* x */
1650	ib.ptr[ib.length_dw++] = 1; /* y */
1651	ib.ptr[ib.length_dw++] = 1; /* z */
1652	ib.ptr[ib.length_dw++] =
1653		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1654
1655	/* write CS partial flush packet */
1656	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1657	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1658
1659	/* schedule the ib on the ring */
1660	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1661	if (r) {
1662		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1663		goto fail;
1664	}
1665
1666	/* wait for the GPU to finish processing the IB */
1667	r = dma_fence_wait(f, false);
1668	if (r) {
1669		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1670		goto fail;
1671	}
1672
1673	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1674	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1675	WREG32(mmGB_EDC_MODE, tmp);
1676
1677	tmp = RREG32(mmCC_GC_EDC_CONFIG);
1678	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1679	WREG32(mmCC_GC_EDC_CONFIG, tmp);
1680
1682	/* read back registers to clear the counters */
1683	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1684		RREG32(sec_ded_counter_registers[i]);
1685
1686fail:
1687	amdgpu_ib_free(adev, &ib, NULL);
1688	dma_fence_put(f);
1689
1690	return r;
1691}
1692
1693static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1694{
1695	u32 gb_addr_config;
1696	u32 mc_arb_ramcfg;
1697	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
1698	u32 tmp;
1699	int ret;
1700
1701	switch (adev->asic_type) {
1702	case CHIP_TOPAZ:
1703		adev->gfx.config.max_shader_engines = 1;
1704		adev->gfx.config.max_tile_pipes = 2;
1705		adev->gfx.config.max_cu_per_sh = 6;
1706		adev->gfx.config.max_sh_per_se = 1;
1707		adev->gfx.config.max_backends_per_se = 2;
1708		adev->gfx.config.max_texture_channel_caches = 2;
1709		adev->gfx.config.max_gprs = 256;
1710		adev->gfx.config.max_gs_threads = 32;
1711		adev->gfx.config.max_hw_contexts = 8;
1712
1713		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1714		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1715		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1716		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1717		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1718		break;
1719	case CHIP_FIJI:
1720		adev->gfx.config.max_shader_engines = 4;
1721		adev->gfx.config.max_tile_pipes = 16;
1722		adev->gfx.config.max_cu_per_sh = 16;
1723		adev->gfx.config.max_sh_per_se = 1;
1724		adev->gfx.config.max_backends_per_se = 4;
1725		adev->gfx.config.max_texture_channel_caches = 16;
1726		adev->gfx.config.max_gprs = 256;
1727		adev->gfx.config.max_gs_threads = 32;
1728		adev->gfx.config.max_hw_contexts = 8;
1729
1730		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1731		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1732		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1733		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1734		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1735		break;
1736	case CHIP_POLARIS11:
1737	case CHIP_POLARIS12:
1738		ret = amdgpu_atombios_get_gfx_info(adev);
1739		if (ret)
1740			return ret;
1741		adev->gfx.config.max_gprs = 256;
1742		adev->gfx.config.max_gs_threads = 32;
1743		adev->gfx.config.max_hw_contexts = 8;
1744
1745		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1746		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1747		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1748		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1749		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
1750		break;
1751	case CHIP_POLARIS10:
1752	case CHIP_VEGAM:
1753		ret = amdgpu_atombios_get_gfx_info(adev);
1754		if (ret)
1755			return ret;
1756		adev->gfx.config.max_gprs = 256;
1757		adev->gfx.config.max_gs_threads = 32;
1758		adev->gfx.config.max_hw_contexts = 8;
1759
1760		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1761		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1762		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1763		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1764		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1765		break;
1766	case CHIP_TONGA:
1767		adev->gfx.config.max_shader_engines = 4;
1768		adev->gfx.config.max_tile_pipes = 8;
1769		adev->gfx.config.max_cu_per_sh = 8;
1770		adev->gfx.config.max_sh_per_se = 1;
1771		adev->gfx.config.max_backends_per_se = 2;
1772		adev->gfx.config.max_texture_channel_caches = 8;
1773		adev->gfx.config.max_gprs = 256;
1774		adev->gfx.config.max_gs_threads = 32;
1775		adev->gfx.config.max_hw_contexts = 8;
1776
1777		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1778		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1779		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1780		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1781		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1782		break;
1783	case CHIP_CARRIZO:
1784		adev->gfx.config.max_shader_engines = 1;
1785		adev->gfx.config.max_tile_pipes = 2;
1786		adev->gfx.config.max_sh_per_se = 1;
1787		adev->gfx.config.max_backends_per_se = 2;
1788		adev->gfx.config.max_cu_per_sh = 8;
1789		adev->gfx.config.max_texture_channel_caches = 2;
1790		adev->gfx.config.max_gprs = 256;
1791		adev->gfx.config.max_gs_threads = 32;
1792		adev->gfx.config.max_hw_contexts = 8;
1793
1794		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1795		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1796		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1797		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1798		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1799		break;
1800	case CHIP_STONEY:
1801		adev->gfx.config.max_shader_engines = 1;
1802		adev->gfx.config.max_tile_pipes = 2;
1803		adev->gfx.config.max_sh_per_se = 1;
1804		adev->gfx.config.max_backends_per_se = 1;
1805		adev->gfx.config.max_cu_per_sh = 3;
1806		adev->gfx.config.max_texture_channel_caches = 2;
1807		adev->gfx.config.max_gprs = 256;
1808		adev->gfx.config.max_gs_threads = 16;
1809		adev->gfx.config.max_hw_contexts = 8;
1810
1811		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1812		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1813		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1814		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1815		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1816		break;
1817	default:
1818		adev->gfx.config.max_shader_engines = 2;
1819		adev->gfx.config.max_tile_pipes = 4;
1820		adev->gfx.config.max_cu_per_sh = 2;
1821		adev->gfx.config.max_sh_per_se = 1;
1822		adev->gfx.config.max_backends_per_se = 2;
1823		adev->gfx.config.max_texture_channel_caches = 4;
1824		adev->gfx.config.max_gprs = 256;
1825		adev->gfx.config.max_gs_threads = 32;
1826		adev->gfx.config.max_hw_contexts = 8;
1827
1828		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1829		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1830		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1831		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1832		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1833		break;
1834	}
1835
1836	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1837	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1838
1839	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
1840				MC_ARB_RAMCFG, NOOFBANK);
1841	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
1842				MC_ARB_RAMCFG, NOOFRANKS);
1843
1844	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1845	adev->gfx.config.mem_max_burst_length_bytes = 256;
1846	if (adev->flags & AMD_IS_APU) {
1847		/* Get memory bank mapping mode. */
1848		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1849		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1850		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1851
1852		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1853		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1854		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1855
1856		/* Validate settings in case only one DIMM is installed. */
1857		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1858			dimm00_addr_map = 0;
1859		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1860			dimm01_addr_map = 0;
1861		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1862			dimm10_addr_map = 0;
1863		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1864			dimm11_addr_map = 0;
1865
1866		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
1867		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
1868		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1869			adev->gfx.config.mem_row_size_in_kb = 2;
1870		else
1871			adev->gfx.config.mem_row_size_in_kb = 1;
1872	} else {
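		/*
		 * dGPU path: row size in bytes is 4 * 2^(8 + NOOFCOLS);
		 * convert to KB and clamp to 4 KB, the largest value the
		 * ROW_SIZE fixup below handles.
		 */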
1873		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1874		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1875		if (adev->gfx.config.mem_row_size_in_kb > 4)
1876			adev->gfx.config.mem_row_size_in_kb = 4;
1877	}
1878
1879	adev->gfx.config.shader_engine_tile_size = 32;
1880	adev->gfx.config.num_gpus = 1;
1881	adev->gfx.config.multi_gpu_tile_size = 64;
1882
1883	/* fix up row size */
1884	switch (adev->gfx.config.mem_row_size_in_kb) {
1885	case 1:
1886	default:
1887		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1888		break;
1889	case 2:
1890		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1891		break;
1892	case 4:
1893		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1894		break;
1895	}
1896	adev->gfx.config.gb_addr_config = gb_addr_config;
1897
1898	return 0;
1899}
1900
1901static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1902					int mec, int pipe, int queue)
1903{
1904	int r;
1905	unsigned irq_type;
1906	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1907	unsigned int hw_prio;
1910
1911	/* mec0 is me1 */
1912	ring->me = mec + 1;
1913	ring->pipe = pipe;
1914	ring->queue = queue;
1915
1916	ring->ring_obj = NULL;
1917	ring->use_doorbell = true;
1918	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
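	/* each ring owns one GFX8_MEC_HPD_SIZE slice of the EOP buffer
	 * allocated in gfx_v8_0_mec_init() */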
1919	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1920				+ (ring_id * GFX8_MEC_HPD_SIZE);
1921	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1922
1923	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1924		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1925		+ ring->pipe;
1926
1927	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1928			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
1929	/* type-2 packets are deprecated on MEC, use type-3 instead */
1930	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1931			     hw_prio, NULL);
1932	if (r)
1933		return r;
1934
1936	return 0;
1937}
1938
1939static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
1940
1941static int gfx_v8_0_sw_init(void *handle)
1942{
1943	int i, j, k, r, ring_id;
1944	struct amdgpu_ring *ring;
1945	struct amdgpu_kiq *kiq;
1946	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1947
1948	switch (adev->asic_type) {
1949	case CHIP_TONGA:
1950	case CHIP_CARRIZO:
1951	case CHIP_FIJI:
1952	case CHIP_POLARIS10:
1953	case CHIP_POLARIS11:
1954	case CHIP_POLARIS12:
1955	case CHIP_VEGAM:
1956		adev->gfx.mec.num_mec = 2;
1957		break;
1958	case CHIP_TOPAZ:
1959	case CHIP_STONEY:
1960	default:
1961		adev->gfx.mec.num_mec = 1;
1962		break;
1963	}
1964
1965	adev->gfx.mec.num_pipe_per_mec = 4;
1966	adev->gfx.mec.num_queue_per_pipe = 8;
1967
1968	/* EOP Event */
1969	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
1970	if (r)
1971		return r;
1972
1973	/* Privileged reg */
1974	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
1975			      &adev->gfx.priv_reg_irq);
1976	if (r)
1977		return r;
1978
1979	/* Privileged inst */
1980	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
1981			      &adev->gfx.priv_inst_irq);
1982	if (r)
1983		return r;
1984
1985	/* Add CP EDC/ECC irq  */
1986	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
1987			      &adev->gfx.cp_ecc_error_irq);
1988	if (r)
1989		return r;
1990
1991	/* SQ interrupts. */
1992	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
1993			      &adev->gfx.sq_irq);
1994	if (r) {
1995		DRM_ERROR("amdgpu_irq_add_id() for SQ failed: %d\n", r);
1996		return r;
1997	}
1998
1999	INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
2000
2001	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2002
2003	gfx_v8_0_scratch_init(adev);
2004
2005	r = gfx_v8_0_init_microcode(adev);
2006	if (r) {
2007		DRM_ERROR("Failed to load gfx firmware!\n");
2008		return r;
2009	}
2010
2011	r = adev->gfx.rlc.funcs->init(adev);
2012	if (r) {
2013		DRM_ERROR("Failed to init rlc BOs!\n");
2014		return r;
2015	}
2016
2017	r = gfx_v8_0_mec_init(adev);
2018	if (r) {
2019		DRM_ERROR("Failed to init MEC BOs!\n");
2020		return r;
2021	}
2022
2023	/* set up the gfx ring */
2024	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2025		ring = &adev->gfx.gfx_ring[i];
2026		ring->ring_obj = NULL;
2027		sprintf(ring->name, "gfx");
2028		/* no gfx doorbells on iceland */
2029		if (adev->asic_type != CHIP_TOPAZ) {
2030			ring->use_doorbell = true;
2031			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
2032		}
2033
2034		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2035				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2036				     AMDGPU_RING_PRIO_DEFAULT, NULL);
2037		if (r)
2038			return r;
2039	}
2040
2042	/* set up the compute queues - allocate horizontally across pipes */
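	/* with pipes in the innermost loop, consecutive ring ids land on
	 * different pipes rather than filling one pipe's queues first */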
2043	ring_id = 0;
2044	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2045		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2046			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2047				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2048					continue;
2049
2050				r = gfx_v8_0_compute_ring_init(adev,
2051								ring_id,
2052								i, k, j);
2053				if (r)
2054					return r;
2055
2056				ring_id++;
2057			}
2058		}
2059	}
2060
2061	r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE);
2062	if (r) {
2063		DRM_ERROR("Failed to init KIQ BOs!\n");
2064		return r;
2065	}
2066
2067	kiq = &adev->gfx.kiq;
2068	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2069	if (r)
2070		return r;
2071
2072	/* create MQD for all compute queues as well as KIQ for SRIOV case */
2073	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
2074	if (r)
2075		return r;
2076
2077	adev->gfx.ce_ram_size = 0x8000;
2078
2079	r = gfx_v8_0_gpu_early_init(adev);
2080	if (r)
2081		return r;
2082
2083	return 0;
2084}
2085
2086static int gfx_v8_0_sw_fini(void *handle)
2087{
2088	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2089	int i;
2090
2091	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2092		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2093	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2094		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2095
2096	amdgpu_gfx_mqd_sw_fini(adev);
2097	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2098	amdgpu_gfx_kiq_fini(adev);
2099
2100	gfx_v8_0_mec_fini(adev);
2101	amdgpu_gfx_rlc_fini(adev);
2102	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2103				&adev->gfx.rlc.clear_state_gpu_addr,
2104				(void **)&adev->gfx.rlc.cs_ptr);
2105	if ((adev->asic_type == CHIP_CARRIZO) ||
2106	    (adev->asic_type == CHIP_STONEY)) {
2107		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2108				&adev->gfx.rlc.cp_table_gpu_addr,
2109				(void **)&adev->gfx.rlc.cp_table_ptr);
2110	}
2111	gfx_v8_0_free_microcode(adev);
2112
2113	return 0;
2114}
2115
2116static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2117{
2118	uint32_t *modearray, *mod2array;
2119	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2120	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2121	u32 reg_offset;
2122
2123	modearray = adev->gfx.config.tile_mode_array;
2124	mod2array = adev->gfx.config.macrotile_mode_array;
2125
2126	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2127		modearray[reg_offset] = 0;
2128
2129	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2130		mod2array[reg_offset] = 0;
2131
2132	switch (adev->asic_type) {
2133	case CHIP_TOPAZ:
2134		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2135				PIPE_CONFIG(ADDR_SURF_P2) |
2136				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2137				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2138		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2139				PIPE_CONFIG(ADDR_SURF_P2) |
2140				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2141				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2142		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2143				PIPE_CONFIG(ADDR_SURF_P2) |
2144				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2145				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2146		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2147				PIPE_CONFIG(ADDR_SURF_P2) |
2148				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2149				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2150		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2151				PIPE_CONFIG(ADDR_SURF_P2) |
2152				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2153				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2154		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2155				PIPE_CONFIG(ADDR_SURF_P2) |
2156				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2157				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2158		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2159				PIPE_CONFIG(ADDR_SURF_P2) |
2160				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2161				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2162		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2163				PIPE_CONFIG(ADDR_SURF_P2));
2164		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2165				PIPE_CONFIG(ADDR_SURF_P2) |
2166				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2167				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2168		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2169				 PIPE_CONFIG(ADDR_SURF_P2) |
2170				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2171				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2172		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2173				 PIPE_CONFIG(ADDR_SURF_P2) |
2174				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2175				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2176		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2177				 PIPE_CONFIG(ADDR_SURF_P2) |
2178				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2179				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2180		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2181				 PIPE_CONFIG(ADDR_SURF_P2) |
2182				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2183				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2184		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2185				 PIPE_CONFIG(ADDR_SURF_P2) |
2186				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2187				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2188		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2189				 PIPE_CONFIG(ADDR_SURF_P2) |
2190				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2191				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2192		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2193				 PIPE_CONFIG(ADDR_SURF_P2) |
2194				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2195				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2196		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2197				 PIPE_CONFIG(ADDR_SURF_P2) |
2198				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2199				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2200		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2201				 PIPE_CONFIG(ADDR_SURF_P2) |
2202				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2203				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2204		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2205				 PIPE_CONFIG(ADDR_SURF_P2) |
2206				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2207				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2208		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2209				 PIPE_CONFIG(ADDR_SURF_P2) |
2210				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2211				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2212		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2213				 PIPE_CONFIG(ADDR_SURF_P2) |
2214				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2215				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2216		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2217				 PIPE_CONFIG(ADDR_SURF_P2) |
2218				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2219				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2220		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2221				 PIPE_CONFIG(ADDR_SURF_P2) |
2222				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2223				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2224		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2225				 PIPE_CONFIG(ADDR_SURF_P2) |
2226				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2227				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2228		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2229				 PIPE_CONFIG(ADDR_SURF_P2) |
2230				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2231				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2232		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2233				 PIPE_CONFIG(ADDR_SURF_P2) |
2234				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2235				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2236
2237		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2238				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2239				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2240				NUM_BANKS(ADDR_SURF_8_BANK));
2241		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2242				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2243				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2244				NUM_BANKS(ADDR_SURF_8_BANK));
2245		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2246				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2247				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2248				NUM_BANKS(ADDR_SURF_8_BANK));
2249		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2250				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2251				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2252				NUM_BANKS(ADDR_SURF_8_BANK));
2253		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2254				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2255				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2256				NUM_BANKS(ADDR_SURF_8_BANK));
2257		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2258				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2259				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2260				NUM_BANKS(ADDR_SURF_8_BANK));
2261		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2262				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2263				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2264				NUM_BANKS(ADDR_SURF_8_BANK));
2265		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2266				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2267				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2268				NUM_BANKS(ADDR_SURF_16_BANK));
2269		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2270				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2271				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2272				NUM_BANKS(ADDR_SURF_16_BANK));
2273		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2274				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2275				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2276				 NUM_BANKS(ADDR_SURF_16_BANK));
2277		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2278				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2279				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2280				 NUM_BANKS(ADDR_SURF_16_BANK));
2281		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2282				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2283				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2284				 NUM_BANKS(ADDR_SURF_16_BANK));
2285		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2286				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2287				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2288				 NUM_BANKS(ADDR_SURF_16_BANK));
2289		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2290				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2291				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2292				 NUM_BANKS(ADDR_SURF_8_BANK));
2293
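		/* tile-mode indices 7, 12, 17 and 23 (and macrotile index 7)
		 * are left at their reset values on this part */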
2294		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2295			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2296			    reg_offset != 23)
2297				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2298
2299		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2300			if (reg_offset != 7)
2301				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2302
2303		break;
2304	case CHIP_FIJI:
2305	case CHIP_VEGAM:
2306		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2307				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2308				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2309				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2310		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2311				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2312				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2313				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2314		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2315				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2316				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2317				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2318		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2319				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2320				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2321				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2322		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2323				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2324				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2325				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2326		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2327				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2328				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2329				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2330		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2331				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2332				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2333				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2334		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2335				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2336				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2337				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2338		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2339				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2340		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2341				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2342				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2343				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2344		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2345				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2346				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2347				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2348		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2349				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2350				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2351				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2352		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2353				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2354				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2355				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2356		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2357				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2358				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2359				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2360		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2361				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2362				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2363				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2364		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2365				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2366				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2367				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2368		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2369				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2370				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2371				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2372		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2373				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2374				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2375				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2376		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2377				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2378				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2379				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2380		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2381				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2382				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2383				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2384		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2385				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2386				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2387				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2388		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2389				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2390				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2391				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2392		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2393				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2394				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2395				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2396		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2397				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2398				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2399				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2400		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2401				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2402				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2403				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2404		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2405				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2406				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2407				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2408		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2409				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2410				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2411				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2412		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2413				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2414				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2415				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2416		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2417				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2418				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2419				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2420		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2421				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2422				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2423				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2424		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2425				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2426				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2427				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2428
2429		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2430				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2431				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2432				NUM_BANKS(ADDR_SURF_8_BANK));
2433		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2434				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2435				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2436				NUM_BANKS(ADDR_SURF_8_BANK));
2437		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2438				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2439				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2440				NUM_BANKS(ADDR_SURF_8_BANK));
2441		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2442				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2443				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2444				NUM_BANKS(ADDR_SURF_8_BANK));
2445		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2446				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2447				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2448				NUM_BANKS(ADDR_SURF_8_BANK));
2449		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2450				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2451				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2452				NUM_BANKS(ADDR_SURF_8_BANK));
2453		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2454				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2455				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2456				NUM_BANKS(ADDR_SURF_8_BANK));
2457		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2458				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2459				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2460				NUM_BANKS(ADDR_SURF_8_BANK));
2461		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2462				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2463				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2464				NUM_BANKS(ADDR_SURF_8_BANK));
2465		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2466				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2467				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2468				 NUM_BANKS(ADDR_SURF_8_BANK));
2469		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2470				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2471				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2472				 NUM_BANKS(ADDR_SURF_8_BANK));
2473		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2474				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2475				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2476				 NUM_BANKS(ADDR_SURF_8_BANK));
2477		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2478				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2479				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2480				 NUM_BANKS(ADDR_SURF_8_BANK));
2481		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2482				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2483				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2484				 NUM_BANKS(ADDR_SURF_4_BANK));
2485
2486		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2487			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2488
2489		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2490			if (reg_offset != 7)
2491				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2492
2493		break;
2494	case CHIP_TONGA:
2495		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2496				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2497				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2498				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2499		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2500				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2501				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2502				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2503		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2504				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2505				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2506				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2507		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2508				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2509				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2510				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2511		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2512				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2513				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2514				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2515		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2516				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2517				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2518				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2519		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2520				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2521				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2522				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2523		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2524				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2525				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2526				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2527		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2528				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2529		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2530				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2531				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2532				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2533		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2534				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2535				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2536				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2537		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2538				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2539				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2540				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2541		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2542				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2543				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2544				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2545		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2546				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2547				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2548				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2549		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2550				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2551				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2552				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2553		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2554				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2555				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2556				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2557		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2558				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2559				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2560				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2561		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2562				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2563				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2564				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2565		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2566				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2567				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2568				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2569		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2570				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2571				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2572				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2573		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2574				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2575				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2576				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2577		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2578				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2579				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2580				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2581		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2582				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2583				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2584				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2585		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2586				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2587				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2588				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2589		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2590				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2591				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2592				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2593		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2594				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2595				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2596				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2597		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2598				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2599				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2600				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2601		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2602				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2603				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2604				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2605		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2606				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2607				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2608				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2609		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2610				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2611				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2612				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2613		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2614				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2615				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2616				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2617
2618		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2619				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2620				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2621				NUM_BANKS(ADDR_SURF_16_BANK));
2622		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2623				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2624				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2625				NUM_BANKS(ADDR_SURF_16_BANK));
2626		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2627				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2628				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2629				NUM_BANKS(ADDR_SURF_16_BANK));
2630		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2631				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2632				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2633				NUM_BANKS(ADDR_SURF_16_BANK));
2634		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2635				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2636				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2637				NUM_BANKS(ADDR_SURF_16_BANK));
2638		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2639				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2640				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2641				NUM_BANKS(ADDR_SURF_16_BANK));
2642		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2643				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2644				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2645				NUM_BANKS(ADDR_SURF_16_BANK));
2646		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2647				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2648				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2649				NUM_BANKS(ADDR_SURF_16_BANK));
2650		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2651				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2652				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2653				NUM_BANKS(ADDR_SURF_16_BANK));
2654		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2655				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2656				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2657				 NUM_BANKS(ADDR_SURF_16_BANK));
2658		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2659				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2660				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2661				 NUM_BANKS(ADDR_SURF_16_BANK));
2662		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2663				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2664				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2665				 NUM_BANKS(ADDR_SURF_8_BANK));
2666		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2667				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2668				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2669				 NUM_BANKS(ADDR_SURF_4_BANK));
2670		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2671				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2672				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2673				 NUM_BANKS(ADDR_SURF_4_BANK));
2674
2675		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2676			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2677
2678		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2679			if (reg_offset != 7)
2680				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2681
2682		break;
2683	case CHIP_POLARIS11:
2684	case CHIP_POLARIS12:
2685		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2686				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2687				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2688				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2689		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2690				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2691				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2692				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2693		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2694				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2695				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2696				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2697		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2698				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2699				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2700				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2701		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2702				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2703				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2704				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2705		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2706				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2707				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2708				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2709		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2710				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2711				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2712				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2713		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2714				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2715				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2716				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2717		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2718				PIPE_CONFIG(ADDR_SURF_P4_16x16));
2719		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2720				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2721				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2722				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2723		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2724				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2725				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2726				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2727		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2728				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2729				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2730				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2731		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2732				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2733				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2734				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2735		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2736				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2737				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2738				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2739		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2740				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2741				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2742				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2743		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2744				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2745				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2746				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2747		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2748				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2749				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2750				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2751		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2752				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2753				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2754				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2755		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2756				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2757				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2758				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2759		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2760				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2761				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2762				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2763		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2764				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2765				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2766				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2767		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2768				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2769				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2770				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2771		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2772				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2773				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2774				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2775		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2776				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2777				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2778				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2779		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2780				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2781				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2782				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2783		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2784				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2785				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2786				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2787		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2788				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2789				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2790				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2791		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2792				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2793				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2794				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2795		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2796				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2797				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2798				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2799		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2800				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2801				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2802				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2803		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2804				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2805				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2806				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2807
2808		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2809				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2810				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2811				NUM_BANKS(ADDR_SURF_16_BANK));
2812
2813		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2814				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2815				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2816				NUM_BANKS(ADDR_SURF_16_BANK));
2817
2818		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2819				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2820				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2821				NUM_BANKS(ADDR_SURF_16_BANK));
2822
2823		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2824				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2825				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2826				NUM_BANKS(ADDR_SURF_16_BANK));
2827
2828		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2829				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2830				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2831				NUM_BANKS(ADDR_SURF_16_BANK));
2832
2833		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2834				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2835				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2836				NUM_BANKS(ADDR_SURF_16_BANK));
2837
2838		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2839				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2840				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2841				NUM_BANKS(ADDR_SURF_16_BANK));
2842
2843		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2844				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2845				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2846				NUM_BANKS(ADDR_SURF_16_BANK));
2847
2848		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2849				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2850				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2851				NUM_BANKS(ADDR_SURF_16_BANK));
2852
2853		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2854				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2855				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2856				NUM_BANKS(ADDR_SURF_16_BANK));
2857
2858		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2859				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2860				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2861				NUM_BANKS(ADDR_SURF_16_BANK));
2862
2863		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2864				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2865				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2866				NUM_BANKS(ADDR_SURF_16_BANK));
2867
2868		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2869				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2870				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2871				NUM_BANKS(ADDR_SURF_8_BANK));
2872
2873		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2874				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2875				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2876				NUM_BANKS(ADDR_SURF_4_BANK));
2877
2878		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2879			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2880
2881		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2882			if (reg_offset != 7)
2883				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2884
2885		break;
2886	case CHIP_POLARIS10:
2887		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2888				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2889				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2890				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2891		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2892				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2893				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2894				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2895		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2896				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2897				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2898				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2899		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2900				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2901				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2902				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2903		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2904				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2905				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2906				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2907		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2908				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2909				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2910				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2911		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2912				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2913				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2914				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2915		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2916				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2917				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2918				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2919		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2920				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2921		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2922				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2923				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2924				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2925		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2926				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2927				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2928				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2929		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2930				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2931				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2932				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2933		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2934				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2935				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2936				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2937		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2938				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2939				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2940				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2941		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2942				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2943				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2944				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2945		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2946				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2947				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2948				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2949		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2950				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2951				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2952				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2953		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2954				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2955				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2956				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2957		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2958				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2959				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2960				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2961		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2962				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2963				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2964				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2965		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2966				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2967				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2968				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2969		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2970				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2971				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2972				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2973		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2974				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2975				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2976				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2977		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2978				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2979				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2980				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2981		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2982				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2983				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2984				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2985		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2986				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2987				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2988				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2989		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2990				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2991				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2992				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2993		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2994				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2995				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2996				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2997		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2998				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2999				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3000				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3001		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3002				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
3003				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3004				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3005		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3006				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
3007				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3008				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3009
3010		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3011				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3012				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3013				NUM_BANKS(ADDR_SURF_16_BANK));
3014
3015		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3016				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3017				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3018				NUM_BANKS(ADDR_SURF_16_BANK));
3019
3020		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3021				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3022				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3023				NUM_BANKS(ADDR_SURF_16_BANK));
3024
3025		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3026				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3027				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3028				NUM_BANKS(ADDR_SURF_16_BANK));
3029
3030		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3031				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3032				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3033				NUM_BANKS(ADDR_SURF_16_BANK));
3034
3035		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3036				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3037				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3038				NUM_BANKS(ADDR_SURF_16_BANK));
3039
3040		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3041				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3042				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3043				NUM_BANKS(ADDR_SURF_16_BANK));
3044
3045		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3046				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3047				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3048				NUM_BANKS(ADDR_SURF_16_BANK));
3049
3050		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3051				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3052				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3053				NUM_BANKS(ADDR_SURF_16_BANK));
3054
3055		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3056				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3057				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3058				NUM_BANKS(ADDR_SURF_16_BANK));
3059
3060		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3061				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3062				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3063				NUM_BANKS(ADDR_SURF_16_BANK));
3064
3065		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3066				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3067				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3068				NUM_BANKS(ADDR_SURF_8_BANK));
3069
3070		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3071				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3072				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3073				NUM_BANKS(ADDR_SURF_4_BANK));
3074
3075		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3076				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3077				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3078				NUM_BANKS(ADDR_SURF_4_BANK));
3079
3080		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3081			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3082
3083		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3084			if (reg_offset != 7)
3085				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3086
3087		break;
3088	case CHIP_STONEY:
3089		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3090				PIPE_CONFIG(ADDR_SURF_P2) |
3091				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3092				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3093		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3094				PIPE_CONFIG(ADDR_SURF_P2) |
3095				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3096				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3097		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3098				PIPE_CONFIG(ADDR_SURF_P2) |
3099				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3100				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3101		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3102				PIPE_CONFIG(ADDR_SURF_P2) |
3103				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3104				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3105		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3106				PIPE_CONFIG(ADDR_SURF_P2) |
3107				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3108				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3109		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3110				PIPE_CONFIG(ADDR_SURF_P2) |
3111				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3112				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3113		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3114				PIPE_CONFIG(ADDR_SURF_P2) |
3115				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3116				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3117		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3118				PIPE_CONFIG(ADDR_SURF_P2));
3119		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3120				PIPE_CONFIG(ADDR_SURF_P2) |
3121				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3122				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3123		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3124				 PIPE_CONFIG(ADDR_SURF_P2) |
3125				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3126				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3127		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3128				 PIPE_CONFIG(ADDR_SURF_P2) |
3129				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3130				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3131		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3132				 PIPE_CONFIG(ADDR_SURF_P2) |
3133				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3134				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3135		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3136				 PIPE_CONFIG(ADDR_SURF_P2) |
3137				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3138				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3139		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3140				 PIPE_CONFIG(ADDR_SURF_P2) |
3141				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3142				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3143		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3144				 PIPE_CONFIG(ADDR_SURF_P2) |
3145				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3146				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3147		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3148				 PIPE_CONFIG(ADDR_SURF_P2) |
3149				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3150				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3151		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3152				 PIPE_CONFIG(ADDR_SURF_P2) |
3153				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3154				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3155		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3156				 PIPE_CONFIG(ADDR_SURF_P2) |
3157				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3158				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3159		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3160				 PIPE_CONFIG(ADDR_SURF_P2) |
3161				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3162				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3163		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3164				 PIPE_CONFIG(ADDR_SURF_P2) |
3165				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3166				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3167		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3168				 PIPE_CONFIG(ADDR_SURF_P2) |
3169				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3170				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3171		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3172				 PIPE_CONFIG(ADDR_SURF_P2) |
3173				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3174				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3175		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3176				 PIPE_CONFIG(ADDR_SURF_P2) |
3177				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3178				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3179		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3180				 PIPE_CONFIG(ADDR_SURF_P2) |
3181				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3182				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3183		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3184				 PIPE_CONFIG(ADDR_SURF_P2) |
3185				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3186				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3187		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3188				 PIPE_CONFIG(ADDR_SURF_P2) |
3189				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3190				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3191
3192		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3193				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3194				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3195				NUM_BANKS(ADDR_SURF_8_BANK));
3196		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3197				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3198				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3199				NUM_BANKS(ADDR_SURF_8_BANK));
3200		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3201				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3202				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3203				NUM_BANKS(ADDR_SURF_8_BANK));
3204		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3205				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3206				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3207				NUM_BANKS(ADDR_SURF_8_BANK));
3208		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3209				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3210				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3211				NUM_BANKS(ADDR_SURF_8_BANK));
3212		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3213				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3214				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3215				NUM_BANKS(ADDR_SURF_8_BANK));
3216		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3217				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3218				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3219				NUM_BANKS(ADDR_SURF_8_BANK));
3220		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3221				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3222				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3223				NUM_BANKS(ADDR_SURF_16_BANK));
3224		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3225				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3226				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3227				NUM_BANKS(ADDR_SURF_16_BANK));
3228		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3229				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3230				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3231				 NUM_BANKS(ADDR_SURF_16_BANK));
3232		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3233				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3234				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3235				 NUM_BANKS(ADDR_SURF_16_BANK));
3236		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3237				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3238				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3239				 NUM_BANKS(ADDR_SURF_16_BANK));
3240		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3241				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3242				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3243				 NUM_BANKS(ADDR_SURF_16_BANK));
3244		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3245				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3246				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3247				 NUM_BANKS(ADDR_SURF_8_BANK));
3248
3249		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3250			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3251			    reg_offset != 23)
3252				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3253
3254		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3255			if (reg_offset != 7)
3256				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3257
3258		break;
3259	default:
3260		dev_warn(adev->dev,
3261		 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init(), falling through to CHIP_CARRIZO\n",
3262			 adev->asic_type);
3263		fallthrough;
3264
3265	case CHIP_CARRIZO:
3266		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3267				PIPE_CONFIG(ADDR_SURF_P2) |
3268				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3269				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3270		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3271				PIPE_CONFIG(ADDR_SURF_P2) |
3272				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3273				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3274		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3275				PIPE_CONFIG(ADDR_SURF_P2) |
3276				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3277				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3278		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3279				PIPE_CONFIG(ADDR_SURF_P2) |
3280				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3281				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3282		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3283				PIPE_CONFIG(ADDR_SURF_P2) |
3284				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3285				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3286		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3287				PIPE_CONFIG(ADDR_SURF_P2) |
3288				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3289				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3290		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3291				PIPE_CONFIG(ADDR_SURF_P2) |
3292				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3293				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3294		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3295				PIPE_CONFIG(ADDR_SURF_P2));
3296		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3297				PIPE_CONFIG(ADDR_SURF_P2) |
3298				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3299				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3300		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3301				 PIPE_CONFIG(ADDR_SURF_P2) |
3302				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3303				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3304		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3305				 PIPE_CONFIG(ADDR_SURF_P2) |
3306				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3307				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3308		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3309				 PIPE_CONFIG(ADDR_SURF_P2) |
3310				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3311				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3312		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3313				 PIPE_CONFIG(ADDR_SURF_P2) |
3314				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3315				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3316		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3317				 PIPE_CONFIG(ADDR_SURF_P2) |
3318				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3319				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3320		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3321				 PIPE_CONFIG(ADDR_SURF_P2) |
3322				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3323				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3324		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3325				 PIPE_CONFIG(ADDR_SURF_P2) |
3326				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3327				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3328		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3329				 PIPE_CONFIG(ADDR_SURF_P2) |
3330				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3331				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3332		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3333				 PIPE_CONFIG(ADDR_SURF_P2) |
3334				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3335				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3336		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3337				 PIPE_CONFIG(ADDR_SURF_P2) |
3338				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3339				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3340		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3341				 PIPE_CONFIG(ADDR_SURF_P2) |
3342				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3343				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3344		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3345				 PIPE_CONFIG(ADDR_SURF_P2) |
3346				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3347				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3348		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3349				 PIPE_CONFIG(ADDR_SURF_P2) |
3350				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3351				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3352		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3353				 PIPE_CONFIG(ADDR_SURF_P2) |
3354				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3355				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3356		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3357				 PIPE_CONFIG(ADDR_SURF_P2) |
3358				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3359				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3360		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3361				 PIPE_CONFIG(ADDR_SURF_P2) |
3362				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3363				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3364		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3365				 PIPE_CONFIG(ADDR_SURF_P2) |
3366				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3367				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3368
3369		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3370				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3371				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3372				NUM_BANKS(ADDR_SURF_8_BANK));
3373		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3374				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3375				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3376				NUM_BANKS(ADDR_SURF_8_BANK));
3377		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3378				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3379				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3380				NUM_BANKS(ADDR_SURF_8_BANK));
3381		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3382				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3383				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3384				NUM_BANKS(ADDR_SURF_8_BANK));
3385		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3386				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3387				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3388				NUM_BANKS(ADDR_SURF_8_BANK));
3389		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3390				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3391				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3392				NUM_BANKS(ADDR_SURF_8_BANK));
3393		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3394				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3395				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3396				NUM_BANKS(ADDR_SURF_8_BANK));
3397		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3398				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3399				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3400				NUM_BANKS(ADDR_SURF_16_BANK));
3401		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3402				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3403				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3404				NUM_BANKS(ADDR_SURF_16_BANK));
3405		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3406				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3407				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3408				 NUM_BANKS(ADDR_SURF_16_BANK));
3409		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3410				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3411				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3412				 NUM_BANKS(ADDR_SURF_16_BANK));
3413		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3414				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3415				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3416				 NUM_BANKS(ADDR_SURF_16_BANK));
3417		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3418				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3419				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3420				 NUM_BANKS(ADDR_SURF_16_BANK));
3421		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3422				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3423				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3424				 NUM_BANKS(ADDR_SURF_8_BANK));
3425
3426		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3427			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3428			    reg_offset != 23)
3429				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3430
3431		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3432			if (reg_offset != 7)
3433				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3434
3435		break;
3436	}
3437}
3438
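/*
 * Steer indexed register access to a single shader engine (SE),
 * shader array (SH) and instance via GRBM_GFX_INDEX.  Passing
 * 0xffffffff for a field selects broadcast writes to all units of
 * that type instead of a single index.
 */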
3439static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3440				  u32 se_num, u32 sh_num, u32 instance)
3441{
3442	u32 data;
3443
3444	if (instance == 0xffffffff)
3445		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3446	else
3447		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3448
3449	if (se_num == 0xffffffff)
3450		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
3451	else
3452		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
3453
3454	if (sh_num == 0xffffffff)
3455		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
3456	else
3457		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
3458
3459	WREG32(mmGRBM_GFX_INDEX, data);
3460}
3461
3462static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
3463				  u32 me, u32 pipe, u32 q, u32 vm)
3464{
3465	vi_srbm_select(adev, me, pipe, q, vm);
3466}
3467
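/*
 * Return a bitmap of the render backends (RBs) active in the
 * currently selected SE/SH: OR the hardware and user disable masks
 * together, then invert the result under the per-SH backend mask.
 */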
3468static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3469{
3470	u32 data, mask;
3471
3472	data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
3473		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3474
3475	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
3476
3477	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3478					 adev->gfx.config.max_sh_per_se);
3479
3480	return (~data) & mask;
3481}
3482
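/*
 * Fill in the default (unharvested) PA_SC_RASTER_CONFIG and
 * PA_SC_RASTER_CONFIG_1 values for each ASIC.
 */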
3483static void
3484gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3485{
3486	switch (adev->asic_type) {
3487	case CHIP_FIJI:
3488	case CHIP_VEGAM:
3489		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3490			  RB_XSEL2(1) | PKR_MAP(2) |
3491			  PKR_XSEL(1) | PKR_YSEL(1) |
3492			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3493		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3494			   SE_PAIR_YSEL(2);
3495		break;
3496	case CHIP_TONGA:
3497	case CHIP_POLARIS10:
3498		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3499			  SE_XSEL(1) | SE_YSEL(1);
3500		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3501			   SE_PAIR_YSEL(2);
3502		break;
3503	case CHIP_TOPAZ:
3504	case CHIP_CARRIZO:
3505		*rconf |= RB_MAP_PKR0(2);
3506		*rconf1 |= 0x0;
3507		break;
3508	case CHIP_POLARIS11:
3509	case CHIP_POLARIS12:
3510		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3511			  SE_XSEL(1) | SE_YSEL(1);
3512		*rconf1 |= 0x0;
3513		break;
3514	case CHIP_STONEY:
3515		*rconf |= 0x0;
3516		*rconf1 |= 0x0;
3517		break;
3518	default:
3519		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3520		break;
3521	}
3522}
3523
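/*
 * Program per-SE raster configs for parts with harvested RBs.  The
 * global rb_mask is split into one mask per shader engine; any SE
 * pair, packer or RB pair that has a disabled half gets its
 * SE_PAIR_MAP/SE_MAP/PKR_MAP/RB_MAP_PKR* field remapped so that work
 * is only routed to backends which are actually present.
 */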
3524static void
3525gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3526					u32 raster_config, u32 raster_config_1,
3527					unsigned rb_mask, unsigned num_rb)
3528{
3529	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3530	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3531	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3532	unsigned rb_per_se = num_rb / num_se;
3533	unsigned se_mask[4];
3534	unsigned se;
3535
3536	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3537	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3538	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3539	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3540
3541	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3542	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3543	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3544
3545	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3546			     (!se_mask[2] && !se_mask[3]))) {
3547		raster_config_1 &= ~SE_PAIR_MAP_MASK;
3548
3549		if (!se_mask[0] && !se_mask[1]) {
3550			raster_config_1 |=
3551				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3552		} else {
3553			raster_config_1 |=
3554				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3555		}
3556	}
3557
3558	for (se = 0; se < num_se; se++) {
3559		unsigned raster_config_se = raster_config;
3560		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3561		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3562		int idx = (se / 2) * 2;
3563
3564		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3565			raster_config_se &= ~SE_MAP_MASK;
3566
3567			if (!se_mask[idx]) {
3568				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3569			} else {
3570				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3571			}
3572		}
3573
3574		pkr0_mask &= rb_mask;
3575		pkr1_mask &= rb_mask;
3576		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3577			raster_config_se &= ~PKR_MAP_MASK;
3578
3579			if (!pkr0_mask) {
3580				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3581			} else {
3582				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3583			}
3584		}
3585
3586		if (rb_per_se >= 2) {
3587			unsigned rb0_mask = 1 << (se * rb_per_se);
3588			unsigned rb1_mask = rb0_mask << 1;
3589
3590			rb0_mask &= rb_mask;
3591			rb1_mask &= rb_mask;
3592			if (!rb0_mask || !rb1_mask) {
3593				raster_config_se &= ~RB_MAP_PKR0_MASK;
3594
3595				if (!rb0_mask) {
3596					raster_config_se |=
3597						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3598				} else {
3599					raster_config_se |=
3600						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3601				}
3602			}
3603
3604			if (rb_per_se > 2) {
3605				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3606				rb1_mask = rb0_mask << 1;
3607				rb0_mask &= rb_mask;
3608				rb1_mask &= rb_mask;
3609				if (!rb0_mask || !rb1_mask) {
3610					raster_config_se &= ~RB_MAP_PKR1_MASK;
3611
3612					if (!rb0_mask) {
3613						raster_config_se |=
3614							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3615					} else {
3616						raster_config_se |=
3617							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3618					}
3619				}
3620			}
3621		}
3622
3623		/* GRBM_GFX_INDEX has a different offset on VI */
3624		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
3625		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3626		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3627	}
3628
3629	/* GRBM_GFX_INDEX has a different offset on VI */
3630	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3631}
3632
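/*
 * Discover which RBs are active on each SE/SH, program the raster
 * configuration (via the harvested path when some backends are
 * disabled), and cache the per-SE/SH raster config registers for
 * userspace queries.
 */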
3633static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3634{
3635	int i, j;
3636	u32 data;
3637	u32 raster_config = 0, raster_config_1 = 0;
3638	u32 active_rbs = 0;
3639	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3640					adev->gfx.config.max_sh_per_se;
3641	unsigned num_rb_pipes;
3642
3643	mutex_lock(&adev->grbm_idx_mutex);
3644	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3645		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3646			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3647			data = gfx_v8_0_get_rb_active_bitmap(adev);
3648			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3649					       rb_bitmap_width_per_sh);
3650		}
3651	}
3652	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3653
3654	adev->gfx.config.backend_enable_mask = active_rbs;
3655	adev->gfx.config.num_rbs = hweight32(active_rbs);
3656
3657	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3658			     adev->gfx.config.max_shader_engines, 16);
3659
3660	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3661
3662	if (!adev->gfx.config.backend_enable_mask ||
3663			adev->gfx.config.num_rbs >= num_rb_pipes) {
3664		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3665		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3666	} else {
3667		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3668							adev->gfx.config.backend_enable_mask,
3669							num_rb_pipes);
3670	}
3671
3672	/* cache the values for userspace */
3673	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3674		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3675			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3676			adev->gfx.config.rb_config[i][j].rb_backend_disable =
3677				RREG32(mmCC_RB_BACKEND_DISABLE);
3678			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
3679				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3680			adev->gfx.config.rb_config[i][j].raster_config =
3681				RREG32(mmPA_SC_RASTER_CONFIG);
3682			adev->gfx.config.rb_config[i][j].raster_config_1 =
3683				RREG32(mmPA_SC_RASTER_CONFIG_1);
3684		}
3685	}
3686	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3687	mutex_unlock(&adev->grbm_idx_mutex);
3688}
3689
3690#define DEFAULT_SH_MEM_BASES	(0x6000)
3691/**
3692 * gfx_v8_0_init_compute_vmid - init compute vmid sh_mem registers
3693 *
3694 * @adev: amdgpu_device pointer
3695 *
3696 * Initialize compute vmid sh_mem registers
3697 *
3698 */
3699static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
3700{
3701	int i;
3702	uint32_t sh_mem_config;
3703	uint32_t sh_mem_bases;
3704
3705	/*
3706	 * Configure apertures:
3707	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
3708	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
3709	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
3710	 */
3711	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
3712
3713	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
3714			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
3715			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
3716			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
3717			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
3718			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
3719
3720	mutex_lock(&adev->srbm_mutex);
3721	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3722		vi_srbm_select(adev, 0, 0, 0, i);
3723		/* CP and shaders */
3724		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
3725		WREG32(mmSH_MEM_APE1_BASE, 1);
3726		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3727		WREG32(mmSH_MEM_BASES, sh_mem_bases);
3728	}
3729	vi_srbm_select(adev, 0, 0, 0, 0);
3730	mutex_unlock(&adev->srbm_mutex);
3731
3732	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
3733	   access. These should be enabled by FW for target VMIDs. */
3734	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3735		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
3736		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
3737		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
3738		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
3739	}
3740}
3741
3742static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
3743{
3744	int vmid;
3745
3746	/*
3747	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
3748	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
3749	 * the driver can enable them for graphics. VMID0 should maintain
3750	 * access so that HWS firmware can save/restore entries.
3751	 */
3752	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
3753		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
3754		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
3755		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
3756		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
3757	}
3758}
3759
3760static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3761{
3762	switch (adev->asic_type) {
3763	default:
3764		adev->gfx.config.double_offchip_lds_buf = 1;
3765		break;
3766	case CHIP_CARRIZO:
3767	case CHIP_STONEY:
3768		adev->gfx.config.double_offchip_lds_buf = 0;
3769		break;
3770	}
3771}
3772
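/*
 * Program the static GFX constants: GRBM read timeout, address
 * config, tiling tables, RB setup, CU info, per-VMID SH_MEM
 * apertures, PA_SC FIFO sizes and SPI arbitration priority.  VMID 0
 * gets an uncached default MTYPE and base 0; the other VMIDs use
 * MTYPE_NC with the shared aperture base.
 */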
3773static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
3774{
3775	u32 tmp, sh_static_mem_cfg;
3776	int i;
3777
3778	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
3779	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3780	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3781	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
3782
3783	gfx_v8_0_tiling_mode_table_init(adev);
3784	gfx_v8_0_setup_rb(adev);
3785	gfx_v8_0_get_cu_info(adev);
3786	gfx_v8_0_config_init(adev);
3787
3788	/* XXX SH_MEM regs */
3789	/* where to put LDS, scratch, GPUVM in FSA64 space */
3790	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
3791				   SWIZZLE_ENABLE, 1);
3792	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3793				   ELEMENT_SIZE, 1);
3794	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3795				   INDEX_STRIDE, 3);
3796	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
3797
3798	mutex_lock(&adev->srbm_mutex);
3799	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
3800		vi_srbm_select(adev, 0, 0, 0, i);
3801		/* CP and shaders */
3802		if (i == 0) {
3803			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
3804			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3805			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3806					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3807			WREG32(mmSH_MEM_CONFIG, tmp);
3808			WREG32(mmSH_MEM_BASES, 0);
3809		} else {
3810			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
3811			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3812			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3813					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3814			WREG32(mmSH_MEM_CONFIG, tmp);
3815			tmp = adev->gmc.shared_aperture_start >> 48;
3816			WREG32(mmSH_MEM_BASES, tmp);
3817		}
3818
3819		WREG32(mmSH_MEM_APE1_BASE, 1);
3820		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3821	}
3822	vi_srbm_select(adev, 0, 0, 0, 0);
3823	mutex_unlock(&adev->srbm_mutex);
3824
3825	gfx_v8_0_init_compute_vmid(adev);
3826	gfx_v8_0_init_gds_vmid(adev);
3827
3828	mutex_lock(&adev->grbm_idx_mutex);
3829	/*
3830	 * make sure that the following register writes are broadcast
3831	 * to all the shaders
3832	 */
3833	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3834
3835	WREG32(mmPA_SC_FIFO_SIZE,
3836		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
3837			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
3838		   (adev->gfx.config.sc_prim_fifo_size_backend <<
3839			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
3840		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
3841			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
3842		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
3843			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
3844
3845	tmp = RREG32(mmSPI_ARB_PRIORITY);
3846	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
3847	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
3848	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
3849	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
3850	WREG32(mmSPI_ARB_PRIORITY, tmp);
3851
3852	mutex_unlock(&adev->grbm_idx_mutex);
3854}
3855
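/*
 * Wait for the RLC serdes to go idle: poll RLC_SERDES_CU_MASTER_BUSY
 * on every SE/SH (for up to adev->usec_timeout microseconds each),
 * then poll the non-CU master busy bits.
 */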
3856static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3857{
3858	u32 i, j, k;
3859	u32 mask;
3860
3861	mutex_lock(&adev->grbm_idx_mutex);
3862	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3863		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3864			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3865			for (k = 0; k < adev->usec_timeout; k++) {
3866				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3867					break;
3868				udelay(1);
3869			}
3870			if (k == adev->usec_timeout) {
3871				gfx_v8_0_select_se_sh(adev, 0xffffffff,
3872						      0xffffffff, 0xffffffff);
3873				mutex_unlock(&adev->grbm_idx_mutex);
3874				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
3875					 i, j);
3876				return;
3877			}
3878		}
3879	}
3880	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3881	mutex_unlock(&adev->grbm_idx_mutex);
3882
3883	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3884		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3885		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3886		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3887	for (k = 0; k < adev->usec_timeout; k++) {
3888		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3889			break;
3890		udelay(1);
3891	}
3892}
3893
3894static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3895					       bool enable)
3896{
3897	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3898
3899	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3900	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3901	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3902	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3903
3904	WREG32(mmCP_INT_CNTL_RING0, tmp);
3905}
3906
3907static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
3908{
3909	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
3910	/* csib */
3911	WREG32(mmRLC_CSIB_ADDR_HI,
3912			adev->gfx.rlc.clear_state_gpu_addr >> 32);
3913	WREG32(mmRLC_CSIB_ADDR_LO,
3914			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3915	WREG32(mmRLC_CSIB_LENGTH,
3916			adev->gfx.rlc.clear_state_size);
3917}
3918
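/*
 * Walk the RLC indirect register list.  Entries are terminated by a
 * 0xFFFFFFFF marker and their start offsets are recorded in
 * ind_start_offsets.  The third word of each register triple is an
 * indirect index: it is de-duplicated into unique_indices and the
 * list is rewritten in place to reference that table.
 */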
3919static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
3920				int ind_offset,
3921				int list_size,
3922				int *unique_indices,
3923				int *indices_count,
3924				int max_indices,
3925				int *ind_start_offsets,
3926				int *offset_count,
3927				int max_offset)
3928{
3929	int indices;
3930	bool new_entry = true;
3931
3932	for (; ind_offset < list_size; ind_offset++) {
3933
3934		if (new_entry) {
3935			new_entry = false;
3936			ind_start_offsets[*offset_count] = ind_offset;
3937			*offset_count = *offset_count + 1;
3938			BUG_ON(*offset_count >= max_offset);
3939		}
3940
3941		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
3942			new_entry = true;
3943			continue;
3944		}
3945
3946		ind_offset += 2;
3947
3948		/* look for the matching index */
3949		for (indices = 0;
3950			indices < *indices_count;
3951			indices++) {
3952			if (unique_indices[indices] ==
3953				register_list_format[ind_offset])
3954				break;
3955		}
3956
3957		if (indices >= *indices_count) {
3958			unique_indices[*indices_count] =
3959				register_list_format[ind_offset];
3960			indices = *indices_count;
3961			*indices_count = *indices_count + 1;
3962			BUG_ON(*indices_count >= max_indices);
3963		}
3964
3965		register_list_format[ind_offset] = indices;
3966	}
3967}
3968
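/*
 * Load the RLC save/restore lists: the direct register restore list
 * goes into SRM ARAM, the (index-rewritten) indirect format list and
 * its per-entry start offsets go into GPM scratch, and the table of
 * unique indirect indices is programmed through the
 * RLC_SRM_INDEX_CNTL address/data register pairs.
 */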
3969static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3970{
3971	int i, temp, data;
3972	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
3973	int indices_count = 0;
3974	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3975	int offset_count = 0;
3976
3977	int list_size;
3978	unsigned int *register_list_format =
3979		kmemdup(adev->gfx.rlc.register_list_format,
3980			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
3981	if (!register_list_format)
3982		return -ENOMEM;
3983
3984	gfx_v8_0_parse_ind_reg_list(register_list_format,
3985				RLC_FormatDirectRegListLength,
3986				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3987				unique_indices,
3988				&indices_count,
3989				ARRAY_SIZE(unique_indices),
3990				indirect_start_offsets,
3991				&offset_count,
3992				ARRAY_SIZE(indirect_start_offsets));
3993
3994	/* save and restore list */
3995	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
3996
3997	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
3998	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
3999		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
4000
4001	/* indirect list */
4002	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
4003	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
4004		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
4005
4006	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
4007	list_size = list_size >> 1;
4008	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
4009	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
4010
4011	/* starting offsets */
4012	WREG32(mmRLC_GPM_SCRATCH_ADDR,
4013		adev->gfx.rlc.starting_offsets_start);
4014	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
4015		WREG32(mmRLC_GPM_SCRATCH_DATA,
4016				indirect_start_offsets[i]);
4017
4018	/* unique indices */
4019	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
4020	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
4021	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
4022		if (unique_indices[i] != 0) {
4023			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
4024			WREG32(data + i, unique_indices[i] >> 20);
4025		}
4026	}
4027	kfree(register_list_format);
4028
4029	return 0;
4030}
4031
4032static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
4033{
4034	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
4035}
4036
4037static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
4038{
4039	uint32_t data;
4040
4041	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
4042
4043	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
4044	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
4045	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
4046	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
4047	WREG32(mmRLC_PG_DELAY, data);
4048
4049	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
4050	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
4052}
4053
4054static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
4055						bool enable)
4056{
4057	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
4058}
4059
4060static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
4061						  bool enable)
4062{
4063	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
4064}
4065
4066static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
4067{
4068	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
4069}
4070
4071static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4072{
4073	if ((adev->asic_type == CHIP_CARRIZO) ||
4074	    (adev->asic_type == CHIP_STONEY)) {
4075		gfx_v8_0_init_csb(adev);
4076		gfx_v8_0_init_save_restore_list(adev);
4077		gfx_v8_0_enable_save_restore_machine(adev);
4078		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4079		gfx_v8_0_init_power_gating(adev);
4080		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4081	} else if ((adev->asic_type == CHIP_POLARIS11) ||
4082		   (adev->asic_type == CHIP_POLARIS12) ||
4083		   (adev->asic_type == CHIP_VEGAM)) {
4084		gfx_v8_0_init_csb(adev);
4085		gfx_v8_0_init_save_restore_list(adev);
4086		gfx_v8_0_enable_save_restore_machine(adev);
4087		gfx_v8_0_init_power_gating(adev);
4088	}
4090}
4091
4092static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
4093{
4094	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
4095
4096	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4097	gfx_v8_0_wait_for_rlc_serdes(adev);
4098}
4099
4100static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
4101{
4102	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4103	udelay(50);
4104
4105	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
4106	udelay(50);
4107}
4108
4109static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
4110{
4111	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
4112
4113	/* APUs such as Carrizo enable the CP interrupt only after the CP is initialized */
4114	if (!(adev->flags & AMD_IS_APU))
4115		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4116
4117	udelay(50);
4118}
4119
4120static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4121{
4122	if (amdgpu_sriov_vf(adev)) {
4123		gfx_v8_0_init_csb(adev);
4124		return 0;
4125	}
4126
4127	adev->gfx.rlc.funcs->stop(adev);
4128	adev->gfx.rlc.funcs->reset(adev);
4129	gfx_v8_0_init_pg(adev);
4130	adev->gfx.rlc.funcs->start(adev);
4131
4132	return 0;
4133}
4134
4135static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4136{
4137	u32 tmp = RREG32(mmCP_ME_CNTL);
4138
4139	if (enable) {
4140		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4141		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4142		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4143	} else {
4144		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4145		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4146		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
4147	}
4148	WREG32(mmCP_ME_CNTL, tmp);
4149	udelay(50);
4150}
4151
4152static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4153{
4154	u32 count = 0;
4155	const struct cs_section_def *sect = NULL;
4156	const struct cs_extent_def *ext = NULL;
4157
4158	/* begin clear state */
4159	count += 2;
4160	/* context control state */
4161	count += 3;
4162
4163	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4164		for (ext = sect->section; ext->extent != NULL; ++ext) {
4165			if (sect->id == SECT_CONTEXT)
4166				count += 2 + ext->reg_count;
4167			else
4168				return 0;
4169		}
4170	}
4171	/* pa_sc_raster_config/pa_sc_raster_config1 */
4172	count += 4;
4173	/* end clear state */
4174	count += 2;
4175	/* clear state */
4176	count += 2;
4177
4178	return count;
4179}
4180
4181static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
4182{
4183	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
4184	const struct cs_section_def *sect = NULL;
4185	const struct cs_extent_def *ext = NULL;
4186	int r, i;
4187
4188	/* init the CP */
4189	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
4190	WREG32(mmCP_ENDIAN_SWAP, 0);
4191	WREG32(mmCP_DEVICE_ID, 1);
4192
4193	gfx_v8_0_cp_gfx_enable(adev, true);
4194
4195	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
4196	if (r) {
4197		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
4198		return r;
4199	}
4200
4201	/* clear state buffer */
4202	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4203	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4204
4205	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4206	amdgpu_ring_write(ring, 0x80000000);
4207	amdgpu_ring_write(ring, 0x80000000);
4208
4209	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4210		for (ext = sect->section; ext->extent != NULL; ++ext) {
4211			if (sect->id == SECT_CONTEXT) {
4212				amdgpu_ring_write(ring,
4213				       PACKET3(PACKET3_SET_CONTEXT_REG,
4214					       ext->reg_count));
4215				amdgpu_ring_write(ring,
4216				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4217				for (i = 0; i < ext->reg_count; i++)
4218					amdgpu_ring_write(ring, ext->extent[i]);
4219			}
4220		}
4221	}
4222
4223	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4224	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4225	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4226	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
4227
4228	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4229	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4230
4231	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4232	amdgpu_ring_write(ring, 0);
4233
4234	/* init the CE partitions */
4235	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4236	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4237	amdgpu_ring_write(ring, 0x8000);
4238	amdgpu_ring_write(ring, 0x8000);
4239
4240	amdgpu_ring_commit(ring);
4241
4242	return 0;
4243}

4244static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
4245{
4246	u32 tmp;
4247	/* no gfx doorbells on iceland */
4248	if (adev->asic_type == CHIP_TOPAZ)
4249		return;
4250
4251	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
4252
4253	if (ring->use_doorbell) {
4254		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4255				DOORBELL_OFFSET, ring->doorbell_index);
4256		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4257						DOORBELL_HIT, 0);
4258		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4259					    DOORBELL_EN, 1);
4260	} else {
4261		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
4262	}
4263
4264	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
4265
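	/* the doorbell range aperture below is only programmed on dGPUs */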
4266	if (adev->flags & AMD_IS_APU)
4267		return;
4268
4269	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
4270					DOORBELL_RANGE_LOWER,
4271					adev->doorbell_index.gfx_ring0);
4272	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
4273
4274	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
4275		CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
4276}
4277
4278static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4279{
4280	struct amdgpu_ring *ring;
4281	u32 tmp;
4282	u32 rb_bufsz;
4283	u64 rb_addr, rptr_addr, wptr_gpu_addr;
4284
4285	/* Set the write pointer delay */
4286	WREG32(mmCP_RB_WPTR_DELAY, 0);
4287
4288	/* set the RB to use vmid 0 */
4289	WREG32(mmCP_RB_VMID, 0);
4290
4291	/* Set ring buffer size */
4292	ring = &adev->gfx.gfx_ring[0];
4293	rb_bufsz = order_base_2(ring->ring_size / 8);
4294	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
4295	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
4296	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
4297	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
4298#ifdef __BIG_ENDIAN
4299	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
4300#endif
4301	WREG32(mmCP_RB0_CNTL, tmp);
4302
4303	/* Initialize the ring buffer's read and write pointers */
4304	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
4305	ring->wptr = 0;
4306	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4307
4308	/* set the wb address whether it's enabled or not */
4309	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4310	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
4311	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
4312
4313	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4314	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
4315	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
4316	mdelay(1);
4317	WREG32(mmCP_RB0_CNTL, tmp);
4318
4319	rb_addr = ring->gpu_addr >> 8;
4320	WREG32(mmCP_RB0_BASE, rb_addr);
4321	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
4322
4323	gfx_v8_0_set_cpg_door_bell(adev, ring);
4324	/* start the ring */
4325	amdgpu_ring_clear_ring(ring);
4326	gfx_v8_0_cp_gfx_start(adev);
4327	ring->sched.ready = true;
4328
4329	return 0;
4330}
4331
4332static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4333{
4334	if (enable) {
4335		WREG32(mmCP_MEC_CNTL, 0);
4336	} else {
4337		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4338		adev->gfx.kiq.ring.sched.ready = false;
4339	}
4340	udelay(50);
4341}
4342
4343/* KIQ functions */
4344static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
4345{
4346	uint32_t tmp;
4347	struct amdgpu_device *adev = ring->adev;
4348
4349	/* tell the RLC which queue is the KIQ */
4350	tmp = RREG32(mmRLC_CP_SCHEDULERS);
4351	tmp &= 0xffffff00;
4352	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4353	WREG32(mmRLC_CP_SCHEDULERS, tmp);
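	/* the second write below also sets bit 7, which appears to be the
	 * queue-active bit that latches the selection (assumption from usage) */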
4354	tmp |= 0x80;
4355	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4356}
4357
4358static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4359{
4360	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4361	uint64_t queue_mask = 0;
4362	int r, i;
4363
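	/*
	 * Collect the allocated MEC queues into a 64-bit mask, then hand them
	 * to the KIQ: one SET_RESOURCES packet describing the mask, followed
	 * by a MAP_QUEUES packet per user compute ring.
	 */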
4364	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
4365		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
4366			continue;
4367
4368		/* This situation may be hit in the future if a new HW
4369		 * generation exposes more than 64 queues. If so, the
4370		 * definition of queue_mask needs updating */
4371		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
4372			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
4373			break;
4374		}
4375
4376		queue_mask |= (1ull << i);
4377	}
4378
4379	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
4380	if (r) {
4381		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4382		return r;
4383	}
4384	/* set resources */
4385	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
4386	amdgpu_ring_write(kiq_ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
4387	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
4388	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
4389	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
4390	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
4391	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
4392	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
4393	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4394		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4395		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
4396		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4397
4398		/* map queues */
4399		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
4400		/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
4401		amdgpu_ring_write(kiq_ring,
4402				  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
4403		amdgpu_ring_write(kiq_ring,
4404				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
4405				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
4406				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
4407				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
4408		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
4409		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
4410		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
4411		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4412	}
4413
4414	amdgpu_ring_commit(kiq_ring);
4415
4416	return 0;
4417}
4418
4419static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
4420{
4421	int i, r = 0;
4422
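	/* if the queue is active, request a dequeue and poll CP_HQD_ACTIVE
	 * until the HQD goes idle or the timeout expires */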
4423	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4424		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
4425		for (i = 0; i < adev->usec_timeout; i++) {
4426			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4427				break;
4428			udelay(1);
4429		}
4430		if (i == adev->usec_timeout)
4431			r = -ETIMEDOUT;
4432	}
4433	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
4434	WREG32(mmCP_HQD_PQ_RPTR, 0);
4435	WREG32(mmCP_HQD_PQ_WPTR, 0);
4436
4437	return r;
4438}
4439
4440static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4441{
4442	struct amdgpu_device *adev = ring->adev;
4443
4444	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4445		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
4446			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4447			mqd->cp_hqd_queue_priority =
4448				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4449		}
4450	}
4451}
4452
4453static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4454{
4455	struct amdgpu_device *adev = ring->adev;
4456	struct vi_mqd *mqd = ring->mqd_ptr;
4457	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4458	uint32_t tmp;
4459
4460	mqd->header = 0xC0310800;
4461	mqd->compute_pipelinestat_enable = 0x00000001;
4462	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4463	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4464	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4465	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4466	mqd->compute_misc_reserved = 0x00000003;
4467	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
4468						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4469	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
4470						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4471	eop_base_addr = ring->eop_gpu_addr >> 8;
4472	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4473	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4474
4475	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4476	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
4477	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4478			(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
4479
4480	mqd->cp_hqd_eop_control = tmp;
4481
4482	/* enable doorbell? */
4483	tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4484			    CP_HQD_PQ_DOORBELL_CONTROL,
4485			    DOORBELL_EN,
4486			    ring->use_doorbell ? 1 : 0);
4487
4488	mqd->cp_hqd_pq_doorbell_control = tmp;
4489
4490	/* set the pointer to the MQD */
4491	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
4492	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
4493
4494	/* set MQD vmid to 0 */
4495	tmp = RREG32(mmCP_MQD_CONTROL);
4496	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4497	mqd->cp_mqd_control = tmp;
4498
4499	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4500	hqd_gpu_addr = ring->gpu_addr >> 8;
4501	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4502	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4503
4504	/* set up the HQD, this is similar to CP_RB0_CNTL */
4505	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
4506	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4507			    (order_base_2(ring->ring_size / 4) - 1));
4508	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4509			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
4510#ifdef __BIG_ENDIAN
4511	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
4512#endif
4513	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4514	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
4515	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4516	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4517	mqd->cp_hqd_pq_control = tmp;
4518
4519	/* set the wb address whether it's enabled or not */
4520	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4521	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4522	mqd->cp_hqd_pq_rptr_report_addr_hi =
4523		upper_32_bits(wb_gpu_addr) & 0xffff;
4524
4525	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4526	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4527	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4528	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4529
4530	tmp = 0;
4531	/* enable the doorbell if requested */
4532	if (ring->use_doorbell) {
4533		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
4534		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4535				DOORBELL_OFFSET, ring->doorbell_index);
4536
4537		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4538					 DOORBELL_EN, 1);
4539		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4540					 DOORBELL_SOURCE, 0);
4541		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4542					 DOORBELL_HIT, 0);
4543	}
4544
4545	mqd->cp_hqd_pq_doorbell_control = tmp;
4546
4547	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4548	ring->wptr = 0;
4549	mqd->cp_hqd_pq_wptr = ring->wptr;
4550	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
4551
4552	/* set the vmid for the queue */
4553	mqd->cp_hqd_vmid = 0;
4554
4555	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
4556	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
4557	mqd->cp_hqd_persistent_state = tmp;
4558
4559	/* set MTYPE */
4560	tmp = RREG32(mmCP_HQD_IB_CONTROL);
4561	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4562	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
4563	mqd->cp_hqd_ib_control = tmp;
4564
4565	tmp = RREG32(mmCP_HQD_IQ_TIMER);
4566	tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
4567	mqd->cp_hqd_iq_timer = tmp;
4568
4569	tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
4570	tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
4571	mqd->cp_hqd_ctx_save_control = tmp;
4572
4573	/* defaults */
4574	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
4575	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
4576	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
4577	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
4578	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
4579	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
4580	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
4581	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
4582	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
4583	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
4584	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
4585	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
4586
4587	/* set static priority for a queue/ring */
4588	gfx_v8_0_mqd_set_priority(ring, mqd);
4589	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
4590
4591	/* the map_queues packet doesn't need to activate the queue,
4592	 * so only the KIQ needs to set this field.
4593	 */
4594	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
4595		mqd->cp_hqd_active = 1;
4596
4597	return 0;
4598}
4599
4600static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
4601			struct vi_mqd *mqd)
4602{
4603	uint32_t mqd_reg;
4604	uint32_t *mqd_data;
4605
4606	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
4607	mqd_data = &mqd->cp_mqd_base_addr_lo;
4608
4609	/* disable wptr polling */
4610	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
4611
4612	/* program all HQD registers */
4613	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
4614		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4615
4616	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
4617	 * This is safe since EOP RPTR==WPTR for any inactive HQD
4618	 * on ASICs that do not support context-save.
4619	 * EOP writes/reads can start anywhere in the ring.
4620	 */
4621	if (adev->asic_type != CHIP_TONGA) {
4622		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
4623		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
4624		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
4625	}
4626
4627	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
4628		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4629
4630	/* activate the HQD */
4631	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
4632		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4633
4634	return 0;
4635}
4636
4637static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
4638{
4639	struct amdgpu_device *adev = ring->adev;
4640	struct vi_mqd *mqd = ring->mqd_ptr;
4641	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
4642
4643	gfx_v8_0_kiq_setting(ring);
4644
4645	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4646		/* reset MQD to a clean status */
4647		if (adev->gfx.mec.mqd_backup[mqd_idx])
4648			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4649
4650		/* reset ring buffer */
4651		ring->wptr = 0;
4652		amdgpu_ring_clear_ring(ring);
4653		mutex_lock(&adev->srbm_mutex);
4654		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4655		gfx_v8_0_mqd_commit(adev, mqd);
4656		vi_srbm_select(adev, 0, 0, 0, 0);
4657		mutex_unlock(&adev->srbm_mutex);
4658	} else {
4659		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4660		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4661		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4662		mutex_lock(&adev->srbm_mutex);
4663		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4664		gfx_v8_0_mqd_init(ring);
4665		gfx_v8_0_mqd_commit(adev, mqd);
4666		vi_srbm_select(adev, 0, 0, 0, 0);
4667		mutex_unlock(&adev->srbm_mutex);
4668
4669		if (adev->gfx.mec.mqd_backup[mqd_idx])
4670			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4671	}
4672
4673	return 0;
4674}
4675
4676static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4677{
4678	struct amdgpu_device *adev = ring->adev;
4679	struct vi_mqd *mqd = ring->mqd_ptr;
4680	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4681
4682	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4683		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4684		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4685		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4686		mutex_lock(&adev->srbm_mutex);
4687		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4688		gfx_v8_0_mqd_init(ring);
4689		vi_srbm_select(adev, 0, 0, 0, 0);
4690		mutex_unlock(&adev->srbm_mutex);
4691
4692		if (adev->gfx.mec.mqd_backup[mqd_idx])
4693			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4694	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4695		/* reset MQD to a clean status */
4696		if (adev->gfx.mec.mqd_backup[mqd_idx])
4697			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4698		/* reset ring buffer */
4699		ring->wptr = 0;
4700		amdgpu_ring_clear_ring(ring);
4701	} else {
4702		amdgpu_ring_clear_ring(ring);
4703	}
4704	return 0;
4705}
4706
4707static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
4708{
4709	if (adev->asic_type > CHIP_TONGA) {
4710		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
4711		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
4712	}
4713	/* enable doorbells */
4714	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4715}
4716
4717static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4718{
4719	struct amdgpu_ring *ring;
4720	int r;
4721
4722	ring = &adev->gfx.kiq.ring;
4723
4724	r = amdgpu_bo_reserve(ring->mqd_obj, false);
4725	if (unlikely(r != 0))
4726		return r;
4727
4728	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4729	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj); /* don't leak the reservation taken above */
4730		return r;
	}
4731
4732	gfx_v8_0_kiq_init_queue(ring);
4733	amdgpu_bo_kunmap(ring->mqd_obj);
4734	ring->mqd_ptr = NULL;
4735	amdgpu_bo_unreserve(ring->mqd_obj);
4736	ring->sched.ready = true;
4737	return 0;
4738}
4739
4740static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4741{
4742	struct amdgpu_ring *ring = NULL;
4743	int r = 0, i;
4744
4745	gfx_v8_0_cp_compute_enable(adev, true);
4746
4747	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4748		ring = &adev->gfx.compute_ring[i];
4749
4750		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4751		if (unlikely(r != 0))
4752			goto done;
4753		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4754		if (!r) {
4755			r = gfx_v8_0_kcq_init_queue(ring);
4756			amdgpu_bo_kunmap(ring->mqd_obj);
4757			ring->mqd_ptr = NULL;
4758		}
4759		amdgpu_bo_unreserve(ring->mqd_obj);
4760		if (r)
4761			goto done;
4762	}
4763
4764	gfx_v8_0_set_mec_doorbell_range(adev);
4765
4766	r = gfx_v8_0_kiq_kcq_enable(adev);
4769
4770done:
4771	return r;
4772}
4773
4774static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4775{
4776	int r, i;
4777	struct amdgpu_ring *ring;
4778
4779	/* collect all the ring_tests here, gfx, kiq, compute */
4780	ring = &adev->gfx.gfx_ring[0];
4781	r = amdgpu_ring_test_helper(ring);
4782	if (r)
4783		return r;
4784
4785	ring = &adev->gfx.kiq.ring;
4786	r = amdgpu_ring_test_helper(ring);
4787	if (r)
4788		return r;
4789
4790	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4791		ring = &adev->gfx.compute_ring[i];
4792		amdgpu_ring_test_helper(ring);
4793	}
4794
4795	return 0;
4796}
4797
4798static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4799{
4800	int r;
4801
4802	if (!(adev->flags & AMD_IS_APU))
4803		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4804
4805	r = gfx_v8_0_kiq_resume(adev);
4806	if (r)
4807		return r;
4808
4809	r = gfx_v8_0_cp_gfx_resume(adev);
4810	if (r)
4811		return r;
4812
4813	r = gfx_v8_0_kcq_resume(adev);
4814	if (r)
4815		return r;
4816
4817	r = gfx_v8_0_cp_test_all_rings(adev);
4818	if (r)
4819		return r;
4820
4821	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4822
4823	return 0;
4824}
4825
4826static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
4827{
4828	gfx_v8_0_cp_gfx_enable(adev, enable);
4829	gfx_v8_0_cp_compute_enable(adev, enable);
4830}
4831
4832static int gfx_v8_0_hw_init(void *handle)
4833{
4834	int r;
4835	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4836
4837	gfx_v8_0_init_golden_registers(adev);
4838	gfx_v8_0_constants_init(adev);
4839
4840	r = adev->gfx.rlc.funcs->resume(adev);
4841	if (r)
4842		return r;
4843
4844	r = gfx_v8_0_cp_resume(adev);
4845
4846	return r;
4847}
4848
4849static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
4850{
4851	int r, i;
4852	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4853
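	/* tear the user compute queues down through the KIQ: one 6-dword
	 * UNMAP_QUEUES packet (action = RESET_QUEUES) per ring */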
4854	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4855	if (r)
4856		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4857
4858	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4859		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4860
4861		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
4862		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
4863						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
4864						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
4865						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
4866						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
4867		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
4868		amdgpu_ring_write(kiq_ring, 0);
4869		amdgpu_ring_write(kiq_ring, 0);
4870		amdgpu_ring_write(kiq_ring, 0);
4871	}
4872	r = amdgpu_ring_test_helper(kiq_ring);
4873	if (r)
4874		DRM_ERROR("KCQ disable failed\n");
4875
4876	return r;
4877}
4878
4879static bool gfx_v8_0_is_idle(void *handle)
4880{
4881	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4882
4883	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4884		|| RREG32(mmGRBM_STATUS2) != 0x8)
4885		return false;
4886	else
4887		return true;
4888}
4889
4890static bool gfx_v8_0_rlc_is_idle(void *handle)
4891{
4892	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4893
4894	if (RREG32(mmGRBM_STATUS2) != 0x8)
4895		return false;
4896	else
4897		return true;
4898}
4899
4900static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4901{
4902	unsigned int i;
4903	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4904
4905	for (i = 0; i < adev->usec_timeout; i++) {
4906		if (gfx_v8_0_rlc_is_idle(handle))
4907			return 0;
4908
4909		udelay(1);
4910	}
4911	return -ETIMEDOUT;
4912}
4913
4914static int gfx_v8_0_wait_for_idle(void *handle)
4915{
4916	unsigned int i;
4917	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4918
4919	for (i = 0; i < adev->usec_timeout; i++) {
4920		if (gfx_v8_0_is_idle(handle))
4921			return 0;
4922
4923		udelay(1);
4924	}
4925	return -ETIMEDOUT;
4926}
4927
4928static int gfx_v8_0_hw_fini(void *handle)
4929{
4930	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4931
4932	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4933	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4934
4935	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4936
4937	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
4938
4939	/* disable the KCQs so the CPC stops touching memory that may no longer be valid */
4940	gfx_v8_0_kcq_disable(adev);
4941
4942	if (amdgpu_sriov_vf(adev)) {
4943		pr_debug("For SRIOV client, shouldn't do anything.\n");
4944		return 0;
4945	}
4946	amdgpu_gfx_rlc_enter_safe_mode(adev);
4947	if (!gfx_v8_0_wait_for_idle(adev))
4948		gfx_v8_0_cp_enable(adev, false);
4949	else
4950		pr_err("cp is busy, skip halt cp\n");
4951	if (!gfx_v8_0_wait_for_rlc_idle(adev))
4952		adev->gfx.rlc.funcs->stop(adev);
4953	else
4954		pr_err("rlc is busy, skip halt rlc\n");
4955	amdgpu_gfx_rlc_exit_safe_mode(adev);
4956
4957	return 0;
4958}
4959
4960static int gfx_v8_0_suspend(void *handle)
4961{
4962	return gfx_v8_0_hw_fini(handle);
4963}
4964
4965static int gfx_v8_0_resume(void *handle)
4966{
4967	return gfx_v8_0_hw_init(handle);
4968}
4969
4970static bool gfx_v8_0_check_soft_reset(void *handle)
4971{
4972	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4973	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4974	u32 tmp;
4975
4976	/* GRBM_STATUS */
4977	tmp = RREG32(mmGRBM_STATUS);
4978	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4979		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4980		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4981		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4982		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4983		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
4984		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4985		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4986						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4987		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4988						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4989		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4990						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4991	}
4992
4993	/* GRBM_STATUS2 */
4994	tmp = RREG32(mmGRBM_STATUS2);
4995	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4996		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4997						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4998
4999	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
5000	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
5001	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
5002		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5003						SOFT_RESET_CPF, 1);
5004		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5005						SOFT_RESET_CPC, 1);
5006		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5007						SOFT_RESET_CPG, 1);
5008		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
5009						SOFT_RESET_GRBM, 1);
5010	}
5011
5012	/* SRBM_STATUS */
5013	tmp = RREG32(mmSRBM_STATUS);
5014	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
5015		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
5016						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
5017	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
5018		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
5019						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
5020
5021	if (grbm_soft_reset || srbm_soft_reset) {
5022		adev->gfx.grbm_soft_reset = grbm_soft_reset;
5023		adev->gfx.srbm_soft_reset = srbm_soft_reset;
5024		return true;
5025	} else {
5026		adev->gfx.grbm_soft_reset = 0;
5027		adev->gfx.srbm_soft_reset = 0;
5028		return false;
5029	}
5030}
5031
5032static int gfx_v8_0_pre_soft_reset(void *handle)
5033{
5034	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5035	u32 grbm_soft_reset = 0;
5036
5037	if ((!adev->gfx.grbm_soft_reset) &&
5038	    (!adev->gfx.srbm_soft_reset))
5039		return 0;
5040
5041	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5042
5043	/* stop the rlc */
5044	adev->gfx.rlc.funcs->stop(adev);
5045
5046	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5047	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5048		/* Disable GFX parsing/prefetching */
5049		gfx_v8_0_cp_gfx_enable(adev, false);
5050
5051	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5052	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5053	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5054	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5055		int i;
5056
5057		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5058			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5059
5060			mutex_lock(&adev->srbm_mutex);
5061			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5062			gfx_v8_0_deactivate_hqd(adev, 2);
5063			vi_srbm_select(adev, 0, 0, 0, 0);
5064			mutex_unlock(&adev->srbm_mutex);
5065		}
5066		/* Disable MEC parsing/prefetching */
5067		gfx_v8_0_cp_compute_enable(adev, false);
5068	}
5069
5070	return 0;
5071}
5072
5073static int gfx_v8_0_soft_reset(void *handle)
5074{
5075	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5076	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
5077	u32 tmp;
5078
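	/*
	 * Sequence: stall and clear the GFX path via GMCON_DEBUG, pulse the
	 * GRBM/SRBM soft-reset bits with ~50us settle delays (each write is
	 * posted by reading the register back), then release the stall.
	 */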
5079	if ((!adev->gfx.grbm_soft_reset) &&
5080	    (!adev->gfx.srbm_soft_reset))
5081		return 0;
5082
5083	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5084	srbm_soft_reset = adev->gfx.srbm_soft_reset;
5085
5086	if (grbm_soft_reset || srbm_soft_reset) {
5087		tmp = RREG32(mmGMCON_DEBUG);
5088		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
5089		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
5090		WREG32(mmGMCON_DEBUG, tmp);
5091		udelay(50);
5092	}
5093
5094	if (grbm_soft_reset) {
5095		tmp = RREG32(mmGRBM_SOFT_RESET);
5096		tmp |= grbm_soft_reset;
5097		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
5098		WREG32(mmGRBM_SOFT_RESET, tmp);
5099		tmp = RREG32(mmGRBM_SOFT_RESET);
5100
5101		udelay(50);
5102
5103		tmp &= ~grbm_soft_reset;
5104		WREG32(mmGRBM_SOFT_RESET, tmp);
5105		tmp = RREG32(mmGRBM_SOFT_RESET);
5106	}
5107
5108	if (srbm_soft_reset) {
5109		tmp = RREG32(mmSRBM_SOFT_RESET);
5110		tmp |= srbm_soft_reset;
5111		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
5112		WREG32(mmSRBM_SOFT_RESET, tmp);
5113		tmp = RREG32(mmSRBM_SOFT_RESET);
5114
5115		udelay(50);
5116
5117		tmp &= ~srbm_soft_reset;
5118		WREG32(mmSRBM_SOFT_RESET, tmp);
5119		tmp = RREG32(mmSRBM_SOFT_RESET);
5120	}
5121
5122	if (grbm_soft_reset || srbm_soft_reset) {
5123		tmp = RREG32(mmGMCON_DEBUG);
5124		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
5125		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
5126		WREG32(mmGMCON_DEBUG, tmp);
5127	}
5128
5129	/* Wait a little for things to settle down */
5130	udelay(50);
5131
5132	return 0;
5133}
5134
5135static int gfx_v8_0_post_soft_reset(void *handle)
5136{
5137	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5138	u32 grbm_soft_reset = 0;
5139
5140	if ((!adev->gfx.grbm_soft_reset) &&
5141	    (!adev->gfx.srbm_soft_reset))
5142		return 0;
5143
5144	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5145
5146	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5147	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5148	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5149	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5150		int i;
5151
5152		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5153			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5154
5155			mutex_lock(&adev->srbm_mutex);
5156			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5157			gfx_v8_0_deactivate_hqd(adev, 2);
5158			vi_srbm_select(adev, 0, 0, 0, 0);
5159			mutex_unlock(&adev->srbm_mutex);
5160		}
5161		gfx_v8_0_kiq_resume(adev);
5162		gfx_v8_0_kcq_resume(adev);
5163	}
5164
5165	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5166	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5167		gfx_v8_0_cp_gfx_resume(adev);
5168
5169	gfx_v8_0_cp_test_all_rings(adev);
5170
5171	adev->gfx.rlc.funcs->start(adev);
5172
5173	return 0;
5174}
5175
5176/**
5177 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5178 *
5179 * @adev: amdgpu_device pointer
5180 *
5181 * Fetches a GPU clock counter snapshot.
5182 * Returns the 64 bit clock counter snapshot.
5183 */
5184static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5185{
5186	uint64_t clock;
5187
5188	mutex_lock(&adev->gfx.gpu_clock_mutex);
5189	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
5190	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5191		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5192	mutex_unlock(&adev->gfx.gpu_clock_mutex);
5193	return clock;
5194}
5195
5196static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5197					  uint32_t vmid,
5198					  uint32_t gds_base, uint32_t gds_size,
5199					  uint32_t gws_base, uint32_t gws_size,
5200					  uint32_t oa_base, uint32_t oa_size)
5201{
5202	/* GDS Base */
5203	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5204	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5205				WRITE_DATA_DST_SEL(0)));
5206	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
5207	amdgpu_ring_write(ring, 0);
5208	amdgpu_ring_write(ring, gds_base);
5209
5210	/* GDS Size */
5211	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5212	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5213				WRITE_DATA_DST_SEL(0)));
5214	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
5215	amdgpu_ring_write(ring, 0);
5216	amdgpu_ring_write(ring, gds_size);
5217
5218	/* GWS */
5219	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5220	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5221				WRITE_DATA_DST_SEL(0)));
5222	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
5223	amdgpu_ring_write(ring, 0);
5224	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5225
5226	/* OA */
5227	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5228	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5229				WRITE_DATA_DST_SEL(0)));
5230	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
5231	amdgpu_ring_write(ring, 0);
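	/* contiguous OA mask: oa_size bits set, starting at bit oa_base */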
5232	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
5233}
5234
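/*
 * SQ indirect register access: select a wave/SIMD and register address
 * through SQ_IND_INDEX, then read the value back via SQ_IND_DATA.
 * AUTO_INCR (used in wave_read_regs) advances the index after each read
 * so a whole register range can be streamed out.
 */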
5235static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5236{
5237	WREG32(mmSQ_IND_INDEX,
5238		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5239		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5240		(address << SQ_IND_INDEX__INDEX__SHIFT) |
5241		(SQ_IND_INDEX__FORCE_READ_MASK));
5242	return RREG32(mmSQ_IND_DATA);
5243}
5244
5245static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5246			   uint32_t wave, uint32_t thread,
5247			   uint32_t regno, uint32_t num, uint32_t *out)
5248{
5249	WREG32(mmSQ_IND_INDEX,
5250		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5251		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5252		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
5253		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5254		(SQ_IND_INDEX__FORCE_READ_MASK) |
5255		(SQ_IND_INDEX__AUTO_INCR_MASK));
5256	while (num--)
5257		*(out++) = RREG32(mmSQ_IND_DATA);
5258}
5259
5260static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
5261{
5262	/* type 0 wave data */
5263	dst[(*no_fields)++] = 0;
5264	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
5265	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
5266	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
5267	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
5268	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
5269	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
5270	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
5271	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
5272	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
5273	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
5274	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
5275	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
5276	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
5277	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
5278	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
5279	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
5280	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
5281	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
5282}
5283
5284static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
5285				     uint32_t wave, uint32_t start,
5286				     uint32_t size, uint32_t *dst)
5287{
5288	wave_read_regs(
5289		adev, simd, wave, 0,
5290		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
5291}
5292
5293
5294static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5295	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5296	.select_se_sh = &gfx_v8_0_select_se_sh,
5297	.read_wave_data = &gfx_v8_0_read_wave_data,
5298	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5299	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5300};
5301
5302static int gfx_v8_0_early_init(void *handle)
5303{
5304	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5305
5306	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5307	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5308					  AMDGPU_MAX_COMPUTE_RINGS);
5309	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5310	gfx_v8_0_set_ring_funcs(adev);
5311	gfx_v8_0_set_irq_funcs(adev);
5312	gfx_v8_0_set_gds_init(adev);
5313	gfx_v8_0_set_rlc_funcs(adev);
5314
5315	return 0;
5316}
5317
5318static int gfx_v8_0_late_init(void *handle)
5319{
5320	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5321	int r;
5322
5323	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5324	if (r)
5325		return r;
5326
5327	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5328	if (r)
5329		return r;
5330
5331	/* requires IBs so do in late init after IB pool is initialized */
5332	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
5333	if (r)
5334		return r;
5335
5336	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
5337	if (r) {
5338		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
5339		return r;
5340	}
5341
5342	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
5343	if (r) {
5344		DRM_ERROR(
5345			"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
5346			r);
5347		return r;
5348	}
5349
5350	return 0;
5351}
5352
5353static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5354						       bool enable)
5355{
5356	if ((adev->asic_type == CHIP_POLARIS11) ||
5357	    (adev->asic_type == CHIP_POLARIS12) ||
5358	    (adev->asic_type == CHIP_VEGAM))
5359		/* Send msg to SMU via Powerplay */
5360		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5361
5362	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5363}
5364
5365static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
5366							bool enable)
5367{
5368	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
5369}
5370
5371static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
5372		bool enable)
5373{
5374	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
5375}
5376
5377static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
5378					  bool enable)
5379{
5380	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
5381}
5382
5383static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
5384						bool enable)
5385{
5386	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
5387
5388	/* Read any GFX register to wake up GFX. */
5389	if (!enable)
5390		RREG32(mmDB_RENDER_CONTROL);
5391}
5392
5393static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5394					  bool enable)
5395{
5396	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5397		cz_enable_gfx_cg_power_gating(adev, true);
5398		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5399			cz_enable_gfx_pipeline_power_gating(adev, true);
5400	} else {
5401		cz_enable_gfx_cg_power_gating(adev, false);
5402		cz_enable_gfx_pipeline_power_gating(adev, false);
5403	}
5404}
5405
5406static int gfx_v8_0_set_powergating_state(void *handle,
5407					  enum amd_powergating_state state)
5408{
5409	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5410	bool enable = (state == AMD_PG_STATE_GATE);
5411
5412	if (amdgpu_sriov_vf(adev))
5413		return 0;
5414
5415	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5416				AMD_PG_SUPPORT_RLC_SMU_HS |
5417				AMD_PG_SUPPORT_CP |
5418				AMD_PG_SUPPORT_GFX_DMG))
5419		amdgpu_gfx_rlc_enter_safe_mode(adev);
5420	switch (adev->asic_type) {
5421	case CHIP_CARRIZO:
5422	case CHIP_STONEY:
5423
5424		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5425			cz_enable_sck_slow_down_on_power_up(adev, true);
5426			cz_enable_sck_slow_down_on_power_down(adev, true);
5427		} else {
5428			cz_enable_sck_slow_down_on_power_up(adev, false);
5429			cz_enable_sck_slow_down_on_power_down(adev, false);
5430		}
5431		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5432			cz_enable_cp_power_gating(adev, true);
5433		else
5434			cz_enable_cp_power_gating(adev, false);
5435
5436		cz_update_gfx_cg_power_gating(adev, enable);
5437
5438		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5439			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5440		else
5441			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5442
5443		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5444			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5445		else
5446			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5447		break;
5448	case CHIP_POLARIS11:
5449	case CHIP_POLARIS12:
5450	case CHIP_VEGAM:
5451		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5452			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5453		else
5454			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5455
5456		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5457			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5458		else
5459			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5460
5461		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
5462			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
5463		else
5464			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
5465		break;
5466	default:
5467		break;
5468	}
5469	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5470				AMD_PG_SUPPORT_RLC_SMU_HS |
5471				AMD_PG_SUPPORT_CP |
5472				AMD_PG_SUPPORT_GFX_DMG))
5473		amdgpu_gfx_rlc_exit_safe_mode(adev);
5474	return 0;
5475}
5476
5477static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
5478{
5479	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5480	int data;
5481
5482	if (amdgpu_sriov_vf(adev))
5483		*flags = 0;
5484
5485	/* AMD_CG_SUPPORT_GFX_MGCG */
5486	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5487	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
5488		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5489
5490	/* AMD_CG_SUPPORT_GFX_CGCG */
5491	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5492	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5493		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5494
5495	/* AMD_CG_SUPPORT_GFX_CGLS */
5496	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5497		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5498
5499	/* AMD_CG_SUPPORT_GFX_CGTS */
5500	data = RREG32(mmCGTS_SM_CTRL_REG);
5501	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
5502		*flags |= AMD_CG_SUPPORT_GFX_CGTS;
5503
5504	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
5505	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
5506		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
5507
5508	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5509	data = RREG32(mmRLC_MEM_SLP_CNTL);
5510	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5511		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5512
5513	/* AMD_CG_SUPPORT_GFX_CP_LS */
5514	data = RREG32(mmCP_MEM_SLP_CNTL);
5515	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5516		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5517}
5518
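/*
 * Broadcast a BPM command over the RLC serdes bus: select every SE/SH and
 * CU master, then encode the command and target register address into
 * RLC_SERDES_WR_CTRL.  Used to set/clear the MGCG/CGCG/CGLS overrides.
 */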
5519static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
5520				     uint32_t reg_addr, uint32_t cmd)
5521{
5522	uint32_t data;
5523
5524	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5525
5526	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5527	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5528
5529	data = RREG32(mmRLC_SERDES_WR_CTRL);
5530	if (adev->asic_type == CHIP_STONEY)
5531		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5532			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5533			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5534			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5535			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5536			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5537			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5538			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5539			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5540	else
5541		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5542			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5543			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5544			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5545			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5546			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5547			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5548			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5549			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
5550			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
5551			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5552	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
5553		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
5554		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
5555		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
5556
5557	WREG32(mmRLC_SERDES_WR_CTRL, data);
5558}
5559
5560#define MSG_ENTER_RLC_SAFE_MODE     1
5561#define MSG_EXIT_RLC_SAFE_MODE      0
5562#define RLC_GPR_REG2__REQ_MASK 0x00000001
5563#define RLC_GPR_REG2__REQ__SHIFT 0
5564#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
5565#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5566
5567static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5568{
5569	uint32_t rlc_setting;
5570
5571	rlc_setting = RREG32(mmRLC_CNTL);
5572	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5573		return false;
5574
5575	return true;
5576}
5577
5578static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
5579{
5580	uint32_t data;
5581	unsigned i;
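
	/*
	 * Enter-safe-mode handshake: set CMD with MESSAGE=1 in RLC_SAFE_MODE,
	 * wait for RLC_GPM_STAT to report the GFX clock and power status bits,
	 * then wait for the CMD bit to self-clear as the acknowledgement.
	 */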
5582	data = RREG32(mmRLC_CNTL);
5583	data |= RLC_SAFE_MODE__CMD_MASK;
5584	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5585	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5586	WREG32(mmRLC_SAFE_MODE, data);
5587
5588	/* wait for RLC_SAFE_MODE */
5589	for (i = 0; i < adev->usec_timeout; i++) {
5590		if ((RREG32(mmRLC_GPM_STAT) &
5591		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5592		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
5593		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5594		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
5595			break;
5596		udelay(1);
5597	}
5598	for (i = 0; i < adev->usec_timeout; i++) {
5599		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5600			break;
5601		udelay(1);
5602	}
5603}
5604
5605static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
5606{
5607	uint32_t data;
5608	unsigned i;
5609
5610	data = RREG32(mmRLC_CNTL);
5611	data |= RLC_SAFE_MODE__CMD_MASK;
5612	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5613	WREG32(mmRLC_SAFE_MODE, data);
5614
5615	for (i = 0; i < adev->usec_timeout; i++) {
5616		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5617			break;
5618		udelay(1);
5619	}
5620}
5621
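/*
 * Select which VMID the RLC streaming performance monitor (SPM) traces.
 * In SR-IOV one-VF mode the register is accessed directly instead of
 * being routed through the KIQ.
 */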
5622static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5623{
5624	u32 data;
5625
5626	if (amdgpu_sriov_is_pp_one_vf(adev))
5627		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
5628	else
5629		data = RREG32(mmRLC_SPM_VMID);
5630
5631	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
5632	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
5633
5634	if (amdgpu_sriov_is_pp_one_vf(adev))
5635		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
5636	else
5637		WREG32(mmRLC_SPM_VMID, data);
5638}
5639
5640static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
5641	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
5642	.set_safe_mode = gfx_v8_0_set_safe_mode,
5643	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
5644	.init = gfx_v8_0_rlc_init,
5645	.get_csb_size = gfx_v8_0_get_csb_size,
5646	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
5647	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
5648	.resume = gfx_v8_0_rlc_resume,
5649	.stop = gfx_v8_0_rlc_stop,
5650	.reset = gfx_v8_0_rlc_reset,
5651	.start = gfx_v8_0_rlc_start,
5652	.update_spm_vmid = gfx_v8_0_update_spm_vmid
5653};
5654
5655static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5656						      bool enable)
5657{
5658	uint32_t temp, data;
5659
5660	amdgpu_gfx_rlc_enter_safe_mode(adev);
5661
5662	/* It is disabled by HW by default */
5663	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
5664		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5665			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
5666				/* 1 - RLC memory Light sleep */
5667				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
5668
5669			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
5670				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
5671		}
5672
5673		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
5674		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5675		if (adev->flags & AMD_IS_APU)
5676			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5677				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5678				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
5679		else
5680			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5681				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5682				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5683				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5684
5685		if (temp != data)
5686			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5687
5688		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5689		gfx_v8_0_wait_for_rlc_serdes(adev);
5690
5691		/* 5 - clear mgcg override */
5692		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5693
5694		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
5695			/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
5696			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5697			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
5698			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
5699			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
5700			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
5701			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
5702			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
5703				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
5704			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
5705			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
5706			if (temp != data)
5707				WREG32(mmCGTS_SM_CTRL_REG, data);
5708		}
5709		udelay(50);
5710
5711		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5712		gfx_v8_0_wait_for_rlc_serdes(adev);
5713	} else {
5714		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
5715		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5716		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5717				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5718				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5719				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5720		if (temp != data)
5721			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5722
5723		/* 2 - disable MGLS in RLC */
5724		data = RREG32(mmRLC_MEM_SLP_CNTL);
5725		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5726			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5727			WREG32(mmRLC_MEM_SLP_CNTL, data);
5728		}
5729
5730		/* 3 - disable MGLS in CP */
5731		data = RREG32(mmCP_MEM_SLP_CNTL);
5732		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5733			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5734			WREG32(mmCP_MEM_SLP_CNTL, data);
5735		}
5736
5737		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
5738		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5739		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
5740				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
5741		if (temp != data)
5742			WREG32(mmCGTS_SM_CTRL_REG, data);
5743
5744		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5745		gfx_v8_0_wait_for_rlc_serdes(adev);
5746
5747		/* 6 - set mgcg override */
5748		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5749
5750		udelay(50);
5751
5752		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5753		gfx_v8_0_wait_for_rlc_serdes(adev);
5754	}
5755
5756	amdgpu_gfx_rlc_exit_safe_mode(adev);
5757}
5758
5759static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5760						      bool enable)
5761{
5762	uint32_t temp, temp1, data, data1;
5763
5764	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5765
5766	amdgpu_gfx_rlc_enter_safe_mode(adev);
5767
5768	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5769		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5770		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
5771		if (temp1 != data1)
5772			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5773
5774		/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5775		gfx_v8_0_wait_for_rlc_serdes(adev);
5776
5777		/* 2 - clear cgcg override */
5778		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5779
5780		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5781		gfx_v8_0_wait_for_rlc_serdes(adev);
5782
5783		/* 3 - write cmd to set CGLS */
5784		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
5785
5786		/* 4 - enable cgcg */
5787		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5788
5789		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5790			/* enable cgls*/
5791			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5792
5793			temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5794			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
5795
5796			if (temp1 != data1)
5797				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5798		} else {
5799			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5800		}
5801
5802		if (temp != data)
5803			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5804
5805		/* 5 - enable cntx_empty_int_enable/cntx_busy_int_enable/
5806		 * Cmp_busy/GFX_Idle interrupts
5807		 */
5808		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5809	} else {
5810		/* disable cntx_empty_int_enable & GFX Idle interrupt */
5811		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
5812
5813		/* TEST CGCG */
5814		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5815		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
5816				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
5817		if (temp1 != data1)
5818			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5819
5820		/* read gfx register to wake up cgcg */
5821		RREG32(mmCB_CGTT_SCLK_CTRL);
5822		RREG32(mmCB_CGTT_SCLK_CTRL);
5823		RREG32(mmCB_CGTT_SCLK_CTRL);
5824		RREG32(mmCB_CGTT_SCLK_CTRL);
5825
5826		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5827		gfx_v8_0_wait_for_rlc_serdes(adev);
5828
5829		/* write cmd to Set CGCG Override */
5830		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5831
5832		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5833		gfx_v8_0_wait_for_rlc_serdes(adev);
5834
5835		/* write cmd to Clear CGLS */
5836		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
5837
5838		/* disable cgcg, cgls should be disabled too. */
5839		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
5840			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5841		if (temp != data)
5842			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5843		/* enable interrupts again for PG */
5844		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5845	}
5846
5847	gfx_v8_0_wait_for_rlc_serdes(adev);
5848
5849	amdgpu_gfx_rlc_exit_safe_mode(adev);
5850}

5851static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5852					    bool enable)
5853{
5854	if (enable) {
5855		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
5856		 * ===  MGCG + MGLS + TS(CG/LS) ===
5857		 */
5858		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5859		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5860	} else {
5861		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
5862		 * ===  CGCG + CGLS ===
5863		 */
5864		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5865		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5866	}
5867	return 0;
5868}
5869
5870static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5871					  enum amd_clockgating_state state)
5872{
5873	uint32_t msg_id, pp_state = 0;
5874	uint32_t pp_support_state = 0;
5875
5876	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5877		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5878			pp_support_state = PP_STATE_SUPPORT_LS;
5879			pp_state = PP_STATE_LS;
5880		}
5881		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5882			pp_support_state |= PP_STATE_SUPPORT_CG;
5883			pp_state |= PP_STATE_CG;
5884		}
5885		if (state == AMD_CG_STATE_UNGATE)
5886			pp_state = 0;
5887
5888		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5889				PP_BLOCK_GFX_CG,
5890				pp_support_state,
5891				pp_state);
5892		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5893	}
5894
5895	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5896		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5897			pp_support_state = PP_STATE_SUPPORT_LS;
5898			pp_state = PP_STATE_LS;
5899		}
5900
5901		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5902			pp_support_state |= PP_STATE_SUPPORT_CG;
5903			pp_state |= PP_STATE_CG;
5904		}
5905
5906		if (state == AMD_CG_STATE_UNGATE)
5907			pp_state = 0;
5908
5909		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5910				PP_BLOCK_GFX_MG,
5911				pp_support_state,
5912				pp_state);
5913		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5914	}
5915
5916	return 0;
5917}
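
/*
 * Worked example (derived from the flag handling above; this helper is
 * illustrative only and not part of the driver): on a part with both CGCG
 * and CGLS supported, a gate request packs the full support mask together
 * with the requested states into one PP_CG_MSG_ID() word, while
 * AMD_CG_STATE_UNGATE keeps the same support mask but sends pp_state = 0,
 * so the SMU still knows exactly which features to switch off.
 */
static inline uint32_t gfx_v8_0_example_cg_msg(bool gate)
{
	uint32_t support = PP_STATE_SUPPORT_LS | PP_STATE_SUPPORT_CG;
	uint32_t state = gate ? (PP_STATE_LS | PP_STATE_CG) : 0;

	return PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG, support, state);
}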
5918
5919static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
5920					  enum amd_clockgating_state state)
5921{
5922
5923	uint32_t msg_id, pp_state = 0;
5924	uint32_t pp_support_state = 0;
5925
5926	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5927		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5928			pp_support_state = PP_STATE_SUPPORT_LS;
5929			pp_state = PP_STATE_LS;
5930		}
5931		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5932			pp_support_state |= PP_STATE_SUPPORT_CG;
5933			pp_state |= PP_STATE_CG;
5934		}
5935		if (state == AMD_CG_STATE_UNGATE)
5936			pp_state = 0;
5937
5938		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5939				PP_BLOCK_GFX_CG,
5940				pp_support_state,
5941				pp_state);
5942		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5943	}
5944
5945	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
5946		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5947			pp_support_state = PP_STATE_SUPPORT_LS;
5948			pp_state = PP_STATE_LS;
5949		}
5950		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5951			pp_support_state |= PP_STATE_SUPPORT_CG;
5952			pp_state |= PP_STATE_CG;
5953		}
5954		if (state == AMD_CG_STATE_UNGATE)
5955			pp_state = 0;
5956
5957		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5958				PP_BLOCK_GFX_3D,
5959				pp_support_state,
5960				pp_state);
5961		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5962	}
5963
5964	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5965		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5966			pp_support_state = PP_STATE_SUPPORT_LS;
5967			pp_state = PP_STATE_LS;
5968		}
5969
5970		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5971			pp_support_state |= PP_STATE_SUPPORT_CG;
5972			pp_state |= PP_STATE_CG;
5973		}
5974
5975		if (state == AMD_CG_STATE_UNGATE)
5976			pp_state = 0;
5977
5978		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5979				PP_BLOCK_GFX_MG,
5980				pp_support_state,
5981				pp_state);
5982		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5983	}
5984
5985	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
5986		pp_support_state = PP_STATE_SUPPORT_LS;
5987
5988		if (state == AMD_CG_STATE_UNGATE)
5989			pp_state = 0;
5990		else
5991			pp_state = PP_STATE_LS;
5992
5993		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5994				PP_BLOCK_GFX_RLC,
5995				pp_support_state,
5996				pp_state);
5997		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5998	}
5999
6000	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
6001		pp_support_state = PP_STATE_SUPPORT_LS;
6002
6003		if (state == AMD_CG_STATE_UNGATE)
6004			pp_state = 0;
6005		else
6006			pp_state = PP_STATE_LS;
6007		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6008			PP_BLOCK_GFX_CP,
6009			pp_support_state,
6010			pp_state);
6011		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
6012	}
6013
6014	return 0;
6015}
6016
6017static int gfx_v8_0_set_clockgating_state(void *handle,
6018					  enum amd_clockgating_state state)
6019{
6020	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6021
6022	if (amdgpu_sriov_vf(adev))
6023		return 0;
6024
6025	switch (adev->asic_type) {
6026	case CHIP_FIJI:
6027	case CHIP_CARRIZO:
6028	case CHIP_STONEY:
6029		gfx_v8_0_update_gfx_clock_gating(adev,
6030						 state == AMD_CG_STATE_GATE);
6031		break;
6032	case CHIP_TONGA:
6033		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
6034		break;
6035	case CHIP_POLARIS10:
6036	case CHIP_POLARIS11:
6037	case CHIP_POLARIS12:
6038	case CHIP_VEGAM:
6039		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6040		break;
6041	default:
6042		break;
6043	}
6044	return 0;
6045}
6046
6047static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
6048{
6049	return ring->adev->wb.wb[ring->rptr_offs];
6050}
6051
6052static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
6053{
6054	struct amdgpu_device *adev = ring->adev;
6055
6056	if (ring->use_doorbell)
6057		/* XXX check if swapping is necessary on BE */
6058		return ring->adev->wb.wb[ring->wptr_offs];
6059	else
6060		return RREG32(mmCP_RB0_WPTR);
6061}
6062
6063static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
6064{
6065	struct amdgpu_device *adev = ring->adev;
6066
6067	if (ring->use_doorbell) {
6068		/* XXX check if swapping is necessary on BE */
6069		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6070		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6071	} else {
6072		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
6073		(void)RREG32(mmCP_RB0_WPTR);
6074	}
6075}
6076
6077static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
6078{
6079	u32 ref_and_mask, reg_mem_engine;
6080
6081	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
6082	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
6083		switch (ring->me) {
6084		case 1:
6085			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
6086			break;
6087		case 2:
6088			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
6089			break;
6090		default:
6091			return;
6092		}
6093		reg_mem_engine = 0;
6094	} else {
6095		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
6096		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
6097	}
6098
6099	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6100	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
6101				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
6102				 reg_mem_engine));
6103	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
6104	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
6105	amdgpu_ring_write(ring, ref_and_mask);
6106	amdgpu_ring_write(ring, ref_and_mask);
6107	amdgpu_ring_write(ring, 0x20); /* poll interval */
6108}
6109
6110static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
6111{
6112	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6113	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
6114		EVENT_INDEX(4));
6115
6116	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6117	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
6118		EVENT_INDEX(0));
6119}
6120
6121static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
6122					struct amdgpu_job *job,
6123					struct amdgpu_ib *ib,
6124					uint32_t flags)
6125{
6126	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6127	u32 header, control = 0;
6128
6129	if (ib->flags & AMDGPU_IB_FLAG_CE)
6130		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
6131	else
6132		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
6133
6134	control |= ib->length_dw | (vmid << 24);
6135
6136	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
6137		control |= INDIRECT_BUFFER_PRE_ENB(1);
6138
6139		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
6140			gfx_v8_0_ring_emit_de_meta(ring);
6141	}
6142
6143	amdgpu_ring_write(ring, header);
6144	amdgpu_ring_write(ring,
6145#ifdef __BIG_ENDIAN
6146			  (2 << 0) |
6147#endif
6148			  (ib->gpu_addr & 0xFFFFFFFC));
6149	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6150	amdgpu_ring_write(ring, control);
6151}
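
/*
 * For reference, the INDIRECT_BUFFER packet emitted above is four dwords
 * (a sketch of the layout as built by this function, not a spec quote):
 *   DW0: PACKET3(PACKET3_INDIRECT_BUFFER[_CONST], 2)
 *   DW1: IB GPU address bits 31:2 (the low bits carry the BE swap mode)
 *   DW2: IB GPU address bits 47:32
 *   DW3: control = length_dw | (vmid << 24) [| INDIRECT_BUFFER_PRE_ENB]
 */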
6152
6153static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
6154					  struct amdgpu_job *job,
6155					  struct amdgpu_ib *ib,
6156					  uint32_t flags)
6157{
6158	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6159	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
6160
6161	/* Currently there is a high probability of a wave ID mismatch
6162	 * between ME and GDS, leading to a hw deadlock, because ME generates
6163	 * different wave IDs than the GDS expects. This situation happens
6164	 * randomly when at least 5 compute pipes use GDS ordered append.
6165	 * The wave IDs generated by ME are also wrong after suspend/resume.
6166	 * Those are probably bugs somewhere else in the kernel driver.
6167	 *
6168	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
6169	 * GDS to 0 for this ring (me/pipe).
6170	 */
6171	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
6172		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
6173		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
6174		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
6175	}
6176
6177	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
6178	amdgpu_ring_write(ring,
6179#ifdef __BIG_ENDIAN
6180				(2 << 0) |
6181#endif
6182				(ib->gpu_addr & 0xFFFFFFFC));
6183	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6184	amdgpu_ring_write(ring, control);
6185}
6186
6187static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
6188					 u64 seq, unsigned flags)
6189{
6190	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6191	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6192
6193	/* Workaround for cache flush problems. First send a dummy EOP
6194	 * event down the pipe with seq one below; that fence has already
6195	 * signalled, so the extra write is harmless. */
6196	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6197	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6198				 EOP_TC_ACTION_EN |
6199				 EOP_TC_WB_ACTION_EN |
6200				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6201				 EVENT_INDEX(5)));
6202	amdgpu_ring_write(ring, addr & 0xfffffffc);
6203	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6204				DATA_SEL(1) | INT_SEL(0));
6205	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
6206	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
6207
6208	/* Then send the real EOP event down the pipe:
6209	 * EVENT_WRITE_EOP - flush caches, send int */
6210	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6211	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6212				 EOP_TC_ACTION_EN |
6213				 EOP_TC_WB_ACTION_EN |
6214				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6215				 EVENT_INDEX(5)));
6216	amdgpu_ring_write(ring, addr & 0xfffffffc);
6217	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6218			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6219	amdgpu_ring_write(ring, lower_32_bits(seq));
6220	amdgpu_ring_write(ring, upper_32_bits(seq));
6221
6222}
6223
6224static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
6225{
6226	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6227	uint32_t seq = ring->fence_drv.sync_seq;
6228	uint64_t addr = ring->fence_drv.gpu_addr;
6229
6230	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6231	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
6232				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
6233				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
6234	amdgpu_ring_write(ring, addr & 0xfffffffc);
6235	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
6236	amdgpu_ring_write(ring, seq);
6237	amdgpu_ring_write(ring, 0xffffffff);
6238	amdgpu_ring_write(ring, 4); /* poll interval */
6239}
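
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * gfx_v8_0_ring_emit_hdp_flush() and gfx_v8_0_ring_emit_pipeline_sync()
 * above both build the same 7-dword WAIT_REG_MEM packet; only the control
 * word, the register-pair/address payload and the poll interval differ.
 */
static void gfx_v8_0_example_wait_reg_mem(struct amdgpu_ring *ring, u32 cntl,
					  u32 lo, u32 hi, u32 ref, u32 mask,
					  u32 interval)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, cntl);	   /* operation | function | engine | space */
	amdgpu_ring_write(ring, lo);	   /* register offset or address low */
	amdgpu_ring_write(ring, hi);	   /* second register or address high */
	amdgpu_ring_write(ring, ref);	   /* reference value */
	amdgpu_ring_write(ring, mask);	   /* AND mask applied before the compare */
	amdgpu_ring_write(ring, interval); /* poll interval */
}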
6240
6241static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
6242					unsigned vmid, uint64_t pd_addr)
6243{
6244	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6245
6246	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
6247
6248	/* wait for the invalidate to complete */
6249	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6250	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
6251				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
6252				 WAIT_REG_MEM_ENGINE(0))); /* me */
6253	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
6254	amdgpu_ring_write(ring, 0);
6255	amdgpu_ring_write(ring, 0); /* ref */
6256	amdgpu_ring_write(ring, 0); /* mask */
6257	amdgpu_ring_write(ring, 0x20); /* poll interval */
6258
6259	/* compute doesn't have PFP */
6260	if (usepfp) {
6261		/* sync PFP to ME, otherwise we might get invalid PFP reads */
6262		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
6263		amdgpu_ring_write(ring, 0x0);
6264	}
6265}
6266
6267static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
6268{
6269	return ring->adev->wb.wb[ring->wptr_offs];
6270}
6271
6272static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6273{
6274	struct amdgpu_device *adev = ring->adev;
6275
6276	/* XXX check if swapping is necessary on BE */
6277	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6278	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6279}
6280
6281static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
6282					     u64 addr, u64 seq,
6283					     unsigned flags)
6284{
6285	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6286	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6287
6288	/* RELEASE_MEM - flush caches, send int */
6289	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
6290	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6291				 EOP_TC_ACTION_EN |
6292				 EOP_TC_WB_ACTION_EN |
6293				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6294				 EVENT_INDEX(5)));
6295	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6296	amdgpu_ring_write(ring, addr & 0xfffffffc);
6297	amdgpu_ring_write(ring, upper_32_bits(addr));
6298	amdgpu_ring_write(ring, lower_32_bits(seq));
6299	amdgpu_ring_write(ring, upper_32_bits(seq));
6300}
6301
6302static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
6303					 u64 seq, unsigned int flags)
6304{
6305	/* we only allocate 32 bits for each seq wb address */
6306	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
6307
6308	/* write fence seq to the "addr" */
6309	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6310	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6311				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6312	amdgpu_ring_write(ring, lower_32_bits(addr));
6313	amdgpu_ring_write(ring, upper_32_bits(addr));
6314	amdgpu_ring_write(ring, lower_32_bits(seq));
6315
6316	if (flags & AMDGPU_FENCE_FLAG_INT) {
6317		/* set register to trigger INT */
6318		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6319		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6320					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6321		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
6322		amdgpu_ring_write(ring, 0);
6323		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6324	}
6325}
6326
6327static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
6328{
6329	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
6330	amdgpu_ring_write(ring, 0);
6331}
6332
6333static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
6334{
6335	uint32_t dw2 = 0;
6336
6337	if (amdgpu_sriov_vf(ring->adev))
6338		gfx_v8_0_ring_emit_ce_meta(ring);
6339
6340	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
6341	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
6342		gfx_v8_0_ring_emit_vgt_flush(ring);
6343		/* set load_global_config & load_global_uconfig */
6344		dw2 |= 0x8001;
6345		/* set load_cs_sh_regs */
6346		dw2 |= 0x01000000;
6347		/* set load_per_context_state & load_gfx_sh_regs for GFX */
6348		dw2 |= 0x10002;
6349
6350		/* set load_ce_ram if preamble presented */
6351		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
6352			dw2 |= 0x10000000;
6353	} else {
6354		/* still load_ce_ram if this is the first time the preamble is
6355		 * presented, although no context switch happens.
6356		 */
6357		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
6358			dw2 |= 0x10000000;
6359	}
6360
6361	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6362	amdgpu_ring_write(ring, dw2);
6363	amdgpu_ring_write(ring, 0);
6364}
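
/*
 * Arithmetic check for the dw2 bits above (derived from this function, not
 * from the PM4 spec): a context switch with a preamble IB present ORs
 * 0x80000000 | 0x8001 | 0x01000000 | 0x10002 | 0x10000000 = 0x91018003,
 * and without the preamble bit the CONTEXT_CONTROL payload is 0x81018003.
 */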
6365
6366static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
6367{
6368	unsigned ret;
6369
6370	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
6371	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
6372	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
6373	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
6374	ret = ring->wptr & ring->buf_mask;
6375	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
6376	return ret;
6377}
6378
6379static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
6380{
6381	unsigned cur;
6382
6383	BUG_ON(offset > ring->buf_mask);
6384	BUG_ON(ring->ring[offset] != 0x55aa55aa);
6385
6386	cur = (ring->wptr & ring->buf_mask) - 1;
6387	if (likely(cur > offset))
6388		ring->ring[offset] = cur - offset;
6389	else
6390		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
6391}
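
/*
 * Worked example of the wrap-around branch above: on a 4096-dword ring
 * (buf_mask 0xfff, ring_size >> 2 == 4096), a COND_EXEC patched at
 * offset 0xffe with wptr wrapped to 0x002 gives cur = 0x001, so the patch
 * value is 4096 - 0xffe + 0x001 = 3 dwords, counted across the wrap.
 */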
6392
6393static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6394				    uint32_t reg_val_offs)
6395{
6396	struct amdgpu_device *adev = ring->adev;
6397
6398	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6399	amdgpu_ring_write(ring, 0 |	/* src: register */
6400				(5 << 8) |	/* dst: memory */
6401				(1 << 20));	/* write confirm */
6402	amdgpu_ring_write(ring, reg);
6403	amdgpu_ring_write(ring, 0);
6404	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6405				reg_val_offs * 4));
6406	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6407				reg_val_offs * 4));
6408}
6409
6410static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6411				  uint32_t val)
6412{
6413	uint32_t cmd;
6414
6415	switch (ring->funcs->type) {
6416	case AMDGPU_RING_TYPE_GFX:
6417		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6418		break;
6419	case AMDGPU_RING_TYPE_KIQ:
6420		cmd = 1 << 16; /* no inc addr */
6421		break;
6422	default:
6423		cmd = WR_CONFIRM;
6424		break;
6425	}
6426
6427	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6428	amdgpu_ring_write(ring, cmd);
6429	amdgpu_ring_write(ring, reg);
6430	amdgpu_ring_write(ring, 0);
6431	amdgpu_ring_write(ring, val);
6432}
6433
6434static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
6435{
6436	struct amdgpu_device *adev = ring->adev;
6437	uint32_t value = 0;
6438
6439	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6440	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6441	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6442	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6443	WREG32(mmSQ_CMD, value);
6444}
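
/*
 * Editorial note on the SQ_CMD write above: CHECK_VMID=1 with VM_ID=vmid
 * restricts the command to waves belonging to the hung job, and CMD=0x03
 * with MODE=0x01 presumably requests that those waves be killed so the
 * ring can make progress again. The encoding is inferred from the register
 * usage here, not from a published spec.
 */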
6445
6446static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6447						 enum amdgpu_interrupt_state state)
6448{
6449	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
6450		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6451}
6452
6453static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6454						     int me, int pipe,
6455						     enum amdgpu_interrupt_state state)
6456{
6457	u32 mec_int_cntl, mec_int_cntl_reg;
6458
6459	/*
6460	 * amdgpu controls only the first MEC. That's why this function only
6461	 * handles the setting of interrupts for this specific MEC. All other
6462	 * pipes' interrupts are set by amdkfd.
6463	 */
6464
6465	if (me == 1) {
6466		switch (pipe) {
6467		case 0:
6468			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
6469			break;
6470		case 1:
6471			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
6472			break;
6473		case 2:
6474			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
6475			break;
6476		case 3:
6477			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
6478			break;
6479		default:
6480			DRM_DEBUG("invalid pipe %d\n", pipe);
6481			return;
6482		}
6483	} else {
6484		DRM_DEBUG("invalid me %d\n", me);
6485		return;
6486	}
6487
6488	switch (state) {
6489	case AMDGPU_IRQ_STATE_DISABLE:
6490		mec_int_cntl = RREG32(mec_int_cntl_reg);
6491		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6492		WREG32(mec_int_cntl_reg, mec_int_cntl);
6493		break;
6494	case AMDGPU_IRQ_STATE_ENABLE:
6495		mec_int_cntl = RREG32(mec_int_cntl_reg);
6496		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6497		WREG32(mec_int_cntl_reg, mec_int_cntl);
6498		break;
6499	default:
6500		break;
6501	}
6502}
6503
6504static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6505					     struct amdgpu_irq_src *source,
6506					     unsigned type,
6507					     enum amdgpu_interrupt_state state)
6508{
6509	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
6510		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6511
6512	return 0;
6513}
6514
6515static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6516					      struct amdgpu_irq_src *source,
6517					      unsigned type,
6518					      enum amdgpu_interrupt_state state)
6519{
6520	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
6521		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6522
6523	return 0;
6524}
6525
6526static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6527					    struct amdgpu_irq_src *src,
6528					    unsigned type,
6529					    enum amdgpu_interrupt_state state)
6530{
6531	switch (type) {
6532	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6533		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
6534		break;
6535	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6536		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6537		break;
6538	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6539		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6540		break;
6541	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6542		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6543		break;
6544	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6545		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6546		break;
6547	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6548		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6549		break;
6550	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6551		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6552		break;
6553	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6554		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6555		break;
6556	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6557		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6558		break;
6559	default:
6560		break;
6561	}
6562	return 0;
6563}
6564
6565static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
6566					 struct amdgpu_irq_src *source,
6567					 unsigned int type,
6568					 enum amdgpu_interrupt_state state)
6569{
6570	int enable_flag;
6571
6572	switch (state) {
6573	case AMDGPU_IRQ_STATE_DISABLE:
6574		enable_flag = 0;
6575		break;
6576
6577	case AMDGPU_IRQ_STATE_ENABLE:
6578		enable_flag = 1;
6579		break;
6580
6581	default:
6582		return -EINVAL;
6583	}
6584
6585	WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6586	WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6587	WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6588	WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6589	WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6590	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6591		     enable_flag);
6592	WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6593		     enable_flag);
6594	WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6595		     enable_flag);
6596	WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6597		     enable_flag);
6598	WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6599		     enable_flag);
6600	WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6601		     enable_flag);
6602	WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6603		     enable_flag);
6604	WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6605		     enable_flag);
6606
6607	return 0;
6608}
6609
6610static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
6611				     struct amdgpu_irq_src *source,
6612				     unsigned int type,
6613				     enum amdgpu_interrupt_state state)
6614{
6615	int enable_flag;
6616
6617	switch (state) {
6618	case AMDGPU_IRQ_STATE_DISABLE:
6619		enable_flag = 1;
6620		break;
6621
6622	case AMDGPU_IRQ_STATE_ENABLE:
6623		enable_flag = 0;
6624		break;
6625
6626	default:
6627		return -EINVAL;
6628	}
6629
6630	WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
6631		     enable_flag);
6632
6633	return 0;
6634}
6635
6636static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
6637			    struct amdgpu_irq_src *source,
6638			    struct amdgpu_iv_entry *entry)
6639{
6640	int i;
6641	u8 me_id, pipe_id, queue_id;
6642	struct amdgpu_ring *ring;
6643
6644	DRM_DEBUG("IH: CP EOP\n");
6645	me_id = (entry->ring_id & 0x0c) >> 2;
6646	pipe_id = (entry->ring_id & 0x03) >> 0;
6647	queue_id = (entry->ring_id & 0x70) >> 4;
6648
6649	switch (me_id) {
6650	case 0:
6651		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6652		break;
6653	case 1:
6654	case 2:
6655		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6656			ring = &adev->gfx.compute_ring[i];
6657			/* Per-queue interrupt is supported for MEC starting from VI.
6658			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
6659			 */
6660			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6661				amdgpu_fence_process(ring);
6662		}
6663		break;
6664	}
6665	return 0;
6666}
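
/*
 * Example decode of the bitfields above: entry->ring_id == 0x19 yields
 * me_id = (0x19 & 0x0c) >> 2 = 2, pipe_id = 0x19 & 0x03 = 1 and
 * queue_id = (0x19 & 0x70) >> 4 = 1, i.e. the fence on MEC2 pipe 1 queue 1.
 */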
6667
6668static void gfx_v8_0_fault(struct amdgpu_device *adev,
6669			   struct amdgpu_iv_entry *entry)
6670{
6671	u8 me_id, pipe_id, queue_id;
6672	struct amdgpu_ring *ring;
6673	int i;
6674
6675	me_id = (entry->ring_id & 0x0c) >> 2;
6676	pipe_id = (entry->ring_id & 0x03) >> 0;
6677	queue_id = (entry->ring_id & 0x70) >> 4;
6678
6679	switch (me_id) {
6680	case 0:
6681		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6682		break;
6683	case 1:
6684	case 2:
6685		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6686			ring = &adev->gfx.compute_ring[i];
6687			if (ring->me == me_id && ring->pipe == pipe_id &&
6688			    ring->queue == queue_id)
6689				drm_sched_fault(&ring->sched);
6690		}
6691		break;
6692	}
6693}
6694
6695static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
6696				 struct amdgpu_irq_src *source,
6697				 struct amdgpu_iv_entry *entry)
6698{
6699	DRM_ERROR("Illegal register access in command stream\n");
6700	gfx_v8_0_fault(adev, entry);
6701	return 0;
6702}
6703
6704static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
6705				  struct amdgpu_irq_src *source,
6706				  struct amdgpu_iv_entry *entry)
6707{
6708	DRM_ERROR("Illegal instruction in command stream\n");
6709	gfx_v8_0_fault(adev, entry);
6710	return 0;
6711}
6712
6713static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
6714				     struct amdgpu_irq_src *source,
6715				     struct amdgpu_iv_entry *entry)
6716{
6717	DRM_ERROR("CP EDC/ECC error detected.\n");
6718	return 0;
6719}
6720
6721static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
6722				  bool from_wq)
6723{
6724	u32 enc, se_id, sh_id, cu_id;
6725	char type[20];
6726	int sq_edc_source = -1;
6727
6728	enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6729	se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6730
6731	switch (enc) {
6732		case 0:
6733			DRM_INFO("SQ general purpose intr detected: "
6734					"se_id %d, immed_overflow %d, host_reg_overflow %d, "
6735					"host_cmd_overflow %d, cmd_timestamp %d, "
6736					"reg_timestamp %d, thread_trace_buff_full %d, "
6737					"wlt %d, thread_trace %d.\n",
6738					se_id,
6739					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6740					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6741					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6742					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6743					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6744					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6745					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6746					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6747					);
6748			break;
6749		case 1:
6750		case 2:
6751
6752			cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6753			sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6754
6755			/*
6756			 * This function can be called either directly from the
6757			 * ISR or from a BH, in which case we can also access
6758			 * the SQ_EDC_INFO instance.
6759			 */
6760			if (from_wq) {
6761				mutex_lock(&adev->grbm_idx_mutex);
6762				gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
6763
6764				sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
6765
6766				gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6767				mutex_unlock(&adev->grbm_idx_mutex);
6768			}
6769
6770			if (enc == 1)
6771				sprintf(type, "instruction intr");
6772			else
6773				sprintf(type, "EDC/ECC error");
6774
6775			DRM_INFO(
6776				"SQ %s detected: "
6777					"se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
6778					"trap %s, sq_edc_info.source %s.\n",
6779					type, se_id, sh_id, cu_id,
6780					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
6781					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
6782					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
6783					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
6784					(sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
6785				);
6786			break;
6787		default:
6788			DRM_ERROR("SQ invalid encoding type.\n");
6789	}
6790}
6791
6792static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
6793{
6794
6795	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
6796	struct sq_work *sq_work = container_of(work, struct sq_work, work);
6797
6798	gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
6799}
6800
6801static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6802			   struct amdgpu_irq_src *source,
6803			   struct amdgpu_iv_entry *entry)
6804{
6805	unsigned ih_data = entry->src_data[0];
6806
6807	/*
6808	 * Try to submit work so SQ_EDC_INFO can be accessed from
6809	 * BH. If previous work submission hasn't finished yet
6810	 * just print whatever info is possible directly from the ISR.
6811	 */
6812	if (work_pending(&adev->gfx.sq_work.work)) {
6813		gfx_v8_0_parse_sq_irq(adev, ih_data, false);
6814	} else {
6815		adev->gfx.sq_work.ih_data = ih_data;
6816		schedule_work(&adev->gfx.sq_work.work);
6817	}
6818
6819	return 0;
6820}
6821
6822static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
6823{
6824	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
6825	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6826			  PACKET3_TC_ACTION_ENA |
6827			  PACKET3_SH_KCACHE_ACTION_ENA |
6828			  PACKET3_SH_ICACHE_ACTION_ENA |
6829			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
6830	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6831	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
6832	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
6833}
6834
6835static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
6836{
6837	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6838	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6839			  PACKET3_TC_ACTION_ENA |
6840			  PACKET3_SH_KCACHE_ACTION_ENA |
6841			  PACKET3_SH_ICACHE_ACTION_ENA |
6842			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
6843	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
6844	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
6845	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
6846	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
6847	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
6848}
6849
6850
6851/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
6852#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT	0x0000007f
6853static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6854					uint32_t pipe, bool enable)
6855{
6856	uint32_t val;
6857	uint32_t wcl_cs_reg;
6858
6859	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
6860
6861	switch (pipe) {
6862	case 0:
6863		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
6864		break;
6865	case 1:
6866		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
6867		break;
6868	case 2:
6869		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
6870		break;
6871	case 3:
6872		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
6873		break;
6874	default:
6875		DRM_DEBUG("invalid pipe %d\n", pipe);
6876		return;
6877	}
6878
6879	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6880
6881}
6882
6883#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT	0x07ffffff
6884static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
6885{
6886	struct amdgpu_device *adev = ring->adev;
6887	uint32_t val;
6888	int i;
6889
6890	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register that limits
6891	 * the number of gfx waves. Setting the low 5 bits (0x1f) makes sure gfx
6892	 * only gets around 25% of the gpu resources.
6893	 */
6894	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
6895	amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
6896
6897	/* Restrict waves for normal/low priority compute queues as well
6898	 * to get best QoS for high priority compute jobs.
6899	 *
6900	 * amdgpu controls only 1st ME(0-3 CS pipes).
6901	 */
6902	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
6903		if (i != ring->pipe)
6904			gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
6905
6906	}
6907
6908}
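
/*
 * Arithmetic behind the "around 25%" claim above, taking the 7-bit
 * multiplier description at face value: the enable value 0x1f out of a
 * 0x7f full scale is 31/127, roughly 24.4% of the wave slots.
 */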
6909
6910static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
6911	.name = "gfx_v8_0",
6912	.early_init = gfx_v8_0_early_init,
6913	.late_init = gfx_v8_0_late_init,
6914	.sw_init = gfx_v8_0_sw_init,
6915	.sw_fini = gfx_v8_0_sw_fini,
6916	.hw_init = gfx_v8_0_hw_init,
6917	.hw_fini = gfx_v8_0_hw_fini,
6918	.suspend = gfx_v8_0_suspend,
6919	.resume = gfx_v8_0_resume,
6920	.is_idle = gfx_v8_0_is_idle,
6921	.wait_for_idle = gfx_v8_0_wait_for_idle,
6922	.check_soft_reset = gfx_v8_0_check_soft_reset,
6923	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
6924	.soft_reset = gfx_v8_0_soft_reset,
6925	.post_soft_reset = gfx_v8_0_post_soft_reset,
6926	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
6927	.set_powergating_state = gfx_v8_0_set_powergating_state,
6928	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
6929};
6930
6931static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6932	.type = AMDGPU_RING_TYPE_GFX,
6933	.align_mask = 0xff,
6934	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6935	.support_64bit_ptrs = false,
6936	.get_rptr = gfx_v8_0_ring_get_rptr,
6937	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
6938	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
6939	.emit_frame_size = /* maximum 215dw if counting 16 IBs in */
6940		5 +  /* COND_EXEC */
6941		7 +  /* PIPELINE_SYNC */
6942		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
6943		12 +  /* FENCE for VM_FLUSH */
6944		20 + /* GDS switch */
6945		4 + /* double SWITCH_BUFFER,
6946		       the first COND_EXEC jumps to the place just
6947		       prior to this double SWITCH_BUFFER */
6948		5 + /* COND_EXEC */
6949		7 + /* HDP_flush */
6950		4 + /* VGT_flush */
6951		14 + /* CE_META */
6952		31 + /* DE_META */
6953		3 + /* CNTX_CTRL */
6954		5 + /* HDP_INVL */
6955		12 + 12 + /* FENCE x2 */
6956		2 + /* SWITCH_BUFFER */
6957		5, /* SURFACE_SYNC */
6958	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_gfx */
6959	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
6960	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
6961	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6962	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6963	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6964	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6965	.test_ring = gfx_v8_0_ring_test_ring,
6966	.test_ib = gfx_v8_0_ring_test_ib,
6967	.insert_nop = amdgpu_ring_insert_nop,
6968	.pad_ib = amdgpu_ring_generic_pad_ib,
6969	.emit_switch_buffer = gfx_v8_ring_emit_sb,
6970	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
6971	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
6972	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
6973	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6974	.soft_recovery = gfx_v8_0_ring_soft_recovery,
6975	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
6976};
6977
6978static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6979	.type = AMDGPU_RING_TYPE_COMPUTE,
6980	.align_mask = 0xff,
6981	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6982	.support_64bit_ptrs = false,
6983	.get_rptr = gfx_v8_0_ring_get_rptr,
6984	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
6985	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
6986	.emit_frame_size =
6987		20 + /* gfx_v8_0_ring_emit_gds_switch */
6988		7 + /* gfx_v8_0_ring_emit_hdp_flush */
6989		5 + /* hdp_invalidate */
6990		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6991		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
6992		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
6993		7 + /* gfx_v8_0_emit_mem_sync_compute */
6994		5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
6995		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
6996	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
6997	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
6998	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
6999	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
7000	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
7001	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
7002	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
7003	.test_ring = gfx_v8_0_ring_test_ring,
7004	.test_ib = gfx_v8_0_ring_test_ib,
7005	.insert_nop = amdgpu_ring_insert_nop,
7006	.pad_ib = amdgpu_ring_generic_pad_ib,
7007	.emit_wreg = gfx_v8_0_ring_emit_wreg,
7008	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
7009	.emit_wave_limit = gfx_v8_0_emit_wave_limit,
7010};
7011
7012static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
7013	.type = AMDGPU_RING_TYPE_KIQ,
7014	.align_mask = 0xff,
7015	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7016	.support_64bit_ptrs = false,
7017	.get_rptr = gfx_v8_0_ring_get_rptr,
7018	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
7019	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
7020	.emit_frame_size =
7021		20 + /* gfx_v8_0_ring_emit_gds_switch */
7022		7 + /* gfx_v8_0_ring_emit_hdp_flush */
7023		5 + /* hdp_invalidate */
7024		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
7025		17 + /* gfx_v8_0_ring_emit_vm_flush */
7026		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
7027	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
7028	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
7029	.test_ring = gfx_v8_0_ring_test_ring,
7030	.insert_nop = amdgpu_ring_insert_nop,
7031	.pad_ib = amdgpu_ring_generic_pad_ib,
7032	.emit_rreg = gfx_v8_0_ring_emit_rreg,
7033	.emit_wreg = gfx_v8_0_ring_emit_wreg,
7034};
7035
7036static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
7037{
7038	int i;
7039
7040	adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
7041
7042	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7043		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
7044
7045	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7046		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
7047}
7048
7049static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
7050	.set = gfx_v8_0_set_eop_interrupt_state,
7051	.process = gfx_v8_0_eop_irq,
7052};
7053
7054static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
7055	.set = gfx_v8_0_set_priv_reg_fault_state,
7056	.process = gfx_v8_0_priv_reg_irq,
7057};
7058
7059static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
7060	.set = gfx_v8_0_set_priv_inst_fault_state,
7061	.process = gfx_v8_0_priv_inst_irq,
7062};
7063
7064static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
7065	.set = gfx_v8_0_set_cp_ecc_int_state,
7066	.process = gfx_v8_0_cp_ecc_error_irq,
7067};
7068
7069static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
7070	.set = gfx_v8_0_set_sq_int_state,
7071	.process = gfx_v8_0_sq_irq,
7072};
7073
7074static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
7075{
7076	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7077	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
7078
7079	adev->gfx.priv_reg_irq.num_types = 1;
7080	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
7081
7082	adev->gfx.priv_inst_irq.num_types = 1;
7083	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
7084
7085	adev->gfx.cp_ecc_error_irq.num_types = 1;
7086	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
7087
7088	adev->gfx.sq_irq.num_types = 1;
7089	adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
7090}
7091
7092static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
7093{
7094	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
7095}
7096
7097static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
7098{
7099	/* init asic gds info */
7100	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
7101	adev->gds.gws_size = 64;
7102	adev->gds.oa_size = 16;
7103	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
7104}
7105
7106static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7107						 u32 bitmap)
7108{
7109	u32 data;
7110
7111	if (!bitmap)
7112		return;
7113
7114	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7115	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7116
7117	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
7118}
7119
7120static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7121{
7122	u32 data, mask;
7123
7124	data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
7125		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
7126
7127	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7128
7129	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
7130}
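
/*
 * Example (assuming amdgpu_gfx_create_bitmask(n) returns the low-n-bits
 * mask (1 << n) - 1): with max_cu_per_sh = 8 and INACTIVE_CUS reading
 * 0x30, the function returns ~0x30 & 0xff = 0xcf, i.e. CUs 4 and 5 of
 * this shader array are fused off or disabled.
 */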
7131
7132static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
7133{
7134	int i, j, k, counter, active_cu_number = 0;
7135	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7136	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
7137	unsigned disable_masks[4 * 2];
7138	u32 ao_cu_num;
7139
7140	memset(cu_info, 0, sizeof(*cu_info));
7141
7142	if (adev->flags & AMD_IS_APU)
7143		ao_cu_num = 2;
7144	else
7145		ao_cu_num = adev->gfx.config.max_cu_per_sh;
7146
7147	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
7148
7149	mutex_lock(&adev->grbm_idx_mutex);
7150	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7151		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7152			mask = 1;
7153			ao_bitmap = 0;
7154			counter = 0;
7155			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
7156			if (i < 4 && j < 2)
7157				gfx_v8_0_set_user_cu_inactive_bitmap(
7158					adev, disable_masks[i * 2 + j]);
7159			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
7160			cu_info->bitmap[i][j] = bitmap;
7161
7162			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7163				if (bitmap & mask) {
7164					if (counter < ao_cu_num)
7165						ao_bitmap |= mask;
7166					counter++;
7167				}
7168				mask <<= 1;
7169			}
7170			active_cu_number += counter;
7171			if (i < 2 && j < 2)
7172				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7173			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
7174		}
7175	}
7176	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7177	mutex_unlock(&adev->grbm_idx_mutex);
7178
7179	cu_info->number = active_cu_number;
7180	cu_info->ao_cu_mask = ao_cu_mask;
7181	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7182	cu_info->max_waves_per_simd = 10;
7183	cu_info->max_scratch_slots_per_cu = 32;
7184	cu_info->wave_front_size = 64;
7185	cu_info->lds_size = 64;
7186}
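
/*
 * Layout note for ao_cu_mask built above: ao_bitmap << (i * 16 + j * 8)
 * places SE0/SH0 in bits 0-7, SE0/SH1 in bits 8-15, SE1/SH0 in bits 16-23
 * and SE1/SH1 in bits 24-31, so only the first two shader engines and
 * shader arrays contribute to the packed always-on CU mask.
 */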
7187
7188const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
7189{
7190	.type = AMD_IP_BLOCK_TYPE_GFX,
7191	.major = 8,
7192	.minor = 0,
7193	.rev = 0,
7194	.funcs = &gfx_v8_0_ip_funcs,
7195};
7196
7197const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
7198{
7199	.type = AMD_IP_BLOCK_TYPE_GFX,
7200	.major = 8,
7201	.minor = 1,
7202	.rev = 0,
7203	.funcs = &gfx_v8_0_ip_funcs,
7204};
7205
7206static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
7207{
7208	uint64_t ce_payload_addr;
7209	int cnt_ce;
7210	union {
7211		struct vi_ce_ib_state regular;
7212		struct vi_ce_ib_state_chained_ib chained;
7213	} ce_payload = {};
7214
7215	if (ring->adev->virt.chained_ib_support) {
7216		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7217			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
7218		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
7219	} else {
7220		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7221			offsetof(struct vi_gfx_meta_data, ce_payload);
7222		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
7223	}
7224
7225	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
7226	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
7227				WRITE_DATA_DST_SEL(8) |
7228				WR_CONFIRM) |
7229				WRITE_DATA_CACHE_POLICY(0));
7230	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
7231	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
7232	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
7233}
7234
7235static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
7236{
7237	uint64_t de_payload_addr, gds_addr, csa_addr;
7238	int cnt_de;
7239	union {
7240		struct vi_de_ib_state regular;
7241		struct vi_de_ib_state_chained_ib chained;
7242	} de_payload = {};
7243
7244	csa_addr = amdgpu_csa_vaddr(ring->adev);
7245	gds_addr = csa_addr + 4096;
7246	if (ring->adev->virt.chained_ib_support) {
7247		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
7248		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
7249		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
7250		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
7251	} else {
7252		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
7253		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
7254		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
7255		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
7256	}
7257
7258	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
7259	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
7260				WRITE_DATA_DST_SEL(8) |
7261				WR_CONFIRM) |
7262				WRITE_DATA_CACHE_POLICY(0));
7263	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
7264	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
7265	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
7266}
v5.9
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/delay.h>
  25#include <linux/kernel.h>
  26#include <linux/firmware.h>
  27#include <linux/module.h>
  28#include <linux/pci.h>
  29
  30#include "amdgpu.h"
  31#include "amdgpu_gfx.h"
 
  32#include "vi.h"
  33#include "vi_structs.h"
  34#include "vid.h"
  35#include "amdgpu_ucode.h"
  36#include "amdgpu_atombios.h"
  37#include "atombios_i2c.h"
  38#include "clearstate_vi.h"
  39
  40#include "gmc/gmc_8_2_d.h"
  41#include "gmc/gmc_8_2_sh_mask.h"
  42
  43#include "oss/oss_3_0_d.h"
  44#include "oss/oss_3_0_sh_mask.h"
  45
  46#include "bif/bif_5_0_d.h"
  47#include "bif/bif_5_0_sh_mask.h"
  48#include "gca/gfx_8_0_d.h"
  49#include "gca/gfx_8_0_enum.h"
  50#include "gca/gfx_8_0_sh_mask.h"
  51
  52#include "dce/dce_10_0_d.h"
  53#include "dce/dce_10_0_sh_mask.h"
  54
  55#include "smu/smu_7_1_3_d.h"
  56
  57#include "ivsrcid/ivsrcid_vislands30.h"
  58
  59#define GFX8_NUM_GFX_RINGS     1
  60#define GFX8_MEC_HPD_SIZE 4096
  61
  62#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
  63#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
  64#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
  65#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
  66
  67#define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
  68#define PIPE_CONFIG(x)					((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
  69#define TILE_SPLIT(x)					((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
  70#define MICRO_TILE_MODE_NEW(x)				((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
  71#define SAMPLE_SPLIT(x)					((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
  72#define BANK_WIDTH(x)					((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
  73#define BANK_HEIGHT(x)					((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
  74#define MACRO_TILE_ASPECT(x)				((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
  75#define NUM_BANKS(x)					((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
  76
  77#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
  78#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
  79#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
  80#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
  81#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
  82#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L
  83
  84/* BPM SERDES CMD */
  85#define SET_BPM_SERDES_CMD    1
  86#define CLE_BPM_SERDES_CMD    0
  87
  88/* BPM Register Address*/
  89enum {
  90	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
  91	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
  92	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
  93	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
  94	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
  95	BPM_REG_FGCG_MAX
  96};
  97
  98#define RLC_FormatDirectRegListLength        14
  99
 100MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
 101MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
 102MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
 103MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
 104MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
 105MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
 106
 107MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
 108MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
 109MODULE_FIRMWARE("amdgpu/stoney_me.bin");
 110MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
 111MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
 112
 113MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
 114MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
 115MODULE_FIRMWARE("amdgpu/tonga_me.bin");
 116MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
 117MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
 118MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
 119
 120MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 121MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 122MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 123MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
 124MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 125
 126MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
 127MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
 128MODULE_FIRMWARE("amdgpu/fiji_me.bin");
 129MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
 130MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 131MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 132
 133MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
 134MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
 135MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
 136MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
 137MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
 138MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
 139MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
 140MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
 141MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
 142MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
 143MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
 144
 145MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
 146MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
 147MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
 148MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
 149MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
 150MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
 151MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
 152MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
 153MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
 154MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
 155MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
 156
 157MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
 158MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
 159MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
 160MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
 161MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
 162MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
 163MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
 164MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
 165MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
 166MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
 167MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
 168
 169MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
 170MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
 171MODULE_FIRMWARE("amdgpu/vegam_me.bin");
 172MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
 173MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
 174MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
 175
 176static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 177{
 178	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
 179	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
 180	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
 181	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
 182	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
 183	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
 184	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
 185	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
 186	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
 187	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 188	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 189	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 190	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 191	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 192	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 193	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 194};
 195
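/*
 * A note on the golden register tables below (an editorial addition): each
 * table is a list of {offset, and_mask, or_value} triples consumed by
 * amdgpu_device_program_register_sequence(); an and_mask of 0xffffffff
 * writes or_value directly, anything else does a read-modify-write of the
 * masked bits.
 */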
 196static const u32 golden_settings_tonga_a11[] =
 197{
 198	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
 199	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 200	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 201	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 202	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 203	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
 204	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 205	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 206	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 207	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 208	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 209	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 210	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
 211	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
 212	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
 213	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 214};
 215
 216static const u32 tonga_golden_common_all[] =
 217{
 218	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 219	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
 220	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
 221	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 222	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 223	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 224	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 225	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 226};
 227
 228static const u32 tonga_mgcg_cgcg_init[] =
 229{
 230	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 231	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 232	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 233	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 234	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 235	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 236	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 237	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 238	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 239	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 240	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 241	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 242	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 243	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 244	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 245	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 246	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 247	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 248	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 249	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 250	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 251	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 252	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 253	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 254	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 255	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 256	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 257	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 258	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 259	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 260	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 261	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 262	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 263	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 264	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 265	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 266	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 267	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 268	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 269	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 270	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 271	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 272	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 273	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 274	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 275	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 276	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 277	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 278	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 279	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 280	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 281	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 282	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 283	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 284	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 285	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 286	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 287	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 288	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 289	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 290	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 291	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 292	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 293	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 294	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 295	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 296	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 297	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 298	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 299	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 300	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 301	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 302	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 303	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 304	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 305};
 306
 307static const u32 golden_settings_vegam_a11[] =
 308{
 309	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
 310	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
 311	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 312	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 313	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 314	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 315	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
 316	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
 317	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 318	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 319	mmSQ_CONFIG, 0x07f80000, 0x01180000,
 320	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 321	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 322	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
 323	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 324	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
 325	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 326};
 327
 328static const u32 vegam_golden_common_all[] =
 329{
 330	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 331	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 332	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 333	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 334	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 335	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 336};
 337
 338static const u32 golden_settings_polaris11_a11[] =
 339{
 340	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
 341	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
 342	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 343	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 344	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 345	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 346	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
 347	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 348	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 349	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 350	mmSQ_CONFIG, 0x07f80000, 0x01180000,
 351	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 352	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 353	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
 354	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 355	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
 356	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 357};
 358
 359static const u32 polaris11_golden_common_all[] =
 360{
 361	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 362	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
 363	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 364	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 365	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 366	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 367};
 368
 369static const u32 golden_settings_polaris10_a11[] =
 370{
 371	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
 372	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
 373	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
 374	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 375	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 376	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 377	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 378	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
 379	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
 380	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 381	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 382	mmSQ_CONFIG, 0x07f80000, 0x07180000,
 383	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 384	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 385	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
 386	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 387	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 388};
 389
 390static const u32 polaris10_golden_common_all[] =
 391{
 392	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 393	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
 394	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
 395	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 396	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 397	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 398	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 399	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 400};
 401
 402static const u32 fiji_golden_common_all[] =
 403{
 404	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 405	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
 406	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
 407	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 408	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 409	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 410	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 411	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 412	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 413	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
 414};
 415
 416static const u32 golden_settings_fiji_a10[] =
 417{
 418	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 419	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 420	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 421	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 422	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 423	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 424	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 425	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 426	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 427	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
 428	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 429};
 430
 431static const u32 fiji_mgcg_cgcg_init[] =
 432{
 433	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 434	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 435	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 436	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 437	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 438	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 439	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 440	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 441	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 442	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 443	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 444	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 445	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 446	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 447	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 448	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 449	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 450	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 451	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 452	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 453	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 454	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 455	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 456	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 457	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 458	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 459	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 460	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 461	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 462	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 463	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 464	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 465	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 466	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 467	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 468};
 469
 470static const u32 golden_settings_iceland_a11[] =
 471{
 472	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 473	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 474	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
 475	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 476	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 477	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 478	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
 479	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 480	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 481	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 482	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 483	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 484	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 485	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
 486	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 487	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
 488};
 489
 490static const u32 iceland_golden_common_all[] =
 491{
 492	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 493	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 494	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 495	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 496	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 497	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 498	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 499	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 500};
 501
 502static const u32 iceland_mgcg_cgcg_init[] =
 503{
 504	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 505	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 506	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 507	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 508	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
 509	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
 510	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
 511	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 512	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 513	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 514	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 515	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 516	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 517	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 518	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 519	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 520	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 521	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 522	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 523	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 524	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 525	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 526	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
 527	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 528	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 529	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 530	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 531	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 532	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 533	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 534	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 535	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 536	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 537	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 538	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 539	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 540	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 541	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 542	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 543	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 544	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 545	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 546	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 547	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 548	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 549	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 550	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 551	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 552	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 553	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 554	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 555	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 556	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 557	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 558	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 559	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 560	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 561	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 562	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 563	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 564	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 565	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 566	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 567	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 568};
 569
 570static const u32 cz_golden_settings_a11[] =
 571{
 572	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 573	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 574	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 575	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
 576	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 577	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 578	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 579	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
 580	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 581	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 582	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
 583	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
 584};
 585
 586static const u32 cz_golden_common_all[] =
 587{
 588	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 589	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 590	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 591	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 592	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 593	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 594	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 595	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 596};
 597
 598static const u32 cz_mgcg_cgcg_init[] =
 599{
 600	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 601	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 602	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 603	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 604	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 605	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 606	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
 607	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 608	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 609	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 610	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 611	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 612	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 613	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 614	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 615	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 616	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 617	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 618	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 619	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 620	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 621	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 622	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 623	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 624	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 625	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 626	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 627	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 628	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 629	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 630	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 631	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 632	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 633	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 634	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 635	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 636	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 637	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 638	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 639	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 640	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 641	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 642	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 643	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 644	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 645	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 646	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 647	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 648	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 649	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 650	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 651	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 652	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 653	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 654	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 655	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 656	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 657	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 658	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 659	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 660	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 661	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 662	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 663	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 664	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 665	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 666	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 667	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 668	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 669	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 670	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 671	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 672	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 673	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 674	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 675};
 676
 677static const u32 stoney_golden_settings_a11[] =
 678{
 679	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 680	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 681	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 682	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 683	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 684	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 685	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 686	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 687	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
 688	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
 689};
 690
 691static const u32 stoney_golden_common_all[] =
 692{
 693	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 694	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
 695	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 696	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
 697	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 698	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 699	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 700	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 701};
 702
 703static const u32 stoney_mgcg_cgcg_init[] =
 704{
 705	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 706	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 707	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 708	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 709	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
 710};
 711
 712
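/* Human-readable decode of the SQ_EDC_INFO SOURCE field for error reporting */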
 713static const char * const sq_edc_source_names[] = {
 714	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
 715	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
 716	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
 717	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
 718	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
 719	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
 720	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
 721};
 722
 723static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 724static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 725static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
 726static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
 727static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
 728static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
 729static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
 730static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 731
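/*
 * Program the per-ASIC "golden" register settings: clockgating init values,
 * common GRBM/PA/SPI configuration and chip-specific workarounds.
 */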
 732static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 733{
 734	switch (adev->asic_type) {
 735	case CHIP_TOPAZ:
 736		amdgpu_device_program_register_sequence(adev,
 737							iceland_mgcg_cgcg_init,
 738							ARRAY_SIZE(iceland_mgcg_cgcg_init));
 739		amdgpu_device_program_register_sequence(adev,
 740							golden_settings_iceland_a11,
 741							ARRAY_SIZE(golden_settings_iceland_a11));
 742		amdgpu_device_program_register_sequence(adev,
 743							iceland_golden_common_all,
 744							ARRAY_SIZE(iceland_golden_common_all));
 745		break;
 746	case CHIP_FIJI:
 747		amdgpu_device_program_register_sequence(adev,
 748							fiji_mgcg_cgcg_init,
 749							ARRAY_SIZE(fiji_mgcg_cgcg_init));
 750		amdgpu_device_program_register_sequence(adev,
 751							golden_settings_fiji_a10,
 752							ARRAY_SIZE(golden_settings_fiji_a10));
 753		amdgpu_device_program_register_sequence(adev,
 754							fiji_golden_common_all,
 755							ARRAY_SIZE(fiji_golden_common_all));
 756		break;
 757
 758	case CHIP_TONGA:
 759		amdgpu_device_program_register_sequence(adev,
 760							tonga_mgcg_cgcg_init,
 761							ARRAY_SIZE(tonga_mgcg_cgcg_init));
 762		amdgpu_device_program_register_sequence(adev,
 763							golden_settings_tonga_a11,
 764							ARRAY_SIZE(golden_settings_tonga_a11));
 765		amdgpu_device_program_register_sequence(adev,
 766							tonga_golden_common_all,
 767							ARRAY_SIZE(tonga_golden_common_all));
 768		break;
 769	case CHIP_VEGAM:
 770		amdgpu_device_program_register_sequence(adev,
 771							golden_settings_vegam_a11,
 772							ARRAY_SIZE(golden_settings_vegam_a11));
 773		amdgpu_device_program_register_sequence(adev,
 774							vegam_golden_common_all,
 775							ARRAY_SIZE(vegam_golden_common_all));
 776		break;
 777	case CHIP_POLARIS11:
 778	case CHIP_POLARIS12:
 779		amdgpu_device_program_register_sequence(adev,
 780							golden_settings_polaris11_a11,
 781							ARRAY_SIZE(golden_settings_polaris11_a11));
 782		amdgpu_device_program_register_sequence(adev,
 783							polaris11_golden_common_all,
 784							ARRAY_SIZE(polaris11_golden_common_all));
 785		break;
 786	case CHIP_POLARIS10:
 787		amdgpu_device_program_register_sequence(adev,
 788							golden_settings_polaris10_a11,
 789							ARRAY_SIZE(golden_settings_polaris10_a11));
 790		amdgpu_device_program_register_sequence(adev,
 791							polaris10_golden_common_all,
 792							ARRAY_SIZE(polaris10_golden_common_all));
 793		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
 794		if (adev->pdev->revision == 0xc7 &&
 795		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
 796		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
 797		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
 798			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
 799			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
 800		}
 801		break;
 802	case CHIP_CARRIZO:
 803		amdgpu_device_program_register_sequence(adev,
 804							cz_mgcg_cgcg_init,
 805							ARRAY_SIZE(cz_mgcg_cgcg_init));
 806		amdgpu_device_program_register_sequence(adev,
 807							cz_golden_settings_a11,
 808							ARRAY_SIZE(cz_golden_settings_a11));
 809		amdgpu_device_program_register_sequence(adev,
 810							cz_golden_common_all,
 811							ARRAY_SIZE(cz_golden_common_all));
 812		break;
 813	case CHIP_STONEY:
 814		amdgpu_device_program_register_sequence(adev,
 815							stoney_mgcg_cgcg_init,
 816							ARRAY_SIZE(stoney_mgcg_cgcg_init));
 817		amdgpu_device_program_register_sequence(adev,
 818							stoney_golden_settings_a11,
 819							ARRAY_SIZE(stoney_golden_settings_a11));
 820		amdgpu_device_program_register_sequence(adev,
 821							stoney_golden_common_all,
 822							ARRAY_SIZE(stoney_golden_common_all));
 823		break;
 824	default:
 825		break;
 826	}
 827}
 828
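/* Expose the first 8 CP scratch registers for use by the ring and IB tests */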
 829static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
 830{
 831	adev->gfx.scratch.num_reg = 8;
 832	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 833	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
 834}
 835
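/*
 * Basic ring liveness test: seed a scratch register with 0xCAFEDEAD, emit a
 * SET_UCONFIG_REG packet that writes 0xDEADBEEF to it, then poll for up to
 * adev->usec_timeout microseconds for the new value to land.
 */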
 836static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 837{
 838	struct amdgpu_device *adev = ring->adev;
 839	uint32_t scratch;
 840	uint32_t tmp = 0;
 841	unsigned i;
 842	int r;
 843
 844	r = amdgpu_gfx_scratch_get(adev, &scratch);
 845	if (r)
 846		return r;
 847
 848	WREG32(scratch, 0xCAFEDEAD);
 849	r = amdgpu_ring_alloc(ring, 3);
 850	if (r)
 851		goto error_free_scratch;
 852
 853	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 854	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 855	amdgpu_ring_write(ring, 0xDEADBEEF);
 856	amdgpu_ring_commit(ring);
 857
 858	for (i = 0; i < adev->usec_timeout; i++) {
 859		tmp = RREG32(scratch);
 860		if (tmp == 0xDEADBEEF)
 861			break;
 862		udelay(1);
 863	}
 864
 865	if (i >= adev->usec_timeout)
 866		r = -ETIMEDOUT;
 867
 868error_free_scratch:
 869	amdgpu_gfx_scratch_free(adev, scratch);
 870	return r;
 871}
 872
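/*
 * Indirect buffer test: same idea as the ring test above, but the
 * 0xDEADBEEF write goes through an IB into a writeback slot, exercising
 * the full IB scheduling and fence-wait path.
 */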
 873static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 874{
 875	struct amdgpu_device *adev = ring->adev;
 876	struct amdgpu_ib ib;
 877	struct dma_fence *f = NULL;
 878
 879	unsigned int index;
 880	uint64_t gpu_addr;
 881	uint32_t tmp;
 882	long r;
 883
 884	r = amdgpu_device_wb_get(adev, &index);
 885	if (r)
 886		return r;
 887
 888	gpu_addr = adev->wb.gpu_addr + (index * 4);
 889	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 890	memset(&ib, 0, sizeof(ib));
 891	r = amdgpu_ib_get(adev, NULL, 16,
 892					AMDGPU_IB_POOL_DIRECT, &ib);
 893	if (r)
 894		goto err1;
 895
 896	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
 897	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
 898	ib.ptr[2] = lower_32_bits(gpu_addr);
 899	ib.ptr[3] = upper_32_bits(gpu_addr);
 900	ib.ptr[4] = 0xDEADBEEF;
 901	ib.length_dw = 5;
 902
 903	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 904	if (r)
 905		goto err2;
 906
 907	r = dma_fence_wait_timeout(f, false, timeout);
 908	if (r == 0) {
 909		r = -ETIMEDOUT;
 910		goto err2;
 911	} else if (r < 0) {
 912		goto err2;
 913	}
 914
 915	tmp = adev->wb.wb[index];
 916	if (tmp == 0xDEADBEEF)
 917		r = 0;
 918	else
 919		r = -EINVAL;
 920
 921err2:
 922	amdgpu_ib_free(adev, &ib, NULL);
 923	dma_fence_put(f);
 924err1:
 925	amdgpu_device_wb_free(adev, index);
 926	return r;
 927}
 928
 929
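/* Release all CP/RLC firmware references taken by gfx_v8_0_init_microcode() */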
 930static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
 931{
 932	release_firmware(adev->gfx.pfp_fw);
 933	adev->gfx.pfp_fw = NULL;
 934	release_firmware(adev->gfx.me_fw);
 935	adev->gfx.me_fw = NULL;
 936	release_firmware(adev->gfx.ce_fw);
 937	adev->gfx.ce_fw = NULL;
 938	release_firmware(adev->gfx.rlc_fw);
 939	adev->gfx.rlc_fw = NULL;
 940	release_firmware(adev->gfx.mec_fw);
 941	adev->gfx.mec_fw = NULL;
 942	if ((adev->asic_type != CHIP_STONEY) &&
 943	    (adev->asic_type != CHIP_TOPAZ))
 944		release_firmware(adev->gfx.mec2_fw);
 945	adev->gfx.mec2_fw = NULL;
 946
 947	kfree(adev->gfx.rlc.register_list_format);
 948}
 949
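/*
 * Fetch and validate the PFP/ME/CE/RLC/MEC microcode images.  Polaris
 * parts try the updated "_2" firmware names first and fall back to the
 * original names; MEC2 is optional and absent on Stoney and Topaz.
 */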
 950static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 951{
 952	const char *chip_name;
 953	char fw_name[30];
 954	int err;
 955	struct amdgpu_firmware_info *info = NULL;
 956	const struct common_firmware_header *header = NULL;
 957	const struct gfx_firmware_header_v1_0 *cp_hdr;
 958	const struct rlc_firmware_header_v2_0 *rlc_hdr;
 959	unsigned int *tmp = NULL, i;
 960
 961	DRM_DEBUG("\n");
 962
 963	switch (adev->asic_type) {
 964	case CHIP_TOPAZ:
 965		chip_name = "topaz";
 966		break;
 967	case CHIP_TONGA:
 968		chip_name = "tonga";
 969		break;
 970	case CHIP_CARRIZO:
 971		chip_name = "carrizo";
 972		break;
 973	case CHIP_FIJI:
 974		chip_name = "fiji";
 975		break;
 976	case CHIP_STONEY:
 977		chip_name = "stoney";
 978		break;
 979	case CHIP_POLARIS10:
 980		chip_name = "polaris10";
 981		break;
 982	case CHIP_POLARIS11:
 983		chip_name = "polaris11";
 984		break;
 985	case CHIP_POLARIS12:
 986		chip_name = "polaris12";
 987		break;
 988	case CHIP_VEGAM:
 989		chip_name = "vegam";
 990		break;
 991	default:
 992		BUG();
 993	}
 994
 995	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
 996		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
 997		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 998		if (err == -ENOENT) {
 999			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1000			err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1001		}
1002	} else {
1003		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1004		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1005	}
1006	if (err)
1007		goto out;
1008	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1009	if (err)
1010		goto out;
1011	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1012	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1013	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1014
1015	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1016		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
1017		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1018		if (err == -ENOENT) {
1019			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1020			err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1021		}
1022	} else {
1023		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1024		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1025	}
1026	if (err)
1027		goto out;
1028	err = amdgpu_ucode_validate(adev->gfx.me_fw);
1029	if (err)
1030		goto out;
1031	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1032	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1033
1034	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1035
1036	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1037		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
1038		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1039		if (err == -ENOENT) {
1040			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1041			err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1042		}
1043	} else {
1044		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1045		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1046	}
1047	if (err)
1048		goto out;
1049	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1050	if (err)
1051		goto out;
1052	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1053	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1054	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1055
 1056	/*
 1057	 * Support for MCBP/virtualization in combination with chained IBs was
 1058	 * formally released in feature version #46.
 1059	 */
1060	if (adev->gfx.ce_feature_version >= 46 &&
1061	    adev->gfx.pfp_feature_version >= 46) {
1062		adev->virt.chained_ib_support = true;
1063		DRM_INFO("Chained IB support enabled!\n");
 1064	} else {
 1065		adev->virt.chained_ib_support = false;
	}
1066
1067	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1068	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1069	if (err)
1070		goto out;
 1071	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
1072	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1073	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1074	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1075
1076	adev->gfx.rlc.save_and_restore_offset =
1077			le32_to_cpu(rlc_hdr->save_and_restore_offset);
1078	adev->gfx.rlc.clear_state_descriptor_offset =
1079			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1080	adev->gfx.rlc.avail_scratch_ram_locations =
1081			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1082	adev->gfx.rlc.reg_restore_list_size =
1083			le32_to_cpu(rlc_hdr->reg_restore_list_size);
1084	adev->gfx.rlc.reg_list_format_start =
1085			le32_to_cpu(rlc_hdr->reg_list_format_start);
1086	adev->gfx.rlc.reg_list_format_separate_start =
1087			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1088	adev->gfx.rlc.starting_offsets_start =
1089			le32_to_cpu(rlc_hdr->starting_offsets_start);
1090	adev->gfx.rlc.reg_list_format_size_bytes =
1091			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1092	adev->gfx.rlc.reg_list_size_bytes =
1093			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1094
1095	adev->gfx.rlc.register_list_format =
1096			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1097					adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1098
1099	if (!adev->gfx.rlc.register_list_format) {
1100		err = -ENOMEM;
1101		goto out;
1102	}
1103
1104	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1105			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1106	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1107		adev->gfx.rlc.register_list_format[i] =	le32_to_cpu(tmp[i]);
1108
1109	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1110
1111	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1112			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1113	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1114		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1115
1116	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1117		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
1118		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1119		if (err == -ENOENT) {
1120			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1121			err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1122		}
1123	} else {
1124		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1125		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1126	}
1127	if (err)
1128		goto out;
1129	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1130	if (err)
1131		goto out;
1132	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1133	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1134	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1135
1136	if ((adev->asic_type != CHIP_STONEY) &&
1137	    (adev->asic_type != CHIP_TOPAZ)) {
1138		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1139			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
1140			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1141			if (err == -ENOENT) {
1142				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1143				err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1144			}
1145		} else {
1146			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1147			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1148		}
1149		if (!err) {
1150			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1151			if (err)
1152				goto out;
1153			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1154				adev->gfx.mec2_fw->data;
1155			adev->gfx.mec2_fw_version =
1156				le32_to_cpu(cp_hdr->header.ucode_version);
1157			adev->gfx.mec2_feature_version =
1158				le32_to_cpu(cp_hdr->ucode_feature_version);
1159		} else {
1160			err = 0;
1161			adev->gfx.mec2_fw = NULL;
1162		}
1163	}
1164
1165	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1166	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1167	info->fw = adev->gfx.pfp_fw;
1168	header = (const struct common_firmware_header *)info->fw->data;
1169	adev->firmware.fw_size +=
1170		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1171
1172	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1173	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1174	info->fw = adev->gfx.me_fw;
1175	header = (const struct common_firmware_header *)info->fw->data;
1176	adev->firmware.fw_size +=
1177		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1178
1179	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1180	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1181	info->fw = adev->gfx.ce_fw;
1182	header = (const struct common_firmware_header *)info->fw->data;
1183	adev->firmware.fw_size +=
1184		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1185
1186	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1187	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1188	info->fw = adev->gfx.rlc_fw;
1189	header = (const struct common_firmware_header *)info->fw->data;
1190	adev->firmware.fw_size +=
1191		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1192
1193	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1194	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1195	info->fw = adev->gfx.mec_fw;
1196	header = (const struct common_firmware_header *)info->fw->data;
1197	adev->firmware.fw_size +=
1198		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1199
 1200	/* we also need to account for the MEC jump table (JT) */
1201	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1202	adev->firmware.fw_size +=
1203		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
1204
1205	if (amdgpu_sriov_vf(adev)) {
1206		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
1207		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
1208		info->fw = adev->gfx.mec_fw;
1209		adev->firmware.fw_size +=
 1210			ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
1211	}
1212
1213	if (adev->gfx.mec2_fw) {
1214		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1215		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1216		info->fw = adev->gfx.mec2_fw;
1217		header = (const struct common_firmware_header *)info->fw->data;
1218		adev->firmware.fw_size +=
1219			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1220	}
1221
1222out:
1223	if (err) {
1224		dev_err(adev->dev,
1225			"gfx8: Failed to load firmware \"%s\"\n",
1226			fw_name);
1227		release_firmware(adev->gfx.pfp_fw);
1228		adev->gfx.pfp_fw = NULL;
1229		release_firmware(adev->gfx.me_fw);
1230		adev->gfx.me_fw = NULL;
1231		release_firmware(adev->gfx.ce_fw);
1232		adev->gfx.ce_fw = NULL;
1233		release_firmware(adev->gfx.rlc_fw);
1234		adev->gfx.rlc_fw = NULL;
1235		release_firmware(adev->gfx.mec_fw);
1236		adev->gfx.mec_fw = NULL;
1237		release_firmware(adev->gfx.mec2_fw);
1238		adev->gfx.mec2_fw = NULL;
1239	}
1240	return err;
1241}
1242
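/*
 * Serialize the clear-state data (cs_data) into a command buffer:
 * preamble begin, CONTEXT_CONTROL, every SECT_CONTEXT register extent,
 * the raster config, preamble end and a final CLEAR_STATE packet.
 */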
1243static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
1244				    volatile u32 *buffer)
1245{
1246	u32 count = 0, i;
1247	const struct cs_section_def *sect = NULL;
1248	const struct cs_extent_def *ext = NULL;
1249
1250	if (adev->gfx.rlc.cs_data == NULL)
1251		return;
1252	if (buffer == NULL)
1253		return;
1254
1255	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1256	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1257
1258	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1259	buffer[count++] = cpu_to_le32(0x80000000);
1260	buffer[count++] = cpu_to_le32(0x80000000);
1261
1262	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1263		for (ext = sect->section; ext->extent != NULL; ++ext) {
1264			if (sect->id == SECT_CONTEXT) {
1265				buffer[count++] =
1266					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1267				buffer[count++] = cpu_to_le32(ext->reg_index -
1268						PACKET3_SET_CONTEXT_REG_START);
1269				for (i = 0; i < ext->reg_count; i++)
1270					buffer[count++] = cpu_to_le32(ext->extent[i]);
1271			} else {
1272				return;
1273			}
1274		}
1275	}
1276
1277	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
1278	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
1279			PACKET3_SET_CONTEXT_REG_START);
1280	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
1281	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
1282
1283	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1284	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1285
1286	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1287	buffer[count++] = cpu_to_le32(0);
1288}
1289
1290static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
1291{
1292	if (adev->asic_type == CHIP_CARRIZO)
1293		return 5;
1294	else
1295		return 4;
1296}
1297
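/*
 * Set up RLC state: the clear-state buffer for all ASICs, plus the CP
 * jump table and GDS backup buffer on Carrizo and Stoney.
 */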
1298static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1299{
1300	const struct cs_section_def *cs_data;
1301	int r;
1302
1303	adev->gfx.rlc.cs_data = vi_cs_data;
1304
1305	cs_data = adev->gfx.rlc.cs_data;
1306
1307	if (cs_data) {
1308		/* init clear state block */
1309		r = amdgpu_gfx_rlc_init_csb(adev);
1310		if (r)
1311			return r;
1312	}
1313
1314	if ((adev->asic_type == CHIP_CARRIZO) ||
1315	    (adev->asic_type == CHIP_STONEY)) {
1316		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1317		r = amdgpu_gfx_rlc_init_cpt(adev);
1318		if (r)
1319			return r;
1320	}
1321
1322	/* init spm vmid with 0xf */
1323	if (adev->gfx.rlc.funcs->update_spm_vmid)
1324		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1325
1326	return 0;
1327}
1328
1329static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
1330{
1331	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1332}
1333
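/*
 * Allocate and zero the MEC HPD EOP buffer in VRAM: one GFX8_MEC_HPD_SIZE
 * slot per acquired compute ring.
 */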
1334static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
1335{
1336	int r;
1337	u32 *hpd;
1338	size_t mec_hpd_size;
1339
1340	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1341
1342	/* take ownership of the relevant compute queues */
1343	amdgpu_gfx_compute_queue_acquire(adev);
1344
1345	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
1346
1347	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1348				      AMDGPU_GEM_DOMAIN_VRAM,
1349				      &adev->gfx.mec.hpd_eop_obj,
1350				      &adev->gfx.mec.hpd_eop_gpu_addr,
1351				      (void **)&hpd);
1352	if (r) {
 1353		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1354		return r;
1355	}
1356
1357	memset(hpd, 0, mec_hpd_size);
1358
1359	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1360	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1361
1362	return 0;
1363}
1364
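/*
 * Hand-assembled GCN machine code for the EDC workaround below: the first
 * shader initializes a block of VGPRs, the second a block of SGPRs, and
 * both end with s_barrier (0xbf8a0000) and s_endpgm (0xbf810000).
 */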
1365static const u32 vgpr_init_compute_shader[] =
1366{
1367	0x7e000209, 0x7e020208,
1368	0x7e040207, 0x7e060206,
1369	0x7e080205, 0x7e0a0204,
1370	0x7e0c0203, 0x7e0e0202,
1371	0x7e100201, 0x7e120200,
1372	0x7e140209, 0x7e160208,
1373	0x7e180207, 0x7e1a0206,
1374	0x7e1c0205, 0x7e1e0204,
1375	0x7e200203, 0x7e220202,
1376	0x7e240201, 0x7e260200,
1377	0x7e280209, 0x7e2a0208,
1378	0x7e2c0207, 0x7e2e0206,
1379	0x7e300205, 0x7e320204,
1380	0x7e340203, 0x7e360202,
1381	0x7e380201, 0x7e3a0200,
1382	0x7e3c0209, 0x7e3e0208,
1383	0x7e400207, 0x7e420206,
1384	0x7e440205, 0x7e460204,
1385	0x7e480203, 0x7e4a0202,
1386	0x7e4c0201, 0x7e4e0200,
1387	0x7e500209, 0x7e520208,
1388	0x7e540207, 0x7e560206,
1389	0x7e580205, 0x7e5a0204,
1390	0x7e5c0203, 0x7e5e0202,
1391	0x7e600201, 0x7e620200,
1392	0x7e640209, 0x7e660208,
1393	0x7e680207, 0x7e6a0206,
1394	0x7e6c0205, 0x7e6e0204,
1395	0x7e700203, 0x7e720202,
1396	0x7e740201, 0x7e760200,
1397	0x7e780209, 0x7e7a0208,
1398	0x7e7c0207, 0x7e7e0206,
1399	0xbf8a0000, 0xbf810000,
1400};
1401
1402static const u32 sgpr_init_compute_shader[] =
1403{
1404	0xbe8a0100, 0xbe8c0102,
1405	0xbe8e0104, 0xbe900106,
1406	0xbe920108, 0xbe940100,
1407	0xbe960102, 0xbe980104,
1408	0xbe9a0106, 0xbe9c0108,
1409	0xbe9e0100, 0xbea00102,
1410	0xbea20104, 0xbea40106,
1411	0xbea60108, 0xbea80100,
1412	0xbeaa0102, 0xbeac0104,
1413	0xbeae0106, 0xbeb00108,
1414	0xbeb20100, 0xbeb40102,
1415	0xbeb60104, 0xbeb80106,
1416	0xbeba0108, 0xbebc0100,
1417	0xbebe0102, 0xbec00104,
1418	0xbec20106, 0xbec40108,
1419	0xbec60100, 0xbec80102,
1420	0xbee60004, 0xbee70005,
1421	0xbeea0006, 0xbeeb0007,
1422	0xbee80008, 0xbee90009,
1423	0xbefc0000, 0xbf8a0000,
1424	0xbf810000, 0x00000000,
1425};
1426
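/*
 * Register/value pairs programmed via SET_SH_REG ahead of each EDC test
 * dispatch; they set the workgroup dimensions and GPR allocation used by
 * the init shaders above.
 */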
1427static const u32 vgpr_init_regs[] =
1428{
1429	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1430	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1431	mmCOMPUTE_NUM_THREAD_X, 256*4,
1432	mmCOMPUTE_NUM_THREAD_Y, 1,
1433	mmCOMPUTE_NUM_THREAD_Z, 1,
1434	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1435	mmCOMPUTE_PGM_RSRC2, 20,
1436	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1437	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1438	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1439	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1440	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1441	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1442	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1443	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1444	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1445	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1446};
1447
1448static const u32 sgpr1_init_regs[] =
1449{
1450	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1451	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1452	mmCOMPUTE_NUM_THREAD_X, 256*5,
1453	mmCOMPUTE_NUM_THREAD_Y, 1,
1454	mmCOMPUTE_NUM_THREAD_Z, 1,
1455	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1456	mmCOMPUTE_PGM_RSRC2, 20,
1457	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1458	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1459	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1460	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1461	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1462	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1463	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1464	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1465	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1466	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1467};
1468
1469static const u32 sgpr2_init_regs[] =
1470{
1471	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
1472	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1473	mmCOMPUTE_NUM_THREAD_X, 256*5,
1474	mmCOMPUTE_NUM_THREAD_Y, 1,
1475	mmCOMPUTE_NUM_THREAD_Z, 1,
1476	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1477	mmCOMPUTE_PGM_RSRC2, 20,
1478	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1479	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1480	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1481	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1482	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1483	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1484	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1485	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1486	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1487	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1488};
1489
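/* EDC SEC/DED error counters, read back after the workaround to clear them */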
1490static const u32 sec_ded_counter_registers[] =
1491{
1492	mmCPC_EDC_ATC_CNT,
1493	mmCPC_EDC_SCRATCH_CNT,
1494	mmCPC_EDC_UCODE_CNT,
1495	mmCPF_EDC_ATC_CNT,
1496	mmCPF_EDC_ROQ_CNT,
1497	mmCPF_EDC_TAG_CNT,
1498	mmCPG_EDC_ATC_CNT,
1499	mmCPG_EDC_DMA_CNT,
1500	mmCPG_EDC_TAG_CNT,
1501	mmDC_EDC_CSINVOC_CNT,
1502	mmDC_EDC_RESTORE_CNT,
1503	mmDC_EDC_STATE_CNT,
1504	mmGDS_EDC_CNT,
1505	mmGDS_EDC_GRBM_CNT,
1506	mmGDS_EDC_OA_DED,
1507	mmSPI_EDC_CNT,
1508	mmSQC_ATC_EDC_GATCL1_CNT,
1509	mmSQC_EDC_CNT,
1510	mmSQ_EDC_DED_CNT,
1511	mmSQ_EDC_INFO,
1512	mmSQ_EDC_SEC_CNT,
1513	mmTCC_EDC_CNT,
1514	mmTCP_ATC_EDC_GATCL1_CNT,
1515	mmTCP_EDC_CNT,
1516	mmTD_EDC_CNT
1517};
1518
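/*
 * Carrizo-only EDC workaround: dispatch the VGPR and SGPR init shaders so
 * the GPRs (and their ECC state) are written once, then enable DED/FED
 * propagation in GB_EDC_MODE/CC_GC_EDC_CONFIG and read back the SEC/DED
 * counters to clear them.
 */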
1519static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1520{
1521	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1522	struct amdgpu_ib ib;
1523	struct dma_fence *f = NULL;
1524	int r, i;
1525	u32 tmp;
1526	unsigned total_size, vgpr_offset, sgpr_offset;
1527	u64 gpu_addr;
1528
1529	/* only supported on CZ */
1530	if (adev->asic_type != CHIP_CARRIZO)
1531		return 0;
1532
1533	/* bail if the compute ring is not ready */
1534	if (!ring->sched.ready)
1535		return 0;
1536
1537	tmp = RREG32(mmGB_EDC_MODE);
1538	WREG32(mmGB_EDC_MODE, 0);
1539
1540	total_size =
1541		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1542	total_size +=
1543		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1544	total_size +=
1545		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1546	total_size = ALIGN(total_size, 256);
1547	vgpr_offset = total_size;
1548	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1549	sgpr_offset = total_size;
1550	total_size += sizeof(sgpr_init_compute_shader);
1551
1552	/* allocate an indirect buffer to put the commands in */
1553	memset(&ib, 0, sizeof(ib));
1554	r = amdgpu_ib_get(adev, NULL, total_size,
1555					AMDGPU_IB_POOL_DIRECT, &ib);
1556	if (r) {
1557		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1558		return r;
1559	}
1560
1561	/* load the compute shaders */
1562	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1563		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1564
1565	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1566		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1567
1568	/* init the ib length to 0 */
1569	ib.length_dw = 0;
1570
1571	/* VGPR */
1572	/* write the register state for the compute dispatch */
1573	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1574		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1575		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1576		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1577	}
1578	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1579	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1580	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1581	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1582	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1583	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1584
1585	/* write dispatch packet */
1586	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1587	ib.ptr[ib.length_dw++] = 8; /* x */
1588	ib.ptr[ib.length_dw++] = 1; /* y */
1589	ib.ptr[ib.length_dw++] = 1; /* z */
1590	ib.ptr[ib.length_dw++] =
1591		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1592
1593	/* write CS partial flush packet */
1594	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1595	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
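	/* EVENT_TYPE(7) is CS_PARTIAL_FLUSH: the CP waits for the dispatch
	 * above to drain before executing the following packets.
	 */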
1596
1597	/* SGPR1 */
1598	/* write the register state for the compute dispatch */
1599	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1600		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1601		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1602		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1603	}
1604	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1605	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1606	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1607	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1608	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1609	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1610
1611	/* write dispatch packet */
1612	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1613	ib.ptr[ib.length_dw++] = 8; /* x */
1614	ib.ptr[ib.length_dw++] = 1; /* y */
1615	ib.ptr[ib.length_dw++] = 1; /* z */
1616	ib.ptr[ib.length_dw++] =
1617		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1618
1619	/* write CS partial flush packet */
1620	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1621	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1622
1623	/* SGPR2 */
1624	/* write the register state for the compute dispatch */
1625	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1626		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1627		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1628		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1629	}
1630	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1631	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1632	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1633	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1634	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1635	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1636
1637	/* write dispatch packet */
1638	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1639	ib.ptr[ib.length_dw++] = 8; /* x */
1640	ib.ptr[ib.length_dw++] = 1; /* y */
1641	ib.ptr[ib.length_dw++] = 1; /* z */
1642	ib.ptr[ib.length_dw++] =
1643		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1644
1645	/* write CS partial flush packet */
1646	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1647	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1648
1649	/* schedule the ib on the ring */
1650	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1651	if (r) {
1652		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1653		goto fail;
1654	}
1655
1656	/* wait for the GPU to finish processing the IB */
1657	r = dma_fence_wait(f, false);
1658	if (r) {
1659		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1660		goto fail;
1661	}
1662
1663	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1664	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1665	WREG32(mmGB_EDC_MODE, tmp);
1666
1667	tmp = RREG32(mmCC_GC_EDC_CONFIG);
1668	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1669	WREG32(mmCC_GC_EDC_CONFIG, tmp);
1670
1672	/* read back registers to clear the counters */
1673	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1674		RREG32(sec_ded_counter_registers[i]);
1675
1676fail:
1677	amdgpu_ib_free(adev, &ib, NULL);
1678	dma_fence_put(f);
1679
1680	return r;
1681}
1682
1683static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1684{
1685	u32 gb_addr_config;
1686	u32 mc_arb_ramcfg;
1687	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
1688	u32 tmp;
1689	int ret;
1690
1691	switch (adev->asic_type) {
1692	case CHIP_TOPAZ:
1693		adev->gfx.config.max_shader_engines = 1;
1694		adev->gfx.config.max_tile_pipes = 2;
1695		adev->gfx.config.max_cu_per_sh = 6;
1696		adev->gfx.config.max_sh_per_se = 1;
1697		adev->gfx.config.max_backends_per_se = 2;
1698		adev->gfx.config.max_texture_channel_caches = 2;
1699		adev->gfx.config.max_gprs = 256;
1700		adev->gfx.config.max_gs_threads = 32;
1701		adev->gfx.config.max_hw_contexts = 8;
1702
1703		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1704		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1705		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1706		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1707		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1708		break;
1709	case CHIP_FIJI:
1710		adev->gfx.config.max_shader_engines = 4;
1711		adev->gfx.config.max_tile_pipes = 16;
1712		adev->gfx.config.max_cu_per_sh = 16;
1713		adev->gfx.config.max_sh_per_se = 1;
1714		adev->gfx.config.max_backends_per_se = 4;
1715		adev->gfx.config.max_texture_channel_caches = 16;
1716		adev->gfx.config.max_gprs = 256;
1717		adev->gfx.config.max_gs_threads = 32;
1718		adev->gfx.config.max_hw_contexts = 8;
1719
1720		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1721		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1722		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1723		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1724		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1725		break;
1726	case CHIP_POLARIS11:
1727	case CHIP_POLARIS12:
1728		ret = amdgpu_atombios_get_gfx_info(adev);
1729		if (ret)
1730			return ret;
1731		adev->gfx.config.max_gprs = 256;
1732		adev->gfx.config.max_gs_threads = 32;
1733		adev->gfx.config.max_hw_contexts = 8;
1734
1735		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1736		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1737		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1738		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1739		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
1740		break;
1741	case CHIP_POLARIS10:
1742	case CHIP_VEGAM:
1743		ret = amdgpu_atombios_get_gfx_info(adev);
1744		if (ret)
1745			return ret;
1746		adev->gfx.config.max_gprs = 256;
1747		adev->gfx.config.max_gs_threads = 32;
1748		adev->gfx.config.max_hw_contexts = 8;
1749
1750		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1751		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1752		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1753		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1754		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1755		break;
1756	case CHIP_TONGA:
1757		adev->gfx.config.max_shader_engines = 4;
1758		adev->gfx.config.max_tile_pipes = 8;
1759		adev->gfx.config.max_cu_per_sh = 8;
1760		adev->gfx.config.max_sh_per_se = 1;
1761		adev->gfx.config.max_backends_per_se = 2;
1762		adev->gfx.config.max_texture_channel_caches = 8;
1763		adev->gfx.config.max_gprs = 256;
1764		adev->gfx.config.max_gs_threads = 32;
1765		adev->gfx.config.max_hw_contexts = 8;
1766
1767		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1768		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1769		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1770		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1771		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1772		break;
1773	case CHIP_CARRIZO:
1774		adev->gfx.config.max_shader_engines = 1;
1775		adev->gfx.config.max_tile_pipes = 2;
1776		adev->gfx.config.max_sh_per_se = 1;
1777		adev->gfx.config.max_backends_per_se = 2;
1778		adev->gfx.config.max_cu_per_sh = 8;
1779		adev->gfx.config.max_texture_channel_caches = 2;
1780		adev->gfx.config.max_gprs = 256;
1781		adev->gfx.config.max_gs_threads = 32;
1782		adev->gfx.config.max_hw_contexts = 8;
1783
1784		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1785		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1786		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1787		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1788		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1789		break;
1790	case CHIP_STONEY:
1791		adev->gfx.config.max_shader_engines = 1;
1792		adev->gfx.config.max_tile_pipes = 2;
1793		adev->gfx.config.max_sh_per_se = 1;
1794		adev->gfx.config.max_backends_per_se = 1;
1795		adev->gfx.config.max_cu_per_sh = 3;
1796		adev->gfx.config.max_texture_channel_caches = 2;
1797		adev->gfx.config.max_gprs = 256;
1798		adev->gfx.config.max_gs_threads = 16;
1799		adev->gfx.config.max_hw_contexts = 8;
1800
1801		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1802		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1803		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1804		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1805		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1806		break;
1807	default:
1808		adev->gfx.config.max_shader_engines = 2;
1809		adev->gfx.config.max_tile_pipes = 4;
1810		adev->gfx.config.max_cu_per_sh = 2;
1811		adev->gfx.config.max_sh_per_se = 1;
1812		adev->gfx.config.max_backends_per_se = 2;
1813		adev->gfx.config.max_texture_channel_caches = 4;
1814		adev->gfx.config.max_gprs = 256;
1815		adev->gfx.config.max_gs_threads = 32;
1816		adev->gfx.config.max_hw_contexts = 8;
1817
1818		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1819		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1820		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1821		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1822		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1823		break;
1824	}
1825
1826	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1827	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1828
1829	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
1830				MC_ARB_RAMCFG, NOOFBANK);
1831	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
1832				MC_ARB_RAMCFG, NOOFRANKS);
1833
1834	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1835	adev->gfx.config.mem_max_burst_length_bytes = 256;
1836	if (adev->flags & AMD_IS_APU) {
1837		/* Get memory bank mapping mode. */
1838		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1839		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1840		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1841
1842		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1843		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1844		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1845
1846		/* Validate settings in case only one DIMM is installed. */
1847		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1848			dimm00_addr_map = 0;
1849		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1850			dimm01_addr_map = 0;
1851		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1852			dimm10_addr_map = 0;
1853		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1854			dimm11_addr_map = 0;
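		/* encodings 0, 3, 4 and anything above 12 are treated as
		 * invalid / no DIMM present and zeroed so they cannot win
		 * the row-size check below (inferred from the checks above).
		 */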
1855
1856		/* If the DIMM address map is 8GB, the ROW size should be 2KB, otherwise 1KB. */
1857		/* If ROW size(DIMM1) != ROW size(DIMM0), use the larger ROW size. */
1858		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1859			adev->gfx.config.mem_row_size_in_kb = 2;
1860		else
1861			adev->gfx.config.mem_row_size_in_kb = 1;
1862	} else {
1863		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1864		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1865		if (adev->gfx.config.mem_row_size_in_kb > 4)
1866			adev->gfx.config.mem_row_size_in_kb = 4;
1867	}
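	/* dGPU case above: row size = 4 bytes * 2^(8 + NOOFCOLS) columns,
	 * converted to KB and clamped at 4KB, the largest value the
	 * GB_ADDR_CONFIG ROW_SIZE field can encode (see the switch below).
	 */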
1868
1869	adev->gfx.config.shader_engine_tile_size = 32;
1870	adev->gfx.config.num_gpus = 1;
1871	adev->gfx.config.multi_gpu_tile_size = 64;
1872
1873	/* fix up row size */
1874	switch (adev->gfx.config.mem_row_size_in_kb) {
1875	case 1:
1876	default:
1877		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1878		break;
1879	case 2:
1880		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1881		break;
1882	case 4:
1883		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1884		break;
1885	}
1886	adev->gfx.config.gb_addr_config = gb_addr_config;
1887
1888	return 0;
1889}
1890
1891static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1892					int mec, int pipe, int queue)
1893{
1894	int r;
1895	unsigned irq_type;
1896	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1897	unsigned int hw_prio;
1898
1901	/* mec0 is me1 */
1902	ring->me = mec + 1;
1903	ring->pipe = pipe;
1904	ring->queue = queue;
1905
1906	ring->ring_obj = NULL;
1907	ring->use_doorbell = true;
1908	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
1909	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1910				+ (ring_id * GFX8_MEC_HPD_SIZE);
1911	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1912
1913	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1914		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1915		+ ring->pipe;
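	/* EOP interrupt sources are per MEC pipe, so queues sharing a pipe
	 * share the source; only me and pipe select it.
	 */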
1916
1917	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
1918			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
1919	/* type-2 packets are deprecated on MEC, use type-3 instead */
1920	r = amdgpu_ring_init(adev, ring, 1024,
1921			     &adev->gfx.eop_irq, irq_type, hw_prio);
1922	if (r)
1923		return r;
1924
1926	return 0;
1927}
1928
1929static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
1930
1931static int gfx_v8_0_sw_init(void *handle)
1932{
1933	int i, j, k, r, ring_id;
1934	struct amdgpu_ring *ring;
1935	struct amdgpu_kiq *kiq;
1936	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1937
1938	switch (adev->asic_type) {
1939	case CHIP_TONGA:
1940	case CHIP_CARRIZO:
1941	case CHIP_FIJI:
1942	case CHIP_POLARIS10:
1943	case CHIP_POLARIS11:
1944	case CHIP_POLARIS12:
1945	case CHIP_VEGAM:
1946		adev->gfx.mec.num_mec = 2;
1947		break;
1948	case CHIP_TOPAZ:
1949	case CHIP_STONEY:
1950	default:
1951		adev->gfx.mec.num_mec = 1;
1952		break;
1953	}
1954
1955	adev->gfx.mec.num_pipe_per_mec = 4;
1956	adev->gfx.mec.num_queue_per_pipe = 8;
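	/* each VI MEC exposes 4 pipes with 8 queues per pipe, i.e. 32
	 * hardware queues per MEC; these are fixed hardware properties,
	 * not tunables.
	 */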
1957
1958	/* EOP Event */
1959	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
1960	if (r)
1961		return r;
1962
1963	/* Privileged reg */
1964	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
1965			      &adev->gfx.priv_reg_irq);
1966	if (r)
1967		return r;
1968
1969	/* Privileged inst */
1970	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
1971			      &adev->gfx.priv_inst_irq);
1972	if (r)
1973		return r;
1974
1975	/* Add CP EDC/ECC irq  */
1976	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
1977			      &adev->gfx.cp_ecc_error_irq);
1978	if (r)
1979		return r;
1980
1981	/* SQ interrupts. */
1982	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
1983			      &adev->gfx.sq_irq);
1984	if (r) {
1985		DRM_ERROR("amdgpu_irq_add_id() for SQ failed: %d\n", r);
1986		return r;
1987	}
1988
1989	INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
1990
1991	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1992
1993	gfx_v8_0_scratch_init(adev);
1994
1995	r = gfx_v8_0_init_microcode(adev);
1996	if (r) {
1997		DRM_ERROR("Failed to load gfx firmware!\n");
1998		return r;
1999	}
2000
2001	r = adev->gfx.rlc.funcs->init(adev);
2002	if (r) {
2003		DRM_ERROR("Failed to init rlc BOs!\n");
2004		return r;
2005	}
2006
2007	r = gfx_v8_0_mec_init(adev);
2008	if (r) {
2009		DRM_ERROR("Failed to init MEC BOs!\n");
2010		return r;
2011	}
2012
2013	/* set up the gfx ring */
2014	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2015		ring = &adev->gfx.gfx_ring[i];
2016		ring->ring_obj = NULL;
2017		sprintf(ring->name, "gfx");
2018		/* no gfx doorbells on iceland */
2019		if (adev->asic_type != CHIP_TOPAZ) {
2020			ring->use_doorbell = true;
2021			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
2022		}
2023
2024		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2025				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2026				     AMDGPU_RING_PRIO_DEFAULT);
2027		if (r)
2028			return r;
2029	}
2030
2032	/* set up the compute queues - allocate horizontally across pipes */
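	/* (the pipe index k is the innermost loop, so consecutive ring ids
	 * land on different pipes instead of filling one pipe first)
	 */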
2033	ring_id = 0;
2034	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2035		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2036			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2037				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2038					continue;
2039
2040				r = gfx_v8_0_compute_ring_init(adev,
2041								ring_id,
2042								i, k, j);
2043				if (r)
2044					return r;
2045
2046				ring_id++;
2047			}
2048		}
2049	}
2050
2051	r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE);
2052	if (r) {
2053		DRM_ERROR("Failed to init KIQ BOs!\n");
2054		return r;
2055	}
2056
2057	kiq = &adev->gfx.kiq;
2058	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2059	if (r)
2060		return r;
2061
2062	/* create MQD for all compute queues as well as KIQ for SRIOV case */
2063	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
2064	if (r)
2065		return r;
2066
2067	adev->gfx.ce_ram_size = 0x8000;
2068
2069	r = gfx_v8_0_gpu_early_init(adev);
2070	if (r)
2071		return r;
2072
2073	return 0;
2074}
2075
2076static int gfx_v8_0_sw_fini(void *handle)
2077{
2078	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2079	int i;
2080
2081	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2082		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2083	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2084		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2085
2086	amdgpu_gfx_mqd_sw_fini(adev);
2087	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2088	amdgpu_gfx_kiq_fini(adev);
2089
2090	gfx_v8_0_mec_fini(adev);
2091	amdgpu_gfx_rlc_fini(adev);
2092	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2093				&adev->gfx.rlc.clear_state_gpu_addr,
2094				(void **)&adev->gfx.rlc.cs_ptr);
2095	if ((adev->asic_type == CHIP_CARRIZO) ||
2096	    (adev->asic_type == CHIP_STONEY)) {
2097		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2098				&adev->gfx.rlc.cp_table_gpu_addr,
2099				(void **)&adev->gfx.rlc.cp_table_ptr);
2100	}
2101	gfx_v8_0_free_microcode(adev);
2102
2103	return 0;
2104}
2105
2106static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2107{
2108	uint32_t *modearray, *mod2array;
2109	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2110	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2111	u32 reg_offset;
2112
2113	modearray = adev->gfx.config.tile_mode_array;
2114	mod2array = adev->gfx.config.macrotile_mode_array;
2115
2116	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2117		modearray[reg_offset] = 0;
2118
2119	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2120		mod2array[reg_offset] = 0;
2121
2122	switch (adev->asic_type) {
2123	case CHIP_TOPAZ:
2124		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2125				PIPE_CONFIG(ADDR_SURF_P2) |
2126				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2127				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2128		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2129				PIPE_CONFIG(ADDR_SURF_P2) |
2130				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2131				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2132		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2133				PIPE_CONFIG(ADDR_SURF_P2) |
2134				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2135				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2136		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2137				PIPE_CONFIG(ADDR_SURF_P2) |
2138				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2139				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2140		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2141				PIPE_CONFIG(ADDR_SURF_P2) |
2142				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2143				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2144		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2145				PIPE_CONFIG(ADDR_SURF_P2) |
2146				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2147				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2148		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2149				PIPE_CONFIG(ADDR_SURF_P2) |
2150				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2151				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2152		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2153				PIPE_CONFIG(ADDR_SURF_P2));
2154		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2155				PIPE_CONFIG(ADDR_SURF_P2) |
2156				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2157				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2158		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2159				 PIPE_CONFIG(ADDR_SURF_P2) |
2160				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2161				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2162		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2163				 PIPE_CONFIG(ADDR_SURF_P2) |
2164				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2165				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2166		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2167				 PIPE_CONFIG(ADDR_SURF_P2) |
2168				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2169				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2170		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2171				 PIPE_CONFIG(ADDR_SURF_P2) |
2172				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2173				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2174		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2175				 PIPE_CONFIG(ADDR_SURF_P2) |
2176				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2177				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2178		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2179				 PIPE_CONFIG(ADDR_SURF_P2) |
2180				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2181				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2182		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2183				 PIPE_CONFIG(ADDR_SURF_P2) |
2184				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2185				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2186		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2187				 PIPE_CONFIG(ADDR_SURF_P2) |
2188				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2189				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2190		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2191				 PIPE_CONFIG(ADDR_SURF_P2) |
2192				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2193				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2194		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2195				 PIPE_CONFIG(ADDR_SURF_P2) |
2196				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2197				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2198		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2199				 PIPE_CONFIG(ADDR_SURF_P2) |
2200				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2201				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2202		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2203				 PIPE_CONFIG(ADDR_SURF_P2) |
2204				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2205				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2206		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2207				 PIPE_CONFIG(ADDR_SURF_P2) |
2208				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2209				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2210		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2211				 PIPE_CONFIG(ADDR_SURF_P2) |
2212				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2213				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2214		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2215				 PIPE_CONFIG(ADDR_SURF_P2) |
2216				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2217				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2218		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2219				 PIPE_CONFIG(ADDR_SURF_P2) |
2220				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2221				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2222		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2223				 PIPE_CONFIG(ADDR_SURF_P2) |
2224				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2225				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2226
2227		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2228				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2229				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2230				NUM_BANKS(ADDR_SURF_8_BANK));
2231		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2232				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2233				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2234				NUM_BANKS(ADDR_SURF_8_BANK));
2235		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2236				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2237				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2238				NUM_BANKS(ADDR_SURF_8_BANK));
2239		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2240				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2241				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2242				NUM_BANKS(ADDR_SURF_8_BANK));
2243		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2244				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2245				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2246				NUM_BANKS(ADDR_SURF_8_BANK));
2247		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2248				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2249				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2250				NUM_BANKS(ADDR_SURF_8_BANK));
2251		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2252				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2253				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2254				NUM_BANKS(ADDR_SURF_8_BANK));
2255		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2256				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2257				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2258				NUM_BANKS(ADDR_SURF_16_BANK));
2259		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2260				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2261				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2262				NUM_BANKS(ADDR_SURF_16_BANK));
2263		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2264				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2265				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2266				 NUM_BANKS(ADDR_SURF_16_BANK));
2267		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2268				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2269				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2270				 NUM_BANKS(ADDR_SURF_16_BANK));
2271		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2272				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2273				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2274				 NUM_BANKS(ADDR_SURF_16_BANK));
2275		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2276				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2277				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2278				 NUM_BANKS(ADDR_SURF_16_BANK));
2279		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2280				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2281				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2282				 NUM_BANKS(ADDR_SURF_8_BANK));
2283
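		/* indices 7, 12, 17 and 23 are skipped below: on larger parts
		 * they hold PRT modes with a P4 pipe config, which a 2-pipe
		 * Topaz has no use for, so they stay at their reset values
		 * (inferred from the tables for the other ASICs).
		 */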
2284		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2285			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2286			    reg_offset != 23)
2287				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2288
2289		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2290			if (reg_offset != 7)
2291				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2292
2293		break;
2294	case CHIP_FIJI:
2295	case CHIP_VEGAM:
2296		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2297				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2298				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2299				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2300		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2301				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2302				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2303				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2304		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2305				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2306				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2307				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2308		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2309				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2310				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2311				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2312		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2313				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2314				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2315				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2316		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2317				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2318				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2319				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2320		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2321				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2322				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2323				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2324		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2325				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2326				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2327				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2328		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2329				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2330		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2331				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2332				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2333				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2334		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2335				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2336				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2337				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2338		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2339				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2340				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2341				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2342		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2343				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2344				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2345				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2346		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2347				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2348				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2349				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2350		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2351				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2352				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2353				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2354		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2355				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2356				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2357				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2358		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2359				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2360				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2361				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2362		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2363				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2364				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2365				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2366		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2367				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2368				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2369				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2370		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2371				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2372				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2373				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2374		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2375				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2376				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2377				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2378		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2379				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2380				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2381				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2382		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2383				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2384				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2385				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2386		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2387				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2388				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2389				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2390		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2391				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2392				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2393				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2394		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2395				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2396				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2397				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2398		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2399				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2400				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2401				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2402		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2403				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2404				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2405				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2406		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2407				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2408				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2409				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2410		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2411				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2412				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2413				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2414		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2415				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2416				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2417				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2418
2419		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2420				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2421				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2422				NUM_BANKS(ADDR_SURF_8_BANK));
2423		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2424				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2425				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2426				NUM_BANKS(ADDR_SURF_8_BANK));
2427		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2428				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2429				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2430				NUM_BANKS(ADDR_SURF_8_BANK));
2431		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2432				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2433				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2434				NUM_BANKS(ADDR_SURF_8_BANK));
2435		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2436				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2437				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2438				NUM_BANKS(ADDR_SURF_8_BANK));
2439		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2440				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2441				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2442				NUM_BANKS(ADDR_SURF_8_BANK));
2443		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2444				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2445				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2446				NUM_BANKS(ADDR_SURF_8_BANK));
2447		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2448				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2449				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2450				NUM_BANKS(ADDR_SURF_8_BANK));
2451		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2452				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2453				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2454				NUM_BANKS(ADDR_SURF_8_BANK));
2455		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2456				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2457				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2458				 NUM_BANKS(ADDR_SURF_8_BANK));
2459		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2460				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2461				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2462				 NUM_BANKS(ADDR_SURF_8_BANK));
2463		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2464				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2465				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2466				 NUM_BANKS(ADDR_SURF_8_BANK));
2467		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2468				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2469				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2470				 NUM_BANKS(ADDR_SURF_8_BANK));
2471		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2472				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2473				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2474				 NUM_BANKS(ADDR_SURF_4_BANK));
2475
2476		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2477			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2478
2479		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2480			if (reg_offset != 7)
2481				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2482
2483		break;
2484	case CHIP_TONGA:
2485		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2486				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2487				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2488				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2489		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2490				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2491				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2492				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2493		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2494				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2495				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2496				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2497		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2498				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2499				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2500				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2501		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2502				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2503				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2504				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2505		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2506				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2507				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2508				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2509		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2510				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2511				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2512				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2513		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2514				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2515				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2516				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2517		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2518				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2519		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2520				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2521				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2522				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2523		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2524				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2525				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2526				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2527		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2528				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2529				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2530				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2531		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2532				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2533				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2534				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2535		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2536				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2537				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2538				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2539		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2540				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2541				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2542				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2543		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2544				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2545				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2546				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2547		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2548				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2549				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2550				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2551		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2552				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2553				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2554				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2555		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2556				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2557				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2558				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2559		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2560				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2561				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2562				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2563		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2564				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2565				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2566				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2567		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2568				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2569				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2570				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2571		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2572				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2573				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2574				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2575		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2576				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2577				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2578				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2579		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2580				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2581				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2582				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2583		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2584				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2585				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2586				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2587		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2588				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2589				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2590				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2591		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2592				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2593				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2594				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2595		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2596				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2597				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2598				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2599		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2600				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2601				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2602				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2603		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2604				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2605				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2606				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2607
2608		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2609				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2610				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2611				NUM_BANKS(ADDR_SURF_16_BANK));
2612		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2613				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2614				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2615				NUM_BANKS(ADDR_SURF_16_BANK));
2616		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2617				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2618				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2619				NUM_BANKS(ADDR_SURF_16_BANK));
2620		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2621				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2622				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2623				NUM_BANKS(ADDR_SURF_16_BANK));
2624		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2625				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2626				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2627				NUM_BANKS(ADDR_SURF_16_BANK));
2628		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2629				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2630				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2631				NUM_BANKS(ADDR_SURF_16_BANK));
2632		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2633				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2634				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2635				NUM_BANKS(ADDR_SURF_16_BANK));
2636		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2637				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2638				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2639				NUM_BANKS(ADDR_SURF_16_BANK));
2640		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2641				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2642				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2643				NUM_BANKS(ADDR_SURF_16_BANK));
2644		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2645				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2646				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2647				 NUM_BANKS(ADDR_SURF_16_BANK));
2648		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2649				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2650				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2651				 NUM_BANKS(ADDR_SURF_16_BANK));
2652		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2653				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2654				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2655				 NUM_BANKS(ADDR_SURF_8_BANK));
2656		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2657				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2658				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2659				 NUM_BANKS(ADDR_SURF_4_BANK));
2660		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2661				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2662				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2663				 NUM_BANKS(ADDR_SURF_4_BANK));
2664
2665		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2666			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2667
2668		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2669			if (reg_offset != 7)
2670				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2671
2672		break;
2673	case CHIP_POLARIS11:
2674	case CHIP_POLARIS12:
2675		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2676				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2677				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2678				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2679		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2680				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2681				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2682				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2683		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2684				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2685				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2686				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2687		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2688				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2689				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2690				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2691		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2692				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2693				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2694				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2695		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2696				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2697				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2698				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2699		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2700				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2701				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2702				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2703		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2704				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2705				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2706				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2707		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2708				PIPE_CONFIG(ADDR_SURF_P4_16x16));
2709		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2710				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2711				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2712				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2713		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2714				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2715				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2716				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2717		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2718				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2719				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2720				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2721		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2722				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2723				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2724				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2725		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2726				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2727				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2728				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2729		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2730				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2731				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2732				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2733		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2734				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2735				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2736				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2737		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2738				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2739				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2740				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2741		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2742				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2743				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2744				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2745		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2746				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2747				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2748				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2749		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2750				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2751				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2752				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2753		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2754				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2755				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2756				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2757		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2758				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2759				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2760				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2761		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2762				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2763				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2764				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2765		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2766				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2767				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2768				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2769		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2770				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2771				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2772				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2773		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2774				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2775				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2776				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2777		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2778				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2779				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2780				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2781		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2782				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2783				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2784				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2785		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2786				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2787				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2788				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2789		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2790				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2791				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2792				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2793		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2794				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2795				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2796				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2797
2798		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2799				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2800				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2801				NUM_BANKS(ADDR_SURF_16_BANK));
2802
2803		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2804				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2805				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2806				NUM_BANKS(ADDR_SURF_16_BANK));
2807
2808		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2809				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2810				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2811				NUM_BANKS(ADDR_SURF_16_BANK));
2812
2813		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2814				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2815				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2816				NUM_BANKS(ADDR_SURF_16_BANK));
2817
2818		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2819				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2820				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2821				NUM_BANKS(ADDR_SURF_16_BANK));
2822
2823		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2824				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2825				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2826				NUM_BANKS(ADDR_SURF_16_BANK));
2827
2828		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2829				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2830				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2831				NUM_BANKS(ADDR_SURF_16_BANK));
2832
2833		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2834				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2835				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2836				NUM_BANKS(ADDR_SURF_16_BANK));
2837
2838		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2839				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2840				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2841				NUM_BANKS(ADDR_SURF_16_BANK));
2842
2843		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2844				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2845				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2846				NUM_BANKS(ADDR_SURF_16_BANK));
2847
2848		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2849				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2850				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2851				NUM_BANKS(ADDR_SURF_16_BANK));
2852
2853		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2854				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2855				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2856				NUM_BANKS(ADDR_SURF_16_BANK));
2857
2858		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2859				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2860				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2861				NUM_BANKS(ADDR_SURF_8_BANK));
2862
2863		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2864				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2865				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2866				NUM_BANKS(ADDR_SURF_4_BANK));
2867
2868		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2869			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2870
2871		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2872			if (reg_offset != 7)
2873				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2874
2875		break;
2876	case CHIP_POLARIS10:
2877		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2878				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2879				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2880				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2881		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2882				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2883				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2884				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2885		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2886				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2887				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2888				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2889		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2890				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2891				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2892				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2893		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2894				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2895				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2896				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2897		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2898				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2899				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2900				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2901		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2902				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2903				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2904				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2905		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2906				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2907				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2908				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2909		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2910				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2911		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2912				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2913				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2914				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2915		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2916				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2917				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2918				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2919		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2920				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2921				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2922				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2923		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2924				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2925				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2926				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2927		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2928				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2929				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2930				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2931		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2932				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2933				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2934				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2935		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2936				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2937				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2938				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2939		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2940				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2941				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2942				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2943		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2944				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2945				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2946				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2947		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2948				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2949				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2950				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2951		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2952				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2953				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2954				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2955		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2956				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2957				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2958				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2959		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2960				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2961				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2962				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2963		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2964				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2965				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2966				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2967		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2968				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2969				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2970				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2971		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2972				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2973				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2974				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2975		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2976				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2977				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2978				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2979		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2980				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2981				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2982				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2983		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2984				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2985				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2986				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2987		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2988				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2989				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2990				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2991		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2992				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2993				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2994				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2995		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2996				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2997				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2998				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2999
3000		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3001				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3002				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3003				NUM_BANKS(ADDR_SURF_16_BANK));
3004
3005		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3006				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3007				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3008				NUM_BANKS(ADDR_SURF_16_BANK));
3009
3010		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3011				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3012				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3013				NUM_BANKS(ADDR_SURF_16_BANK));
3014
3015		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3016				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3017				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3018				NUM_BANKS(ADDR_SURF_16_BANK));
3019
3020		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3021				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3022				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3023				NUM_BANKS(ADDR_SURF_16_BANK));
3024
3025		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3026				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3027				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3028				NUM_BANKS(ADDR_SURF_16_BANK));
3029
3030		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3031				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3032				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3033				NUM_BANKS(ADDR_SURF_16_BANK));
3034
3035		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3036				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3037				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3038				NUM_BANKS(ADDR_SURF_16_BANK));
3039
3040		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3041				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3042				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3043				NUM_BANKS(ADDR_SURF_16_BANK));
3044
3045		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3046				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3047				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3048				NUM_BANKS(ADDR_SURF_16_BANK));
3049
3050		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3051				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3052				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3053				NUM_BANKS(ADDR_SURF_16_BANK));
3054
3055		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3056				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3057				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3058				NUM_BANKS(ADDR_SURF_8_BANK));
3059
3060		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3061				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3062				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3063				NUM_BANKS(ADDR_SURF_4_BANK));
3064
3065		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3066				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3067				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3068				NUM_BANKS(ADDR_SURF_4_BANK));
3069
3070		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3071			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3072
3073		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3074			if (reg_offset != 7)
3075				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3076
3077		break;
3078	case CHIP_STONEY:
3079		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3080				PIPE_CONFIG(ADDR_SURF_P2) |
3081				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3082				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3083		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3084				PIPE_CONFIG(ADDR_SURF_P2) |
3085				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3086				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3087		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3088				PIPE_CONFIG(ADDR_SURF_P2) |
3089				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3090				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3091		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3092				PIPE_CONFIG(ADDR_SURF_P2) |
3093				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3094				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3095		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3096				PIPE_CONFIG(ADDR_SURF_P2) |
3097				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3098				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3099		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3100				PIPE_CONFIG(ADDR_SURF_P2) |
3101				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3102				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3103		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3104				PIPE_CONFIG(ADDR_SURF_P2) |
3105				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3106				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3107		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3108				PIPE_CONFIG(ADDR_SURF_P2));
3109		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3110				PIPE_CONFIG(ADDR_SURF_P2) |
3111				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3112				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3113		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3114				 PIPE_CONFIG(ADDR_SURF_P2) |
3115				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3116				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3117		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3118				 PIPE_CONFIG(ADDR_SURF_P2) |
3119				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3120				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3121		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3122				 PIPE_CONFIG(ADDR_SURF_P2) |
3123				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3124				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3125		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3126				 PIPE_CONFIG(ADDR_SURF_P2) |
3127				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3128				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3129		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3130				 PIPE_CONFIG(ADDR_SURF_P2) |
3131				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3132				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3133		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3134				 PIPE_CONFIG(ADDR_SURF_P2) |
3135				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3136				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3137		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3138				 PIPE_CONFIG(ADDR_SURF_P2) |
3139				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3140				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3141		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3142				 PIPE_CONFIG(ADDR_SURF_P2) |
3143				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3144				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3145		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3146				 PIPE_CONFIG(ADDR_SURF_P2) |
3147				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3148				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3149		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3150				 PIPE_CONFIG(ADDR_SURF_P2) |
3151				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3152				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3153		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3154				 PIPE_CONFIG(ADDR_SURF_P2) |
3155				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3156				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3157		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3158				 PIPE_CONFIG(ADDR_SURF_P2) |
3159				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3160				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3161		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3162				 PIPE_CONFIG(ADDR_SURF_P2) |
3163				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3164				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3165		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3166				 PIPE_CONFIG(ADDR_SURF_P2) |
3167				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3168				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3169		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3170				 PIPE_CONFIG(ADDR_SURF_P2) |
3171				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3172				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3173		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3174				 PIPE_CONFIG(ADDR_SURF_P2) |
3175				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3176				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3177		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3178				 PIPE_CONFIG(ADDR_SURF_P2) |
3179				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3180				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3181
3182		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3183				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3184				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3185				NUM_BANKS(ADDR_SURF_8_BANK));
3186		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3187				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3188				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3189				NUM_BANKS(ADDR_SURF_8_BANK));
3190		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3191				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3192				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3193				NUM_BANKS(ADDR_SURF_8_BANK));
3194		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3195				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3196				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3197				NUM_BANKS(ADDR_SURF_8_BANK));
3198		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3199				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3200				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3201				NUM_BANKS(ADDR_SURF_8_BANK));
3202		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3203				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3204				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3205				NUM_BANKS(ADDR_SURF_8_BANK));
3206		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3207				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3208				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3209				NUM_BANKS(ADDR_SURF_8_BANK));
3210		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3211				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3212				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3213				NUM_BANKS(ADDR_SURF_16_BANK));
3214		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3215				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3216				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3217				NUM_BANKS(ADDR_SURF_16_BANK));
3218		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3219				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3220				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3221				 NUM_BANKS(ADDR_SURF_16_BANK));
3222		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3223				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3224				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3225				 NUM_BANKS(ADDR_SURF_16_BANK));
3226		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3227				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3228				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3229				 NUM_BANKS(ADDR_SURF_16_BANK));
3230		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3231				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3232				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3233				 NUM_BANKS(ADDR_SURF_16_BANK));
3234		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3235				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3236				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3237				 NUM_BANKS(ADDR_SURF_8_BANK));
3238
3239		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3240			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3241			    reg_offset != 23)
3242				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3243
3244		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3245			if (reg_offset != 7)
3246				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3247
3248		break;
3249	default:
3250		dev_warn(adev->dev,
3251			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init(), falling through to CHIP_CARRIZO\n",
3252			 adev->asic_type);
3253		fallthrough;
3254
3255	case CHIP_CARRIZO:
3256		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3257				PIPE_CONFIG(ADDR_SURF_P2) |
3258				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3259				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3260		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3261				PIPE_CONFIG(ADDR_SURF_P2) |
3262				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3263				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3264		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3265				PIPE_CONFIG(ADDR_SURF_P2) |
3266				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3267				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3268		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3269				PIPE_CONFIG(ADDR_SURF_P2) |
3270				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3271				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3272		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3273				PIPE_CONFIG(ADDR_SURF_P2) |
3274				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3275				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3276		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3277				PIPE_CONFIG(ADDR_SURF_P2) |
3278				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3279				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3280		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3281				PIPE_CONFIG(ADDR_SURF_P2) |
3282				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3283				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3284		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3285				PIPE_CONFIG(ADDR_SURF_P2));
3286		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3287				PIPE_CONFIG(ADDR_SURF_P2) |
3288				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3289				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3290		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3291				 PIPE_CONFIG(ADDR_SURF_P2) |
3292				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3293				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3294		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3295				 PIPE_CONFIG(ADDR_SURF_P2) |
3296				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3297				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3298		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3299				 PIPE_CONFIG(ADDR_SURF_P2) |
3300				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3301				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3302		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3303				 PIPE_CONFIG(ADDR_SURF_P2) |
3304				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3305				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3306		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3307				 PIPE_CONFIG(ADDR_SURF_P2) |
3308				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3309				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3310		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3311				 PIPE_CONFIG(ADDR_SURF_P2) |
3312				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3313				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3314		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3315				 PIPE_CONFIG(ADDR_SURF_P2) |
3316				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3317				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3318		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3319				 PIPE_CONFIG(ADDR_SURF_P2) |
3320				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3321				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3322		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3323				 PIPE_CONFIG(ADDR_SURF_P2) |
3324				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3325				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3326		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3327				 PIPE_CONFIG(ADDR_SURF_P2) |
3328				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3329				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3330		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3331				 PIPE_CONFIG(ADDR_SURF_P2) |
3332				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3333				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3334		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3335				 PIPE_CONFIG(ADDR_SURF_P2) |
3336				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3337				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3338		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3339				 PIPE_CONFIG(ADDR_SURF_P2) |
3340				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3341				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3342		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3343				 PIPE_CONFIG(ADDR_SURF_P2) |
3344				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3345				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3346		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3347				 PIPE_CONFIG(ADDR_SURF_P2) |
3348				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3349				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3350		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3351				 PIPE_CONFIG(ADDR_SURF_P2) |
3352				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3353				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3354		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3355				 PIPE_CONFIG(ADDR_SURF_P2) |
3356				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3357				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3358
3359		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3360				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3361				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3362				NUM_BANKS(ADDR_SURF_8_BANK));
3363		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3364				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3365				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3366				NUM_BANKS(ADDR_SURF_8_BANK));
3367		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3368				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3369				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3370				NUM_BANKS(ADDR_SURF_8_BANK));
3371		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3372				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3373				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3374				NUM_BANKS(ADDR_SURF_8_BANK));
3375		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3376				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3377				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3378				NUM_BANKS(ADDR_SURF_8_BANK));
3379		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3380				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3381				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3382				NUM_BANKS(ADDR_SURF_8_BANK));
3383		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3384				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3385				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3386				NUM_BANKS(ADDR_SURF_8_BANK));
3387		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3388				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3389				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3390				NUM_BANKS(ADDR_SURF_16_BANK));
3391		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3392				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3393				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3394				NUM_BANKS(ADDR_SURF_16_BANK));
3395		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3396				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3397				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3398				 NUM_BANKS(ADDR_SURF_16_BANK));
3399		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3400				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3401				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3402				 NUM_BANKS(ADDR_SURF_16_BANK));
3403		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3404				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3405				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3406				 NUM_BANKS(ADDR_SURF_16_BANK));
3407		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3408				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3409				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3410				 NUM_BANKS(ADDR_SURF_16_BANK));
3411		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3412				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3413				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3414				 NUM_BANKS(ADDR_SURF_8_BANK));
3415
3416		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3417			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3418			    reg_offset != 23)
3419				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3420
3421		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3422			if (reg_offset != 7)
3423				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3424
3425		break;
3426	}
3427}
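
/*
 * Illustrative note (not from the original sources): each modearray[] and
 * mod2array[] entry above is simply an OR of shifted bit-fields, e.g.
 *
 *	ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2)
 *
 * expands to
 *
 *	(ARRAY_2D_TILED_THIN1 << GB_TILE_MODE0__ARRAY_MODE__SHIFT) |
 *	(ADDR_SURF_P2 << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
 *
 * and is written verbatim to mmGB_TILE_MODE0 + n.  Indices such as 7, 12,
 * 17 and 23 are skipped on some ASICs, presumably because those tile
 * modes are reserved there.
 */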
3428
3429static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3430				  u32 se_num, u32 sh_num, u32 instance)
3431{
3432	u32 data;
3433
3434	if (instance == 0xffffffff)
3435		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3436	else
3437		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3438
3439	if (se_num == 0xffffffff)
3440		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
3441	else
3442		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
3443
3444	if (sh_num == 0xffffffff)
3445		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
3446	else
3447		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
3448
3449	WREG32(mmGRBM_GFX_INDEX, data);
3450}
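
/*
 * Usage sketch for the helper above (illustrative, not authoritative):
 * passing 0xffffffff for se_num/sh_num/instance sets the corresponding
 * *_BROADCAST_WRITES bit so that subsequent register writes reach every
 * SE/SH/instance, e.g.
 *
 *	gfx_v8_0_select_se_sh(adev, 1, 0, 0xffffffff);	  - SE1/SH0 only
 *	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 *
 * Callers in this file hold adev->grbm_idx_mutex around the select and
 * restore full broadcast before dropping it.
 */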
3451
3452static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
3453				  u32 me, u32 pipe, u32 q, u32 vm)
3454{
3455	vi_srbm_select(adev, me, pipe, q, vm);
3456}
3457
3458static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3459{
3460	u32 data, mask;
3461
3462	data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
3463		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3464
3465	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
3466
3467	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3468					 adev->gfx.config.max_sh_per_se);
3469
3470	return (~data) & mask;
3471}
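
/*
 * Worked example (illustrative): with max_backends_per_se = 4 and
 * max_sh_per_se = 1 the mask is 0xf; if the combined backend-disable
 * bits read back as 0x2, the function returns (~0x2) & 0xf = 0xd,
 * i.e. RBs 0, 2 and 3 active.
 */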
3472
3473static void
3474gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3475{
3476	switch (adev->asic_type) {
3477	case CHIP_FIJI:
3478	case CHIP_VEGAM:
3479		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3480			  RB_XSEL2(1) | PKR_MAP(2) |
3481			  PKR_XSEL(1) | PKR_YSEL(1) |
3482			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3483		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3484			   SE_PAIR_YSEL(2);
3485		break;
3486	case CHIP_TONGA:
3487	case CHIP_POLARIS10:
3488		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3489			  SE_XSEL(1) | SE_YSEL(1);
3490		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3491			   SE_PAIR_YSEL(2);
3492		break;
3493	case CHIP_TOPAZ:
3494	case CHIP_CARRIZO:
3495		*rconf |= RB_MAP_PKR0(2);
3496		*rconf1 |= 0x0;
3497		break;
3498	case CHIP_POLARIS11:
3499	case CHIP_POLARIS12:
3500		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3501			  SE_XSEL(1) | SE_YSEL(1);
3502		*rconf1 |= 0x0;
3503		break;
3504	case CHIP_STONEY:
3505		*rconf |= 0x0;
3506		*rconf1 |= 0x0;
3507		break;
3508	default:
3509		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3510		break;
3511	}
3512}
3513
3514static void
3515gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3516					u32 raster_config, u32 raster_config_1,
3517					unsigned rb_mask, unsigned num_rb)
3518{
3519	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3520	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3521	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3522	unsigned rb_per_se = num_rb / num_se;
3523	unsigned se_mask[4];
3524	unsigned se;
3525
3526	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3527	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3528	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3529	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3530
3531	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3532	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3533	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3534
3535	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3536			     (!se_mask[2] && !se_mask[3]))) {
3537		raster_config_1 &= ~SE_PAIR_MAP_MASK;
3538
3539		if (!se_mask[0] && !se_mask[1]) {
3540			raster_config_1 |=
3541				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3542		} else {
3543			raster_config_1 |=
3544				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3545		}
3546	}
3547
3548	for (se = 0; se < num_se; se++) {
3549		unsigned raster_config_se = raster_config;
3550		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3551		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3552		int idx = (se / 2) * 2;
3553
3554		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3555			raster_config_se &= ~SE_MAP_MASK;
3556
3557			if (!se_mask[idx]) {
3558				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3559			} else {
3560				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3561			}
3562		}
3563
3564		pkr0_mask &= rb_mask;
3565		pkr1_mask &= rb_mask;
3566		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3567			raster_config_se &= ~PKR_MAP_MASK;
3568
3569			if (!pkr0_mask) {
3570				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3571			} else {
3572				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3573			}
3574		}
3575
3576		if (rb_per_se >= 2) {
3577			unsigned rb0_mask = 1 << (se * rb_per_se);
3578			unsigned rb1_mask = rb0_mask << 1;
3579
3580			rb0_mask &= rb_mask;
3581			rb1_mask &= rb_mask;
3582			if (!rb0_mask || !rb1_mask) {
3583				raster_config_se &= ~RB_MAP_PKR0_MASK;
3584
3585				if (!rb0_mask) {
3586					raster_config_se |=
3587						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3588				} else {
3589					raster_config_se |=
3590						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3591				}
3592			}
3593
3594			if (rb_per_se > 2) {
3595				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3596				rb1_mask = rb0_mask << 1;
3597				rb0_mask &= rb_mask;
3598				rb1_mask &= rb_mask;
3599				if (!rb0_mask || !rb1_mask) {
3600					raster_config_se &= ~RB_MAP_PKR1_MASK;
3601
3602					if (!rb0_mask) {
3603						raster_config_se |=
3604							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3605					} else {
3606						raster_config_se |=
3607							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3608					}
3609				}
3610			}
3611		}
3612
3613		/* GRBM_GFX_INDEX has a different offset on VI */
3614		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
3615		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3616		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3617	}
3618
3619	/* GRBM_GFX_INDEX has a different offset on VI */
3620	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3621}
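
/*
 * Worked example (illustrative): with num_rb = 8, num_se = 4 and
 * sh_per_se = 1 we get rb_per_se = 2, so each se_mask[] entry covers a
 * 2-bit slice of rb_mask.  An all-zero slice means every RB in that SE
 * (or packer) was harvested, and the matching SE_MAP/PKR_MAP/RB_MAP
 * field is repointed at the surviving half.
 */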
3622
3623static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3624{
3625	int i, j;
3626	u32 data;
3627	u32 raster_config = 0, raster_config_1 = 0;
3628	u32 active_rbs = 0;
3629	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3630					adev->gfx.config.max_sh_per_se;
3631	unsigned num_rb_pipes;
3632
3633	mutex_lock(&adev->grbm_idx_mutex);
3634	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3635		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3636			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3637			data = gfx_v8_0_get_rb_active_bitmap(adev);
3638			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3639					       rb_bitmap_width_per_sh);
3640		}
3641	}
3642	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3643
3644	adev->gfx.config.backend_enable_mask = active_rbs;
3645	adev->gfx.config.num_rbs = hweight32(active_rbs);
3646
3647	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3648			     adev->gfx.config.max_shader_engines, 16);
3649
3650	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3651
3652	if (!adev->gfx.config.backend_enable_mask ||
3653			adev->gfx.config.num_rbs >= num_rb_pipes) {
3654		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3655		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3656	} else {
3657		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3658							adev->gfx.config.backend_enable_mask,
3659							num_rb_pipes);
3660	}
3661
3662	/* cache the values for userspace */
3663	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3664		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3665			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3666			adev->gfx.config.rb_config[i][j].rb_backend_disable =
3667				RREG32(mmCC_RB_BACKEND_DISABLE);
3668			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
3669				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3670			adev->gfx.config.rb_config[i][j].raster_config =
3671				RREG32(mmPA_SC_RASTER_CONFIG);
3672			adev->gfx.config.rb_config[i][j].raster_config_1 =
3673				RREG32(mmPA_SC_RASTER_CONFIG_1);
3674		}
3675	}
3676	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3677	mutex_unlock(&adev->grbm_idx_mutex);
3678}
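
/*
 * Layout note (illustrative): active_rbs packs one bitmap per SE/SH pair,
 * rb_bitmap_width_per_sh bits each.  E.g. with two SEs, one SH per SE and
 * four backends per SE, SE0's RBs land in bits 0-3 and SE1's in bits 4-7;
 * this is the format cached as backend_enable_mask for later queries.
 */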
3679
3680/**
3681 * gfx_v8_0_init_compute_vmid - init compute VMIDs
3682 *
3683 * @adev: amdgpu_device pointer
3684 *
3685 * Initialize compute vmid sh_mem registers
3686 *
3687 */
3688#define DEFAULT_SH_MEM_BASES	(0x6000)
3689static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
3690{
3691	int i;
3692	uint32_t sh_mem_config;
3693	uint32_t sh_mem_bases;
3694
3695	/*
3696	 * Configure apertures:
3697	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
3698	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
 3699	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (256TB)
3700	 */
3701	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
3702
3703	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
3704			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
3705			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
3706			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
3707			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
3708			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
3709
3710	mutex_lock(&adev->srbm_mutex);
3711	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3712		vi_srbm_select(adev, 0, 0, 0, i);
3713		/* CP and shaders */
3714		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
3715		WREG32(mmSH_MEM_APE1_BASE, 1);
3716		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3717		WREG32(mmSH_MEM_BASES, sh_mem_bases);
3718	}
3719	vi_srbm_select(adev, 0, 0, 0, 0);
3720	mutex_unlock(&adev->srbm_mutex);
3721
3722	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
3723	 * access. These should be enabled by FW for target VMIDs. */
3724	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3725		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
3726		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
3727		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
3728		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
3729	}
3730}
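
/*
 * Worked example (illustrative): DEFAULT_SH_MEM_BASES is 0x6000, so
 * sh_mem_bases above is 0x6000 | (0x6000 << 16) = 0x60006000, placing
 * both the private and shared aperture bases at 0x6000 << 48 =
 * 0x6000'0000'0000'0000, which matches the aperture map in the comment
 * at the top of the function.
 */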
3731
3732static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
3733{
3734	int vmid;
3735
3736	/*
3737	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
3738	 * access. Compute VMIDs should be enabled by FW for target VMIDs;
3739	 * the driver can enable them for graphics. VMID0 should maintain
3740	 * access so that HWS firmware can save/restore entries.
3741	 */
3742	for (vmid = 1; vmid < 16; vmid++) {
3743		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
3744		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
3745		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
3746		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
3747	}
3748}
3749
3750static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3751{
3752	switch (adev->asic_type) {
3753	default:
3754		adev->gfx.config.double_offchip_lds_buf = 1;
3755		break;
3756	case CHIP_CARRIZO:
3757	case CHIP_STONEY:
3758		adev->gfx.config.double_offchip_lds_buf = 0;
3759		break;
3760	}
3761}
3762
3763static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
3764{
3765	u32 tmp, sh_static_mem_cfg;
3766	int i;
3767
3768	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
3769	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3770	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3771	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
3772
3773	gfx_v8_0_tiling_mode_table_init(adev);
3774	gfx_v8_0_setup_rb(adev);
3775	gfx_v8_0_get_cu_info(adev);
3776	gfx_v8_0_config_init(adev);
3777
3778	/* XXX SH_MEM regs */
3779	/* where to put LDS, scratch, GPUVM in FSA64 space */
3780	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
3781				   SWIZZLE_ENABLE, 1);
3782	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3783				   ELEMENT_SIZE, 1);
3784	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3785				   INDEX_STRIDE, 3);
3786	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
3787
3788	mutex_lock(&adev->srbm_mutex);
3789	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
3790		vi_srbm_select(adev, 0, 0, 0, i);
3791		/* CP and shaders */
3792		if (i == 0) {
3793			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
3794			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3795			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3796					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3797			WREG32(mmSH_MEM_CONFIG, tmp);
3798			WREG32(mmSH_MEM_BASES, 0);
3799		} else {
3800			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
3801			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3802			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3803					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3804			WREG32(mmSH_MEM_CONFIG, tmp);
3805			tmp = adev->gmc.shared_aperture_start >> 48;
3806			WREG32(mmSH_MEM_BASES, tmp);
3807		}
3808
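		/*
		 * Note (illustrative): base 1 with limit 0 leaves APE1 as an
		 * empty range, i.e. the aperture is effectively disabled.
		 */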
3809		WREG32(mmSH_MEM_APE1_BASE, 1);
3810		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3811	}
3812	vi_srbm_select(adev, 0, 0, 0, 0);
3813	mutex_unlock(&adev->srbm_mutex);
3814
3815	gfx_v8_0_init_compute_vmid(adev);
3816	gfx_v8_0_init_gds_vmid(adev);
3817
3818	mutex_lock(&adev->grbm_idx_mutex);
3819	/*
3820	 * Make sure that the following register writes are broadcast
3821	 * to all the shaders.
3822	 */
3823	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3824
3825	WREG32(mmPA_SC_FIFO_SIZE,
3826		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
3827			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
3828		   (adev->gfx.config.sc_prim_fifo_size_backend <<
3829			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
3830		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
3831			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
3832		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
3833			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
3834
3835	tmp = RREG32(mmSPI_ARB_PRIORITY);
3836	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
3837	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
3838	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
3839	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
3840	WREG32(mmSPI_ARB_PRIORITY, tmp);
3841
3842	mutex_unlock(&adev->grbm_idx_mutex);
3844}
3845
3846static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3847{
3848	u32 i, j, k;
3849	u32 mask;
3850
3851	mutex_lock(&adev->grbm_idx_mutex);
3852	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3853		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3854			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3855			for (k = 0; k < adev->usec_timeout; k++) {
3856				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3857					break;
3858				udelay(1);
3859			}
3860			if (k == adev->usec_timeout) {
3861				gfx_v8_0_select_se_sh(adev, 0xffffffff,
3862						      0xffffffff, 0xffffffff);
3863				mutex_unlock(&adev->grbm_idx_mutex);
3864				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
3865					 i, j);
3866				return;
3867			}
3868		}
3869	}
3870	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3871	mutex_unlock(&adev->grbm_idx_mutex);
3872
3873	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3874		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3875		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3876		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3877	for (k = 0; k < adev->usec_timeout; k++) {
3878		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3879			break;
3880		udelay(1);
3881	}
3882}
3883
3884static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3885					       bool enable)
3886{
3887	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3888
3889	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3890	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3891	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3892	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3893
3894	WREG32(mmCP_INT_CNTL_RING0, tmp);
3895}
3896
3897static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
3898{
3899	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
3900	/* csib */
3901	WREG32(mmRLC_CSIB_ADDR_HI,
3902			adev->gfx.rlc.clear_state_gpu_addr >> 32);
3903	WREG32(mmRLC_CSIB_ADDR_LO,
3904			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3905	WREG32(mmRLC_CSIB_LENGTH,
3906			adev->gfx.rlc.clear_state_size);
3907}
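
/*
 * Note (illustrative): the clear-state buffer address is split across two
 * registers; the low word is masked with 0xfffffffc because the buffer
 * address is expected to be at least dword-aligned.
 */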
3908
3909static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
3910				int ind_offset,
3911				int list_size,
3912				int *unique_indices,
3913				int *indices_count,
3914				int max_indices,
3915				int *ind_start_offsets,
3916				int *offset_count,
3917				int max_offset)
3918{
3919	int indices;
3920	bool new_entry = true;
3921
3922	for (; ind_offset < list_size; ind_offset++) {
3923
3924		if (new_entry) {
3925			new_entry = false;
3926			ind_start_offsets[*offset_count] = ind_offset;
3927			*offset_count = *offset_count + 1;
3928			BUG_ON(*offset_count >= max_offset);
3929		}
3930
3931		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
3932			new_entry = true;
3933			continue;
3934		}
3935
3936		ind_offset += 2;
3937
3938		/* look for the matching index */
3939		for (indices = 0;
3940			indices < *indices_count;
3941			indices++) {
3942			if (unique_indices[indices] ==
3943				register_list_format[ind_offset])
3944				break;
3945		}
3946
3947		if (indices >= *indices_count) {
3948			unique_indices[*indices_count] =
3949				register_list_format[ind_offset];
3950			indices = *indices_count;
3951			*indices_count = *indices_count + 1;
3952			BUG_ON(*indices_count >= max_indices);
3953		}
3954
3955		register_list_format[ind_offset] = indices;
3956	}
3957}
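
/*
 * Behaviour sketch for the parser above (inferred from the code, not from
 * documentation): the indirect list is a series of entries terminated by
 * 0xFFFFFFFF markers.  For each entry the start offset is recorded, and
 * every register index encountered is deduplicated into unique_indices[],
 * with the in-place value rewritten to its slot number so the RLC can use
 * a compact index table.
 */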
3958
3959static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3960{
3961	int i, temp, data;
3962	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
3963	int indices_count = 0;
3964	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3965	int offset_count = 0;
3966
3967	int list_size;
3968	unsigned int *register_list_format =
3969		kmemdup(adev->gfx.rlc.register_list_format,
3970			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
3971	if (!register_list_format)
3972		return -ENOMEM;
3973
3974	gfx_v8_0_parse_ind_reg_list(register_list_format,
3975				RLC_FormatDirectRegListLength,
3976				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3977				unique_indices,
3978				&indices_count,
3979				ARRAY_SIZE(unique_indices),
3980				indirect_start_offsets,
3981				&offset_count,
3982				ARRAY_SIZE(indirect_start_offsets));
3983
3984	/* save and restore list */
3985	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
3986
3987	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
3988	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
3989		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
3990
3991	/* indirect list */
3992	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
3993	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
3994		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
3995
3996	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
3997	list_size = list_size >> 1;
3998	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
3999	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
4000
4001	/* starting offsets */
4002	WREG32(mmRLC_GPM_SCRATCH_ADDR,
4003		adev->gfx.rlc.starting_offsets_start);
4004	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
4005		WREG32(mmRLC_GPM_SCRATCH_DATA,
4006				indirect_start_offsets[i]);
4007
4008	/* unique indices */
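	/*
	 * Packing note (inferred from the masks below, not from docs): each
	 * unique_indices[] word appears to carry a register address in its
	 * low bits (& 0x3FFFF) and a data value in its high bits (>> 20).
	 */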
4009	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
4010	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
4011	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
4012		if (unique_indices[i] != 0) {
4013			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
4014			WREG32(data + i, unique_indices[i] >> 20);
4015		}
4016	}
4017	kfree(register_list_format);
4018
4019	return 0;
4020}
4021
4022static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
4023{
4024	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
4025}
4026
4027static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
4028{
4029	uint32_t data;
4030
4031	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
4032
4033	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
4034	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
4035	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
4036	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
4037	WREG32(mmRLC_PG_DELAY, data);
4038
4039	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
4040	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
4042}
4043
4044static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
4045						bool enable)
4046{
4047	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
4048}
4049
4050static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
4051						  bool enable)
4052{
4053	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
4054}
4055
4056static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
4057{
4058	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
4059}
4060
4061static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4062{
4063	if ((adev->asic_type == CHIP_CARRIZO) ||
4064	    (adev->asic_type == CHIP_STONEY)) {
4065		gfx_v8_0_init_csb(adev);
4066		gfx_v8_0_init_save_restore_list(adev);
4067		gfx_v8_0_enable_save_restore_machine(adev);
4068		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4069		gfx_v8_0_init_power_gating(adev);
4070		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4071	} else if ((adev->asic_type == CHIP_POLARIS11) ||
4072		   (adev->asic_type == CHIP_POLARIS12) ||
4073		   (adev->asic_type == CHIP_VEGAM)) {
4074		gfx_v8_0_init_csb(adev);
4075		gfx_v8_0_init_save_restore_list(adev);
4076		gfx_v8_0_enable_save_restore_machine(adev);
4077		gfx_v8_0_init_power_gating(adev);
4078	}
4080}
4081
4082static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
4083{
4084	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
4085
4086	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4087	gfx_v8_0_wait_for_rlc_serdes(adev);
4088}
4089
4090static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
4091{
4092	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4093	udelay(50);
4094
4095	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
4096	udelay(50);
4097}
4098
4099static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
4100{
4101	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
4102
4103	/* APUs such as Carrizo enable the CP interrupt only after the CP is initialized */
4104	if (!(adev->flags & AMD_IS_APU))
4105		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4106
4107	udelay(50);
4108}
4109
4110static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4111{
4112	if (amdgpu_sriov_vf(adev)) {
4113		gfx_v8_0_init_csb(adev);
4114		return 0;
4115	}
4116
4117	adev->gfx.rlc.funcs->stop(adev);
4118	adev->gfx.rlc.funcs->reset(adev);
4119	gfx_v8_0_init_pg(adev);
4120	adev->gfx.rlc.funcs->start(adev);
4121
4122	return 0;
4123}
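
/*
 * Note (illustrative): under SR-IOV only the CSB is re-initialized above,
 * presumably because the RLC itself is owned and sequenced by the host.
 */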
4124
4125static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4126{
4127	u32 tmp = RREG32(mmCP_ME_CNTL);
4128
4129	if (enable) {
4130		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4131		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4132		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4133	} else {
4134		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4135		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4136		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
4137	}
4138	WREG32(mmCP_ME_CNTL, tmp);
4139	udelay(50);
4140}
4141
4142static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4143{
4144	u32 count = 0;
4145	const struct cs_section_def *sect = NULL;
4146	const struct cs_extent_def *ext = NULL;
4147
4148	/* begin clear state */
4149	count += 2;
4150	/* context control state */
4151	count += 3;
4152
4153	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4154		for (ext = sect->section; ext->extent != NULL; ++ext) {
4155			if (sect->id == SECT_CONTEXT)
4156				count += 2 + ext->reg_count;
4157			else
4158				return 0;
4159		}
4160	}
4161	/* pa_sc_raster_config/pa_sc_raster_config1 */
4162	count += 4;
4163	/* end clear state */
4164	count += 2;
4165	/* clear state */
4166	count += 2;
4167
4168	return count;
4169}
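
/*
 * Worked example (illustrative): for a clear-state image with a single
 * SECT_CONTEXT extent of N registers this returns
 * 2 + 3 + (2 + N) + 4 + 2 + 2 = N + 15 dwords, which is why
 * gfx_v8_0_cp_gfx_start() asks amdgpu_ring_alloc() for this size plus a
 * little headroom.
 */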
4170
4171static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
4172{
4173	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
4174	const struct cs_section_def *sect = NULL;
4175	const struct cs_extent_def *ext = NULL;
4176	int r, i;
4177
4178	/* init the CP */
4179	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
4180	WREG32(mmCP_ENDIAN_SWAP, 0);
4181	WREG32(mmCP_DEVICE_ID, 1);
4182
4183	gfx_v8_0_cp_gfx_enable(adev, true);
4184
4185	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
4186	if (r) {
4187		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
4188		return r;
4189	}
4190
4191	/* clear state buffer */
4192	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4193	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4194
4195	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4196	amdgpu_ring_write(ring, 0x80000000);
4197	amdgpu_ring_write(ring, 0x80000000);
4198
4199	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4200		for (ext = sect->section; ext->extent != NULL; ++ext) {
4201			if (sect->id == SECT_CONTEXT) {
4202				amdgpu_ring_write(ring,
4203				       PACKET3(PACKET3_SET_CONTEXT_REG,
4204					       ext->reg_count));
4205				amdgpu_ring_write(ring,
4206				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4207				for (i = 0; i < ext->reg_count; i++)
4208					amdgpu_ring_write(ring, ext->extent[i]);
4209			}
4210		}
4211	}
4212
4213	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4214	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4215	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4216	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
4217
4218	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4219	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4220
4221	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4222	amdgpu_ring_write(ring, 0);
4223
4224	/* init the CE partitions */
4225	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4226	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4227	amdgpu_ring_write(ring, 0x8000);
4228	amdgpu_ring_write(ring, 0x8000);
4229
4230	amdgpu_ring_commit(ring);
4231
4232	return 0;
4233}
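
/*
 * Doorbell setup for the gfx ring (illustrative summary): when the ring
 * uses a doorbell, its offset is programmed and DOORBELL_EN set; on
 * dGPUs the RB0 doorbell window is additionally fenced with the
 * RANGE_LOWER/RANGE_UPPER pair.  Topaz has no gfx doorbells at all,
 * hence the early return below.
 */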
4234static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
4235{
4236	u32 tmp;
4237	/* no gfx doorbells on Iceland (Topaz) */
4238	if (adev->asic_type == CHIP_TOPAZ)
4239		return;
4240
4241	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
4242
4243	if (ring->use_doorbell) {
4244		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4245				DOORBELL_OFFSET, ring->doorbell_index);
4246		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4247						DOORBELL_HIT, 0);
4248		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4249					    DOORBELL_EN, 1);
4250	} else {
4251		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
4252	}
4253
4254	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
4255
4256	if (adev->flags & AMD_IS_APU)
4257		return;
4258
4259	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
4260					DOORBELL_RANGE_LOWER,
4261					adev->doorbell_index.gfx_ring0);
4262	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
4263
4264	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
4265		CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
4266}
4267
4268static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4269{
4270	struct amdgpu_ring *ring;
4271	u32 tmp;
4272	u32 rb_bufsz;
4273	u64 rb_addr, rptr_addr, wptr_gpu_addr;
4274
4275	/* Set the write pointer delay */
4276	WREG32(mmCP_RB_WPTR_DELAY, 0);
4277
4278	/* set the RB to use vmid 0 */
4279	WREG32(mmCP_RB_VMID, 0);
4280
4281	/* Set ring buffer size */
4282	ring = &adev->gfx.gfx_ring[0];
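	/*
	 * Illustrative: RB_BUFSZ is log2 of the ring size in 8-byte units,
	 * e.g. a 64 KiB ring yields order_base_2(65536 / 8) = 13.
	 */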
4283	rb_bufsz = order_base_2(ring->ring_size / 8);
4284	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
4285	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
4286	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
4287	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
4288#ifdef __BIG_ENDIAN
4289	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
4290#endif
4291	WREG32(mmCP_RB0_CNTL, tmp);
4292
4293	/* Initialize the ring buffer's read and write pointers */
4294	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
4295	ring->wptr = 0;
4296	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4297
4298	/* set the wb address whether it's enabled or not */
4299	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4300	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
4301	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
4302
4303	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4304	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
4305	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
4306	mdelay(1);
4307	WREG32(mmCP_RB0_CNTL, tmp);
4308
4309	rb_addr = ring->gpu_addr >> 8;
4310	WREG32(mmCP_RB0_BASE, rb_addr);
4311	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
4312
4313	gfx_v8_0_set_cpg_door_bell(adev, ring);
4314	/* start the ring */
4315	amdgpu_ring_clear_ring(ring);
4316	gfx_v8_0_cp_gfx_start(adev);
4317	ring->sched.ready = true;
4318
4319	return 0;
4320}
4321
4322static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4323{
4324	if (enable) {
4325		WREG32(mmCP_MEC_CNTL, 0);
4326	} else {
4327		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4328		adev->gfx.kiq.ring.sched.ready = false;
4329	}
4330	udelay(50);
4331}
4332
4333/* KIQ functions */
4334static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
4335{
4336	uint32_t tmp;
4337	struct amdgpu_device *adev = ring->adev;
4338
4339	/* tell RLC which is KIQ queue */
4340	tmp = RREG32(mmRLC_CP_SCHEDULERS);
4341	tmp &= 0xffffff00;
4342	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4343	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4344	tmp |= 0x80;
4345	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4346}
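
/* Editorial decode of the RLC_CP_SCHEDULERS value built above (layout
 * inferred from the shifts, not from a register spec): bits [2:0] =
 * queue, bits [4:3] = pipe, bit 5 = me, and the 0x80 written in the
 * second pass looks like a "queue valid" strobe; e.g. me=1/pipe=0/
 * queue=0 yields 0x20 and then 0xa0.
 */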
4347
4348static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4349{
4350	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4351	uint64_t queue_mask = 0;
4352	int r, i;
4353
4354	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
4355		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
4356			continue;
4357
4358		/* This situation may be hit in the future if a new HW
4359		 * generation exposes more than 64 queues. If so, the
4360		 * definition of queue_mask needs updating */
4361		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
4362			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
4363			break;
4364		}
4365
4366		queue_mask |= (1ull << i);
4367	}
4368
4369	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
4370	if (r) {
4371		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4372		return r;
4373	}
4374	/* set resources */
4375	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
4376	amdgpu_ring_write(kiq_ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
4377	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
4378	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
4379	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
4380	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
4381	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
4382	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
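	/* Sizing note (editorial, based on the usual PM4 type-3 encoding in
	 * which the count field is the number of payload dwords minus one):
	 * PACKET3(PACKET3_SET_RESOURCES, 6) is followed by 6 + 1 = 7 payload
	 * writes above, and each PACKET3(PACKET3_MAP_QUEUES, 5) below by
	 * 5 + 1 = 6, which is what the "(8 * num_compute_rings) + 8" ring
	 * allocation is sized against.
	 */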
4383	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4384		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4385		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
4386		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4387
4388		/* map queues */
4389		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
4390		/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
4391		amdgpu_ring_write(kiq_ring,
4392				  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
4393		amdgpu_ring_write(kiq_ring,
4394				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
4395				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
4396				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
4397				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
4398		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
4399		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
4400		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
4401		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4402	}
4403
4404	amdgpu_ring_commit(kiq_ring);
4405
4406	return 0;
4407}
4408
4409static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
4410{
4411	int i, r = 0;
4412
4413	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4414		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
4415		for (i = 0; i < adev->usec_timeout; i++) {
4416			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4417				break;
4418			udelay(1);
4419		}
4420		if (i == adev->usec_timeout)
4421			r = -ETIMEDOUT;
4422	}
4423	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
4424	WREG32(mmCP_HQD_PQ_RPTR, 0);
4425	WREG32(mmCP_HQD_PQ_WPTR, 0);
4426
4427	return r;
4428}
4429
4430static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4431{
4432	struct amdgpu_device *adev = ring->adev;
4433
4434	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4435		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
4436			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4437			mqd->cp_hqd_queue_priority =
4438				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4439		}
4440	}
4441}
4442
4443static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4444{
4445	struct amdgpu_device *adev = ring->adev;
4446	struct vi_mqd *mqd = ring->mqd_ptr;
4447	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4448	uint32_t tmp;
4449
4450	mqd->header = 0xC0310800;
4451	mqd->compute_pipelinestat_enable = 0x00000001;
4452	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4453	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4454	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4455	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4456	mqd->compute_misc_reserved = 0x00000003;
4457	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
4458						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4459	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
4460						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4461	eop_base_addr = ring->eop_gpu_addr >> 8;
4462	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4463	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4464
4465	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4466	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
4467	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4468			(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
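	/* Worked example (editorial): GFX8_MEC_HPD_SIZE is 4096 bytes, i.e.
	 * 1024 dwords, so EOP_SIZE = order_base_2(1024) - 1 = 9 and, per the
	 * comment above, the hardware decodes 2^(9 + 1) = 1024 dwords, the
	 * whole HPD buffer.
	 */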
4469
4470	mqd->cp_hqd_eop_control = tmp;
4471
4472	/* enable doorbell? */
4473	tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4474			    CP_HQD_PQ_DOORBELL_CONTROL,
4475			    DOORBELL_EN,
4476			    ring->use_doorbell ? 1 : 0);
4477
4478	mqd->cp_hqd_pq_doorbell_control = tmp;
4479
4480	/* set the pointer to the MQD */
4481	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
4482	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
4483
4484	/* set MQD vmid to 0 */
4485	tmp = RREG32(mmCP_MQD_CONTROL);
4486	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4487	mqd->cp_mqd_control = tmp;
4488
4489	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4490	hqd_gpu_addr = ring->gpu_addr >> 8;
4491	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4492	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4493
4494	/* set up the HQD, this is similar to CP_RB0_CNTL */
4495	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
4496	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4497			    (order_base_2(ring->ring_size / 4) - 1));
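	/* Worked example (editorial): a 4 KiB queue has ring_size / 4 = 1024
	 * dwords, so QUEUE_SIZE = order_base_2(1024) - 1 = 9, the same
	 * 2^(n + 1)-dwords convention used for CP_HQD_EOP_CONTROL above.
	 */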
4498	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4499			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
4500#ifdef __BIG_ENDIAN
4501	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
4502#endif
4503	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4504	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
4505	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4506	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4507	mqd->cp_hqd_pq_control = tmp;
4508
4509	/* set the wb address whether it's enabled or not */
4510	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4511	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4512	mqd->cp_hqd_pq_rptr_report_addr_hi =
4513		upper_32_bits(wb_gpu_addr) & 0xffff;
4514
4515	/* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 */
4516	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4517	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4518	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4519
4520	tmp = 0;
4521	/* enable the doorbell if requested */
4522	if (ring->use_doorbell) {
4523		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
4524		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4525				DOORBELL_OFFSET, ring->doorbell_index);
4526
4527		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4528					 DOORBELL_EN, 1);
4529		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4530					 DOORBELL_SOURCE, 0);
4531		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4532					 DOORBELL_HIT, 0);
4533	}
4534
4535	mqd->cp_hqd_pq_doorbell_control = tmp;
4536
4537	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4538	ring->wptr = 0;
4539	mqd->cp_hqd_pq_wptr = ring->wptr;
4540	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
4541
4542	/* set the vmid for the queue */
4543	mqd->cp_hqd_vmid = 0;
4544
4545	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
4546	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
4547	mqd->cp_hqd_persistent_state = tmp;
4548
4549	/* set MTYPE */
4550	tmp = RREG32(mmCP_HQD_IB_CONTROL);
4551	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4552	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
4553	mqd->cp_hqd_ib_control = tmp;
4554
4555	tmp = RREG32(mmCP_HQD_IQ_TIMER);
4556	tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
4557	mqd->cp_hqd_iq_timer = tmp;
4558
4559	tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
4560	tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
4561	mqd->cp_hqd_ctx_save_control = tmp;
4562
4563	/* defaults */
4564	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
4565	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
4566	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
4567	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
4568	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
4569	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
4570	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
4571	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
4572	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
4573	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
4574	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
4575	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
4576
4577	/* set static priority for a queue/ring */
4578	gfx_v8_0_mqd_set_priority(ring, mqd);
4579	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
4580
4581	/* the map_queues packet doesn't need to activate the queue,
4582	 * so only the KIQ needs to set this field.
4583	 */
4584	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
4585		mqd->cp_hqd_active = 1;
4586
4587	return 0;
4588}
4589
4590static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
4591			struct vi_mqd *mqd)
4592{
4593	uint32_t mqd_reg;
4594	uint32_t *mqd_data;
4595
4596	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
4597	mqd_data = &mqd->cp_mqd_base_addr_lo;
4598
4599	/* disable wptr polling */
4600	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
4601
4602	/* program all HQD registers */
4603	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
4604		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
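	/* The register loops in this function rely on struct vi_mqd laying
	 * out its fields in the same order as the mmCP_MQD_BASE_ADDR..
	 * mmCP_HQD_* register block, so mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]
	 * is the dword mirroring register mqd_reg.
	 */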
4605
4606	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
4607	 * This is safe since EOP RPTR==WPTR for any inactive HQD
4608	 * on ASICs that do not support context-save.
4609	 * EOP writes/reads can start anywhere in the ring.
4610	 */
4611	if (adev->asic_type != CHIP_TONGA) {
4612		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
4613		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
4614		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
4615	}
4616
4617	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
4618		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4619
4620	/* activate the HQD */
4621	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
4622		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4623
4624	return 0;
4625}
4626
4627static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
4628{
4629	struct amdgpu_device *adev = ring->adev;
4630	struct vi_mqd *mqd = ring->mqd_ptr;
4631	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
4632
4633	gfx_v8_0_kiq_setting(ring);
4634
4635	if (adev->in_gpu_reset) { /* for GPU_RESET case */
4636		/* reset MQD to a clean status */
4637		if (adev->gfx.mec.mqd_backup[mqd_idx])
4638			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4639
4640		/* reset ring buffer */
4641		ring->wptr = 0;
4642		amdgpu_ring_clear_ring(ring);
4643		mutex_lock(&adev->srbm_mutex);
4644		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4645		gfx_v8_0_mqd_commit(adev, mqd);
4646		vi_srbm_select(adev, 0, 0, 0, 0);
4647		mutex_unlock(&adev->srbm_mutex);
4648	} else {
4649		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4650		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4651		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4652		mutex_lock(&adev->srbm_mutex);
4653		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4654		gfx_v8_0_mqd_init(ring);
4655		gfx_v8_0_mqd_commit(adev, mqd);
4656		vi_srbm_select(adev, 0, 0, 0, 0);
4657		mutex_unlock(&adev->srbm_mutex);
4658
4659		if (adev->gfx.mec.mqd_backup[mqd_idx])
4660			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4661	}
4662
4663	return 0;
4664}
4665
4666static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4667{
4668	struct amdgpu_device *adev = ring->adev;
4669	struct vi_mqd *mqd = ring->mqd_ptr;
4670	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4671
4672	if (!adev->in_gpu_reset && !adev->in_suspend) {
4673		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4674		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4675		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4676		mutex_lock(&adev->srbm_mutex);
4677		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4678		gfx_v8_0_mqd_init(ring);
4679		vi_srbm_select(adev, 0, 0, 0, 0);
4680		mutex_unlock(&adev->srbm_mutex);
4681
4682		if (adev->gfx.mec.mqd_backup[mqd_idx])
4683			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4684	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
4685		/* reset MQD to a clean status */
4686		if (adev->gfx.mec.mqd_backup[mqd_idx])
4687			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4688		/* reset ring buffer */
4689		ring->wptr = 0;
4690		amdgpu_ring_clear_ring(ring);
4691	} else {
4692		amdgpu_ring_clear_ring(ring);
4693	}
4694	return 0;
4695}
4696
4697static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
4698{
4699	if (adev->asic_type > CHIP_TONGA) {
4700		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
4701		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
4702	}
4703	/* enable doorbells */
4704	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4705}
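
/* Editorial note: the << 2 above converts a doorbell index (units of
 * 32-bit doorbells) into the byte offset the RANGE registers appear to
 * expect; this is inferred from usage, not from a documented contract.
 */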
4706
4707static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4708{
4709	struct amdgpu_ring *ring;
4710	int r;
4711
4712	ring = &adev->gfx.kiq.ring;
4713
4714	r = amdgpu_bo_reserve(ring->mqd_obj, false);
4715	if (unlikely(r != 0))
4716		return r;
4717
4718	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4719	if (unlikely(r != 0))
4720		return r;
4721
4722	gfx_v8_0_kiq_init_queue(ring);
4723	amdgpu_bo_kunmap(ring->mqd_obj);
4724	ring->mqd_ptr = NULL;
4725	amdgpu_bo_unreserve(ring->mqd_obj);
4726	ring->sched.ready = true;
4727	return 0;
4728}
4729
4730static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4731{
4732	struct amdgpu_ring *ring = NULL;
4733	int r = 0, i;
4734
4735	gfx_v8_0_cp_compute_enable(adev, true);
4736
4737	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4738		ring = &adev->gfx.compute_ring[i];
4739
4740		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4741		if (unlikely(r != 0))
4742			goto done;
4743		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4744		if (!r) {
4745			r = gfx_v8_0_kcq_init_queue(ring);
4746			amdgpu_bo_kunmap(ring->mqd_obj);
4747			ring->mqd_ptr = NULL;
4748		}
4749		amdgpu_bo_unreserve(ring->mqd_obj);
4750		if (r)
4751			goto done;
4752	}
4753
4754	gfx_v8_0_set_mec_doorbell_range(adev);
4755
4756	r = gfx_v8_0_kiq_kcq_enable(adev);
4757	if (r)
4758		goto done;
4759
4760done:
4761	return r;
4762}
4763
4764static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4765{
4766	int r, i;
4767	struct amdgpu_ring *ring;
4768
4769	/* collect all the ring tests here: gfx, kiq, compute */
4770	ring = &adev->gfx.gfx_ring[0];
4771	r = amdgpu_ring_test_helper(ring);
4772	if (r)
4773		return r;
4774
4775	ring = &adev->gfx.kiq.ring;
4776	r = amdgpu_ring_test_helper(ring);
4777	if (r)
4778		return r;
4779
4780	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4781		ring = &adev->gfx.compute_ring[i];
4782		amdgpu_ring_test_helper(ring);
4783	}
4784
4785	return 0;
4786}
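
/* Editorial note: amdgpu_ring_test_helper() records the outcome in
 * ring->sched.ready itself, which is presumably why the compute-ring
 * results above are not treated as fatal while gfx and KIQ failures are.
 */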
4787
4788static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4789{
4790	int r;
4791
4792	if (!(adev->flags & AMD_IS_APU))
4793		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4794
4795	r = gfx_v8_0_kiq_resume(adev);
4796	if (r)
4797		return r;
4798
4799	r = gfx_v8_0_cp_gfx_resume(adev);
4800	if (r)
4801		return r;
4802
4803	r = gfx_v8_0_kcq_resume(adev);
4804	if (r)
4805		return r;
4806
4807	r = gfx_v8_0_cp_test_all_rings(adev);
4808	if (r)
4809		return r;
4810
4811	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4812
4813	return 0;
4814}
4815
4816static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
4817{
4818	gfx_v8_0_cp_gfx_enable(adev, enable);
4819	gfx_v8_0_cp_compute_enable(adev, enable);
4820}
4821
4822static int gfx_v8_0_hw_init(void *handle)
4823{
4824	int r;
4825	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4826
4827	gfx_v8_0_init_golden_registers(adev);
4828	gfx_v8_0_constants_init(adev);
4829
4830	r = adev->gfx.rlc.funcs->resume(adev);
4831	if (r)
4832		return r;
4833
4834	r = gfx_v8_0_cp_resume(adev);
4835
4836	return r;
4837}
4838
4839static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
4840{
4841	int r, i;
4842	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4843
4844	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4845	if (r)
4846		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4847
4848	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4849		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4850
4851		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
4852		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
4853						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
4854						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
4855						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
4856						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
4857		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
4858		amdgpu_ring_write(kiq_ring, 0);
4859		amdgpu_ring_write(kiq_ring, 0);
4860		amdgpu_ring_write(kiq_ring, 0);
4861	}
4862	r = amdgpu_ring_test_helper(kiq_ring);
4863	if (r)
4864		DRM_ERROR("KCQ disable failed\n");
4865
4866	return r;
4867}
4868
4869static bool gfx_v8_0_is_idle(void *handle)
4870{
4871	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4872
4873	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4874		|| RREG32(mmGRBM_STATUS2) != 0x8)
4875		return false;
4876	else
4877		return true;
4878}
4879
4880static bool gfx_v8_0_rlc_is_idle(void *handle)
4881{
4882	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4883
4884	if (RREG32(mmGRBM_STATUS2) != 0x8)
4885		return false;
4886	else
4887		return true;
4888}
4889
4890static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4891{
4892	unsigned int i;
4893	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4894
4895	for (i = 0; i < adev->usec_timeout; i++) {
4896		if (gfx_v8_0_rlc_is_idle(handle))
4897			return 0;
4898
4899		udelay(1);
4900	}
4901	return -ETIMEDOUT;
4902}
4903
4904static int gfx_v8_0_wait_for_idle(void *handle)
4905{
4906	unsigned int i;
4907	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4908
4909	for (i = 0; i < adev->usec_timeout; i++) {
4910		if (gfx_v8_0_is_idle(handle))
4911			return 0;
4912
4913		udelay(1);
4914	}
4915	return -ETIMEDOUT;
4916}
4917
4918static int gfx_v8_0_hw_fini(void *handle)
4919{
4920	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4921
4922	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4923	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4924
4925	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4926
4927	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
4928
4929	/* disable the KCQs so the CPC stops touching memory that is about to become invalid */
4930	gfx_v8_0_kcq_disable(adev);
4931
4932	if (amdgpu_sriov_vf(adev)) {
4933		pr_debug("Nothing to do here for an SRIOV client.\n");
4934		return 0;
4935	}
4936	amdgpu_gfx_rlc_enter_safe_mode(adev);
4937	if (!gfx_v8_0_wait_for_idle(adev))
4938		gfx_v8_0_cp_enable(adev, false);
4939	else
4940		pr_err("CP is busy, skipping CP halt\n");
4941	if (!gfx_v8_0_wait_for_rlc_idle(adev))
4942		adev->gfx.rlc.funcs->stop(adev);
4943	else
4944		pr_err("RLC is busy, skipping RLC halt\n");
4945	amdgpu_gfx_rlc_exit_safe_mode(adev);
4946
4947	return 0;
4948}
4949
4950static int gfx_v8_0_suspend(void *handle)
4951{
4952	return gfx_v8_0_hw_fini(handle);
4953}
4954
4955static int gfx_v8_0_resume(void *handle)
4956{
4957	return gfx_v8_0_hw_init(handle);
4958}
4959
4960static bool gfx_v8_0_check_soft_reset(void *handle)
4961{
4962	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4963	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4964	u32 tmp;
4965
4966	/* GRBM_STATUS */
4967	tmp = RREG32(mmGRBM_STATUS);
4968	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4969		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4970		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4971		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4972		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4973		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
4974		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4975		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4976						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4977		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4978						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4979		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4980						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4981	}
4982
4983	/* GRBM_STATUS2 */
4984	tmp = RREG32(mmGRBM_STATUS2);
4985	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4986		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4987						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4988
4989	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
4990	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
4991	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
4992		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4993						SOFT_RESET_CPF, 1);
4994		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4995						SOFT_RESET_CPC, 1);
4996		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4997						SOFT_RESET_CPG, 1);
4998		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
4999						SOFT_RESET_GRBM, 1);
5000	}
5001
5002	/* SRBM_STATUS */
5003	tmp = RREG32(mmSRBM_STATUS);
5004	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
5005		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
5006						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
5007	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
5008		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
5009						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
5010
5011	if (grbm_soft_reset || srbm_soft_reset) {
5012		adev->gfx.grbm_soft_reset = grbm_soft_reset;
5013		adev->gfx.srbm_soft_reset = srbm_soft_reset;
5014		return true;
5015	} else {
5016		adev->gfx.grbm_soft_reset = 0;
5017		adev->gfx.srbm_soft_reset = 0;
5018		return false;
5019	}
5020}
5021
5022static int gfx_v8_0_pre_soft_reset(void *handle)
5023{
5024	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5025	u32 grbm_soft_reset = 0;
5026
5027	if ((!adev->gfx.grbm_soft_reset) &&
5028	    (!adev->gfx.srbm_soft_reset))
5029		return 0;
5030
5031	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5032
5033	/* stop the rlc */
5034	adev->gfx.rlc.funcs->stop(adev);
5035
5036	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5037	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5038		/* Disable GFX parsing/prefetching */
5039		gfx_v8_0_cp_gfx_enable(adev, false);
5040
5041	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5042	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5043	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5044	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5045		int i;
5046
5047		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5048			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5049
5050			mutex_lock(&adev->srbm_mutex);
5051			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5052			gfx_v8_0_deactivate_hqd(adev, 2);
5053			vi_srbm_select(adev, 0, 0, 0, 0);
5054			mutex_unlock(&adev->srbm_mutex);
5055		}
5056		/* Disable MEC parsing/prefetching */
5057		gfx_v8_0_cp_compute_enable(adev, false);
5058	}
5059
5060	return 0;
5061}
5062
5063static int gfx_v8_0_soft_reset(void *handle)
5064{
5065	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5066	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
5067	u32 tmp;
5068
5069	if ((!adev->gfx.grbm_soft_reset) &&
5070	    (!adev->gfx.srbm_soft_reset))
5071		return 0;
5072
5073	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5074	srbm_soft_reset = adev->gfx.srbm_soft_reset;
5075
5076	if (grbm_soft_reset || srbm_soft_reset) {
5077		tmp = RREG32(mmGMCON_DEBUG);
5078		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
5079		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
5080		WREG32(mmGMCON_DEBUG, tmp);
5081		udelay(50);
5082	}
5083
5084	if (grbm_soft_reset) {
5085		tmp = RREG32(mmGRBM_SOFT_RESET);
5086		tmp |= grbm_soft_reset;
5087		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
5088		WREG32(mmGRBM_SOFT_RESET, tmp);
5089		tmp = RREG32(mmGRBM_SOFT_RESET);
5090
5091		udelay(50);
5092
5093		tmp &= ~grbm_soft_reset;
5094		WREG32(mmGRBM_SOFT_RESET, tmp);
5095		tmp = RREG32(mmGRBM_SOFT_RESET);
5096	}
5097
5098	if (srbm_soft_reset) {
5099		tmp = RREG32(mmSRBM_SOFT_RESET);
5100		tmp |= srbm_soft_reset;
5101		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
5102		WREG32(mmSRBM_SOFT_RESET, tmp);
5103		tmp = RREG32(mmSRBM_SOFT_RESET);
5104
5105		udelay(50);
5106
5107		tmp &= ~srbm_soft_reset;
5108		WREG32(mmSRBM_SOFT_RESET, tmp);
5109		tmp = RREG32(mmSRBM_SOFT_RESET);
5110	}
5111
5112	if (grbm_soft_reset || srbm_soft_reset) {
5113		tmp = RREG32(mmGMCON_DEBUG);
5114		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
5115		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
5116		WREG32(mmGMCON_DEBUG, tmp);
5117	}
5118
5119	/* Wait a little for things to settle down */
5120	udelay(50);
5121
5122	return 0;
5123}
5124
5125static int gfx_v8_0_post_soft_reset(void *handle)
5126{
5127	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5128	u32 grbm_soft_reset = 0;
5129
5130	if ((!adev->gfx.grbm_soft_reset) &&
5131	    (!adev->gfx.srbm_soft_reset))
5132		return 0;
5133
5134	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5135
5136	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5137	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5138	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5139	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5140		int i;
5141
5142		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5143			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5144
5145			mutex_lock(&adev->srbm_mutex);
5146			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5147			gfx_v8_0_deactivate_hqd(adev, 2);
5148			vi_srbm_select(adev, 0, 0, 0, 0);
5149			mutex_unlock(&adev->srbm_mutex);
5150		}
5151		gfx_v8_0_kiq_resume(adev);
5152		gfx_v8_0_kcq_resume(adev);
5153	}
5154
5155	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5156	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5157		gfx_v8_0_cp_gfx_resume(adev);
5158
5159	gfx_v8_0_cp_test_all_rings(adev);
5160
5161	adev->gfx.rlc.funcs->start(adev);
5162
5163	return 0;
5164}
5165
5166/**
5167 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5168 *
5169 * @adev: amdgpu_device pointer
5170 *
5171 * Fetches a GPU clock counter snapshot.
5172 * Returns the 64 bit clock counter snapshot.
5173 */
5174static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5175{
5176	uint64_t clock;
5177
5178	mutex_lock(&adev->gfx.gpu_clock_mutex);
5179	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
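	/* Writing the capture register is assumed to latch the free-running
	 * 64-bit counter into the LSB/MSB pair, so the two 32-bit reads
	 * below see a coherent snapshot; gpu_clock_mutex serializes
	 * concurrent captures.
	 */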
5180	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5181		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5182	mutex_unlock(&adev->gfx.gpu_clock_mutex);
5183	return clock;
5184}
5185
5186static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5187					  uint32_t vmid,
5188					  uint32_t gds_base, uint32_t gds_size,
5189					  uint32_t gws_base, uint32_t gws_size,
5190					  uint32_t oa_base, uint32_t oa_size)
5191{
5192	/* GDS Base */
5193	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5194	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5195				WRITE_DATA_DST_SEL(0)));
5196	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
5197	amdgpu_ring_write(ring, 0);
5198	amdgpu_ring_write(ring, gds_base);
5199
5200	/* GDS Size */
5201	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5202	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5203				WRITE_DATA_DST_SEL(0)));
5204	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
5205	amdgpu_ring_write(ring, 0);
5206	amdgpu_ring_write(ring, gds_size);
5207
5208	/* GWS */
5209	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5210	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5211				WRITE_DATA_DST_SEL(0)));
5212	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
5213	amdgpu_ring_write(ring, 0);
5214	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5215
5216	/* OA */
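	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
	 * mask of oa_size bits starting at bit oa_base; e.g. oa_base = 4,
	 * oa_size = 4 gives 0x100 - 0x10 = 0xf0 (editorial worked example).
	 */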
5217	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5218	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5219				WRITE_DATA_DST_SEL(0)));
5220	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
5221	amdgpu_ring_write(ring, 0);
5222	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
5223}
5224
5225static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5226{
5227	WREG32(mmSQ_IND_INDEX,
5228		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5229		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5230		(address << SQ_IND_INDEX__INDEX__SHIFT) |
5231		(SQ_IND_INDEX__FORCE_READ_MASK));
5232	return RREG32(mmSQ_IND_DATA);
5233}
5234
5235static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5236			   uint32_t wave, uint32_t thread,
5237			   uint32_t regno, uint32_t num, uint32_t *out)
5238{
5239	WREG32(mmSQ_IND_INDEX,
5240		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5241		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5242		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
5243		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5244		(SQ_IND_INDEX__FORCE_READ_MASK) |
5245		(SQ_IND_INDEX__AUTO_INCR_MASK));
5246	while (num--)
5247		*(out++) = RREG32(mmSQ_IND_DATA);
5248}
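
/* Editorial note: both helpers drive the SQ_IND_INDEX/SQ_IND_DATA
 * indirect-access pair. The index write selects a (simd, wave, register)
 * slot, FORCE_READ makes the read valid even for an inactive wave, and
 * AUTO_INCR bumps the index on every SQ_IND_DATA read, which is what lets
 * wave_read_regs() stream num consecutive registers in its while loop.
 */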
5249
5250static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
5251{
5252	/* type 0 wave data */
5253	dst[(*no_fields)++] = 0;
5254	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
5255	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
5256	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
5257	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
5258	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
5259	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
5260	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
5261	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
5262	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
5263	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
5264	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
5265	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
5266	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
5267	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
5268	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
5269	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
5270	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
5271	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
5272}
5273
5274static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
5275				     uint32_t wave, uint32_t start,
5276				     uint32_t size, uint32_t *dst)
5277{
5278	wave_read_regs(
5279		adev, simd, wave, 0,
5280		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
5281}
5282
5283
5284static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5285	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5286	.select_se_sh = &gfx_v8_0_select_se_sh,
5287	.read_wave_data = &gfx_v8_0_read_wave_data,
5288	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5289	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5290};
5291
5292static int gfx_v8_0_early_init(void *handle)
5293{
5294	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5295
5296	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5297	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
5298	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5299	gfx_v8_0_set_ring_funcs(adev);
5300	gfx_v8_0_set_irq_funcs(adev);
5301	gfx_v8_0_set_gds_init(adev);
5302	gfx_v8_0_set_rlc_funcs(adev);
5303
5304	return 0;
5305}
5306
5307static int gfx_v8_0_late_init(void *handle)
5308{
5309	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5310	int r;
5311
5312	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5313	if (r)
5314		return r;
5315
5316	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5317	if (r)
5318		return r;
5319
5320	/* requires IBs, so do this in late init, after the IB pool is initialized */
5321	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
5322	if (r)
5323		return r;
5324
5325	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
5326	if (r) {
5327		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
5328		return r;
5329	}
5330
5331	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
5332	if (r) {
5333		DRM_ERROR(
5334			"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
5335			r);
5336		return r;
5337	}
5338
5339	return 0;
5340}
5341
5342static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5343						       bool enable)
5344{
5345	if (((adev->asic_type == CHIP_POLARIS11) ||
5346	    (adev->asic_type == CHIP_POLARIS12) ||
5347	    (adev->asic_type == CHIP_VEGAM)) &&
5348	    adev->powerplay.pp_funcs->set_powergating_by_smu)
5349		/* Send msg to SMU via Powerplay */
5350		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5351
5352	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5353}
5354
5355static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
5356							bool enable)
5357{
5358	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
5359}
5360
5361static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
5362		bool enable)
5363{
5364	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
5365}
5366
5367static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
5368					  bool enable)
5369{
5370	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
5371}
5372
5373static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
5374						bool enable)
5375{
5376	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
5377
5378	/* Read any GFX register to wake up GFX. */
5379	if (!enable)
5380		RREG32(mmDB_RENDER_CONTROL);
5381}
5382
5383static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5384					  bool enable)
5385{
5386	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5387		cz_enable_gfx_cg_power_gating(adev, true);
5388		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5389			cz_enable_gfx_pipeline_power_gating(adev, true);
5390	} else {
5391		cz_enable_gfx_cg_power_gating(adev, false);
5392		cz_enable_gfx_pipeline_power_gating(adev, false);
5393	}
5394}
5395
5396static int gfx_v8_0_set_powergating_state(void *handle,
5397					  enum amd_powergating_state state)
5398{
5399	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5400	bool enable = (state == AMD_PG_STATE_GATE);
5401
5402	if (amdgpu_sriov_vf(adev))
5403		return 0;
5404
5405	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5406				AMD_PG_SUPPORT_RLC_SMU_HS |
5407				AMD_PG_SUPPORT_CP |
5408				AMD_PG_SUPPORT_GFX_DMG))
5409		amdgpu_gfx_rlc_enter_safe_mode(adev);
5410	switch (adev->asic_type) {
5411	case CHIP_CARRIZO:
5412	case CHIP_STONEY:
5413
5414		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5415			cz_enable_sck_slow_down_on_power_up(adev, true);
5416			cz_enable_sck_slow_down_on_power_down(adev, true);
5417		} else {
5418			cz_enable_sck_slow_down_on_power_up(adev, false);
5419			cz_enable_sck_slow_down_on_power_down(adev, false);
5420		}
5421		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5422			cz_enable_cp_power_gating(adev, true);
5423		else
5424			cz_enable_cp_power_gating(adev, false);
5425
5426		cz_update_gfx_cg_power_gating(adev, enable);
5427
5428		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5429			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5430		else
5431			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5432
5433		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5434			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5435		else
5436			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5437		break;
5438	case CHIP_POLARIS11:
5439	case CHIP_POLARIS12:
5440	case CHIP_VEGAM:
5441		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5442			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5443		else
5444			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5445
5446		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5447			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5448		else
5449			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5450
5451		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
5452			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
5453		else
5454			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
5455		break;
5456	default:
5457		break;
5458	}
5459	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5460				AMD_PG_SUPPORT_RLC_SMU_HS |
5461				AMD_PG_SUPPORT_CP |
5462				AMD_PG_SUPPORT_GFX_DMG))
5463		amdgpu_gfx_rlc_exit_safe_mode(adev);
5464	return 0;
5465}
5466
5467static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
5468{
5469	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5470	int data;
5471
5472	if (amdgpu_sriov_vf(adev))
5473		*flags = 0;
5474
5475	/* AMD_CG_SUPPORT_GFX_MGCG */
5476	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5477	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
5478		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5479
5480	/* AMD_CG_SUPPORT_GFX_CGCG */
5481	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5482	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5483		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5484
5485	/* AMD_CG_SUPPORT_GFX_CGLS */
5486	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5487		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5488
5489	/* AMD_CG_SUPPORT_GFX_CGTS */
5490	data = RREG32(mmCGTS_SM_CTRL_REG);
5491	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
5492		*flags |= AMD_CG_SUPPORT_GFX_CGTS;
5493
5494	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
5495	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
5496		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
5497
5498	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5499	data = RREG32(mmRLC_MEM_SLP_CNTL);
5500	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5501		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5502
5503	/* AMD_CG_SUPPORT_GFX_CP_LS */
5504	data = RREG32(mmCP_MEM_SLP_CNTL);
5505	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5506		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5507}
5508
5509static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
5510				     uint32_t reg_addr, uint32_t cmd)
5511{
5512	uint32_t data;
5513
5514	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5515
5516	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5517	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5518
5519	data = RREG32(mmRLC_SERDES_WR_CTRL);
5520	if (adev->asic_type == CHIP_STONEY)
5521		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5522			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5523			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5524			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5525			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5526			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5527			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5528			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5529			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5530	else
5531		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5532			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5533			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5534			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5535			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5536			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5537			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5538			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5539			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
5540			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
5541			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5542	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
5543		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
5544		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
5545		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
5546
5547	WREG32(mmRLC_SERDES_WR_CTRL, data);
5548}
5549
5550#define MSG_ENTER_RLC_SAFE_MODE     1
5551#define MSG_EXIT_RLC_SAFE_MODE      0
5552#define RLC_GPR_REG2__REQ_MASK 0x00000001
5553#define RLC_GPR_REG2__REQ__SHIFT 0
5554#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
5555#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5556
5557static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5558{
5559	uint32_t rlc_setting;
5560
5561	rlc_setting = RREG32(mmRLC_CNTL);
5562	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5563		return false;
5564
5565	return true;
5566}
5567
5568static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
5569{
5570	uint32_t data;
5571	unsigned i;
5572	data = RREG32(mmRLC_CNTL);
5573	data |= RLC_SAFE_MODE__CMD_MASK;
5574	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5575	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5576	WREG32(mmRLC_SAFE_MODE, data);
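	/* Editorial interpretation: this is a doorbell-style handshake where
	 * MESSAGE = 1 requests safe-mode entry and CMD is the request strobe.
	 * The first loop below waits for the GFX clock/power status bits to
	 * report on; the second waits for the RLC to clear CMD as its ack.
	 */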
5577
5578	/* wait for RLC_SAFE_MODE */
5579	for (i = 0; i < adev->usec_timeout; i++) {
5580		if ((RREG32(mmRLC_GPM_STAT) &
5581		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5582		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
5583		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5584		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
5585			break;
5586		udelay(1);
5587	}
5588	for (i = 0; i < adev->usec_timeout; i++) {
5589		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5590			break;
5591		udelay(1);
5592	}
5593}
5594
5595static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
5596{
5597	uint32_t data;
5598	unsigned i;
5599
5600	data = RREG32(mmRLC_CNTL);
5601	data |= RLC_SAFE_MODE__CMD_MASK;
5602	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5603	WREG32(mmRLC_SAFE_MODE, data);
5604
5605	for (i = 0; i < adev->usec_timeout; i++) {
5606		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5607			break;
5608		udelay(1);
5609	}
5610}
5611
5612static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5613{
5614	u32 data;
5615
5616	if (amdgpu_sriov_is_pp_one_vf(adev))
5617		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
5618	else
5619		data = RREG32(mmRLC_SPM_VMID);
5620
5621	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
5622	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
5623
5624	if (amdgpu_sriov_is_pp_one_vf(adev))
5625		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
5626	else
5627		WREG32(mmRLC_SPM_VMID, data);
5628}
5629
5630static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
5631	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
5632	.set_safe_mode = gfx_v8_0_set_safe_mode,
5633	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
5634	.init = gfx_v8_0_rlc_init,
5635	.get_csb_size = gfx_v8_0_get_csb_size,
5636	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
5637	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
5638	.resume = gfx_v8_0_rlc_resume,
5639	.stop = gfx_v8_0_rlc_stop,
5640	.reset = gfx_v8_0_rlc_reset,
5641	.start = gfx_v8_0_rlc_start,
5642	.update_spm_vmid = gfx_v8_0_update_spm_vmid
5643};
5644
5645static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5646						      bool enable)
5647{
5648	uint32_t temp, data;
5649
5650	amdgpu_gfx_rlc_enter_safe_mode(adev);
5651
5652	/* It is disabled by HW by default */
5653	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
5654		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5655			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
5656				/* 1 - RLC memory Light sleep */
5657				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
5658
5659			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
5660				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
5661		}
5662
5663		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
5664		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5665		if (adev->flags & AMD_IS_APU)
5666			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5667				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5668				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
5669		else
5670			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5671				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5672				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5673				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5674
5675		if (temp != data)
5676			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5677
5678		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5679		gfx_v8_0_wait_for_rlc_serdes(adev);
5680
5681		/* 5 - clear mgcg override */
5682		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5683
5684		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
5685			/* 6 - Enable CGTS (Tree Shade) MGCG/MGLS */
5686			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5687			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
5688			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
5689			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
5690			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
5691			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
5692			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
5693				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
5694			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
5695			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
5696			if (temp != data)
5697				WREG32(mmCGTS_SM_CTRL_REG, data);
5698		}
5699		udelay(50);
5700
5701		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5702		gfx_v8_0_wait_for_rlc_serdes(adev);
5703	} else {
5704		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
5705		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5706		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5707				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5708				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5709				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5710		if (temp != data)
5711			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5712
5713		/* 2 - disable MGLS in RLC */
5714		data = RREG32(mmRLC_MEM_SLP_CNTL);
5715		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5716			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5717			WREG32(mmRLC_MEM_SLP_CNTL, data);
5718		}
5719
5720		/* 3 - disable MGLS in CP */
5721		data = RREG32(mmCP_MEM_SLP_CNTL);
5722		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5723			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5724			WREG32(mmCP_MEM_SLP_CNTL, data);
5725		}
5726
5727		/* 4 - Disable CGTS (Tree Shade) MGCG and MGLS */
5728		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5729		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
5730				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
5731		if (temp != data)
5732			WREG32(mmCGTS_SM_CTRL_REG, data);
5733
5734		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5735		gfx_v8_0_wait_for_rlc_serdes(adev);
5736
5737		/* 6 - set mgcg override */
5738		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5739
5740		udelay(50);
5741
5742		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5743		gfx_v8_0_wait_for_rlc_serdes(adev);
5744	}
5745
5746	amdgpu_gfx_rlc_exit_safe_mode(adev);
5747}
5748
5749static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5750						      bool enable)
5751{
5752	uint32_t temp, temp1, data, data1;
5753
5754	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5755
5756	amdgpu_gfx_rlc_enter_safe_mode(adev);
5757
5758	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5759		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5760		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
5761		if (temp1 != data1)
5762			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5763
5764		/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5765		gfx_v8_0_wait_for_rlc_serdes(adev);
5766
5767		/* 2 - clear cgcg override */
5768		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5769
5770		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5771		gfx_v8_0_wait_for_rlc_serdes(adev);
5772
5773		/* 3 - write cmd to set CGLS */
5774		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
5775
5776		/* 4 - enable cgcg */
5777		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5778
5779		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5780			/* enable cgls*/
5781			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5782
5783			temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5784			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
5785
5786			if (temp1 != data1)
5787				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5788		} else {
5789			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5790		}
5791
5792		if (temp != data)
5793			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5794
5795		/* 5 - enable cntx_empty_int_enable/cntx_busy_int_enable/
5796		 * Cmp_busy/GFX_Idle interrupts
5797		 */
5798		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5799	} else {
5800		/* disable cntx_empty_int_enable & GFX Idle interrupt */
5801		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
5802
5803		/* TEST CGCG */
5804		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5805		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
5806				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
5807		if (temp1 != data1)
5808			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5809
5810		/* read gfx register to wake up cgcg */
5811		RREG32(mmCB_CGTT_SCLK_CTRL);
5812		RREG32(mmCB_CGTT_SCLK_CTRL);
5813		RREG32(mmCB_CGTT_SCLK_CTRL);
5814		RREG32(mmCB_CGTT_SCLK_CTRL);
5815
5816		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5817		gfx_v8_0_wait_for_rlc_serdes(adev);
5818
5819		/* write cmd to Set CGCG Override */
5820		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5821
5822		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5823		gfx_v8_0_wait_for_rlc_serdes(adev);
5824
5825		/* write cmd to Clear CGLS */
5826		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
5827
5828		/* disable cgcg, cgls should be disabled too. */
5829		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
5830			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5831		if (temp != data)
5832			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5833		/* enable interrupts again for PG */
5834		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5835	}
5836
5837	gfx_v8_0_wait_for_rlc_serdes(adev);
5838
5839	amdgpu_gfx_rlc_exit_safe_mode(adev);
5840}
5841static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5842					    bool enable)
5843{
5844	if (enable) {
5845		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
5846		 * ===  MGCG + MGLS + TS(CG/LS) ===
5847		 */
5848		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5849		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5850	} else {
5851		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
5852		 * ===  CGCG + CGLS ===
5853		 */
5854		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5855		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5856	}
5857	return 0;
5858}
5859
5860static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5861					  enum amd_clockgating_state state)
5862{
5863	uint32_t msg_id, pp_state = 0;
5864	uint32_t pp_support_state = 0;
5865
5866	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5867		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5868			pp_support_state = PP_STATE_SUPPORT_LS;
5869			pp_state = PP_STATE_LS;
5870		}
5871		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5872			pp_support_state |= PP_STATE_SUPPORT_CG;
5873			pp_state |= PP_STATE_CG;
5874		}
5875		if (state == AMD_CG_STATE_UNGATE)
5876			pp_state = 0;
5877
5878		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5879				PP_BLOCK_GFX_CG,
5880				pp_support_state,
5881				pp_state);
5882		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5883			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5884	}
5885
5886	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5887		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5888			pp_support_state = PP_STATE_SUPPORT_LS;
5889			pp_state = PP_STATE_LS;
5890		}
5891
5892		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5893			pp_support_state |= PP_STATE_SUPPORT_CG;
5894			pp_state |= PP_STATE_CG;
5895		}
5896
5897		if (state == AMD_CG_STATE_UNGATE)
5898			pp_state = 0;
5899
5900		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5901				PP_BLOCK_GFX_MG,
5902				pp_support_state,
5903				pp_state);
5904		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5905			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5906	}
5907
5908	return 0;
5909}
5910
5911static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
5912					  enum amd_clockgating_state state)
5913{
5914
5915	uint32_t msg_id, pp_state = 0;
5916	uint32_t pp_support_state = 0;
5917
5918	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5919		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5920			pp_support_state = PP_STATE_SUPPORT_LS;
5921			pp_state = PP_STATE_LS;
5922		}
5923		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5924			pp_support_state |= PP_STATE_SUPPORT_CG;
5925			pp_state |= PP_STATE_CG;
5926		}
5927		if (state == AMD_CG_STATE_UNGATE)
5928			pp_state = 0;
5929
5930		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5931				PP_BLOCK_GFX_CG,
5932				pp_support_state,
5933				pp_state);
5934		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5935			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5936	}
5937
5938	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
5939		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5940			pp_support_state = PP_STATE_SUPPORT_LS;
5941			pp_state = PP_STATE_LS;
5942		}
5943		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5944			pp_support_state |= PP_STATE_SUPPORT_CG;
5945			pp_state |= PP_STATE_CG;
5946		}
5947		if (state == AMD_CG_STATE_UNGATE)
5948			pp_state = 0;
5949
5950		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5951				PP_BLOCK_GFX_3D,
5952				pp_support_state,
5953				pp_state);
5954		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5955			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5956	}
5957
5958	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5959		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5960			pp_support_state = PP_STATE_SUPPORT_LS;
5961			pp_state = PP_STATE_LS;
5962		}
5963
5964		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5965			pp_support_state |= PP_STATE_SUPPORT_CG;
5966			pp_state |= PP_STATE_CG;
5967		}
5968
5969		if (state == AMD_CG_STATE_UNGATE)
5970			pp_state = 0;
5971
5972		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5973				PP_BLOCK_GFX_MG,
5974				pp_support_state,
5975				pp_state);
5976		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5977			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5978	}
5979
5980	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
5981		pp_support_state = PP_STATE_SUPPORT_LS;
5982
5983		if (state == AMD_CG_STATE_UNGATE)
5984			pp_state = 0;
5985		else
5986			pp_state = PP_STATE_LS;
5987
5988		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5989				PP_BLOCK_GFX_RLC,
5990				pp_support_state,
5991				pp_state);
5992		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5993			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5994	}
5995
5996	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
5997		pp_support_state = PP_STATE_SUPPORT_LS;
5998
5999		if (state == AMD_CG_STATE_UNGATE)
6000			pp_state = 0;
6001		else
6002			pp_state = PP_STATE_LS;
6003		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6004			PP_BLOCK_GFX_CP,
6005			pp_support_state,
6006			pp_state);
6007		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
6008			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
6009	}
6010
6011	return 0;
6012}
6013
6014static int gfx_v8_0_set_clockgating_state(void *handle,
6015					  enum amd_clockgating_state state)
6016{
6017	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6018
6019	if (amdgpu_sriov_vf(adev))
6020		return 0;
6021
6022	switch (adev->asic_type) {
6023	case CHIP_FIJI:
6024	case CHIP_CARRIZO:
6025	case CHIP_STONEY:
6026		gfx_v8_0_update_gfx_clock_gating(adev,
6027						 state == AMD_CG_STATE_GATE);
6028		break;
6029	case CHIP_TONGA:
6030		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
6031		break;
6032	case CHIP_POLARIS10:
6033	case CHIP_POLARIS11:
6034	case CHIP_POLARIS12:
6035	case CHIP_VEGAM:
6036		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6037		break;
6038	default:
6039		break;
6040	}
6041	return 0;
6042}
6043
6044static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
6045{
6046	return ring->adev->wb.wb[ring->rptr_offs];
6047}
6048
6049static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
6050{
6051	struct amdgpu_device *adev = ring->adev;
6052
6053	if (ring->use_doorbell)
6054		/* XXX check if swapping is necessary on BE */
6055		return ring->adev->wb.wb[ring->wptr_offs];
6056	else
6057		return RREG32(mmCP_RB0_WPTR);
6058}
6059
6060static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
6061{
6062	struct amdgpu_device *adev = ring->adev;
6063
6064	if (ring->use_doorbell) {
6065		/* XXX check if swapping is necessary on BE */
6066		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6067		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6068	} else {
6069		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
6070		(void)RREG32(mmCP_RB0_WPTR);
6071	}
6072}
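/*
 * Illustrative note (not part of the original source): on the doorbell
 * path the new wptr is published through the write-back page first so the
 * CP can re-read it later, and the doorbell write is what actually kicks
 * the CP; on the MMIO path the read-back of mmCP_RB0_WPTR just flushes
 * the posted register write.
 */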
6073
6074static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
6075{
6076	u32 ref_and_mask, reg_mem_engine;
6077
6078	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
6079	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
6080		switch (ring->me) {
6081		case 1:
6082			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
6083			break;
6084		case 2:
6085			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
6086			break;
6087		default:
6088			return;
6089		}
6090		reg_mem_engine = 0;
6091	} else {
6092		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
6093		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
6094	}
6095
6096	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6097	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
6098				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
6099				 reg_mem_engine));
6100	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
6101	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
6102	amdgpu_ring_write(ring, ref_and_mask);
6103	amdgpu_ring_write(ring, ref_and_mask);
6104	amdgpu_ring_write(ring, 0x20); /* poll interval */
6105}
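/*
 * Illustrative example (not from the original source): assuming the CPn
 * "flush done" bits are consecutive, a compute ring on ME1 pipe 2 polls
 *
 *	ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << 2;
 *
 * i.e. the CP4 bit, so MEC1's four pipes map onto CP2..CP5 and MEC2's
 * onto CP6..CP9.
 */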
6106
6107static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
6108{
6109	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6110	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
6111		EVENT_INDEX(4));
6112
6113	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6114	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
6115		EVENT_INDEX(0));
6116}
6117
6118static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
6119					struct amdgpu_job *job,
6120					struct amdgpu_ib *ib,
6121					uint32_t flags)
6122{
6123	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6124	u32 header, control = 0;
6125
6126	if (ib->flags & AMDGPU_IB_FLAG_CE)
6127		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
6128	else
6129		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
6130
6131	control |= ib->length_dw | (vmid << 24);
6132
6133	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
6134		control |= INDIRECT_BUFFER_PRE_ENB(1);
6135
6136		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
6137			gfx_v8_0_ring_emit_de_meta(ring);
6138	}
6139
6140	amdgpu_ring_write(ring, header);
6141	amdgpu_ring_write(ring,
6142#ifdef __BIG_ENDIAN
6143			  (2 << 0) |
6144#endif
6145			  (ib->gpu_addr & 0xFFFFFFFC));
6146	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6147	amdgpu_ring_write(ring, control);
6148}
6149
6150static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
6151					  struct amdgpu_job *job,
6152					  struct amdgpu_ib *ib,
6153					  uint32_t flags)
6154{
6155	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6156	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
6157
6158	/* Currently there is a high likelihood of getting a wave ID mismatch
6159	 * between ME and GDS, leading to a HW deadlock, because ME generates
6160	 * different wave IDs than the GDS expects. This happens randomly when
6161	 * at least 5 compute pipes use GDS ordered append. The wave IDs that
6162	 * ME generates are also wrong after suspend/resume. Both are probably
6163	 * bugs somewhere else in the kernel driver.
6164	 *
6165	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets the wave ID counters in ME
6166	 * and GDS to 0 for this ring (me/pipe).
6167	 */
6168	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
6169		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
6170		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
6171		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
6172	}
6173
6174	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
6175	amdgpu_ring_write(ring,
6176#ifdef __BIG_ENDIAN
6177				(2 << 0) |
6178#endif
6179				(ib->gpu_addr & 0xFFFFFFFC));
6180	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6181	amdgpu_ring_write(ring, control);
6182}
6183
6184static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
6185					 u64 seq, unsigned flags)
6186{
6187	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6188	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6189
6190	/* Workaround for cache flush problems: first send a dummy EOP
6191	 * event down the pipe with a seq value one below the real one.
6192	 */
6193	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6194	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6195				 EOP_TC_ACTION_EN |
6196				 EOP_TC_WB_ACTION_EN |
6197				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6198				 EVENT_INDEX(5)));
6199	amdgpu_ring_write(ring, addr & 0xfffffffc);
6200	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6201				DATA_SEL(1) | INT_SEL(0));
6202	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
6203	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
6204
6205	/* Then send the real EOP event down the pipe:
6206	 * EVENT_WRITE_EOP - flush caches, send int */
6207	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6208	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6209				 EOP_TC_ACTION_EN |
6210				 EOP_TC_WB_ACTION_EN |
6211				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6212				 EVENT_INDEX(5)));
6213	amdgpu_ring_write(ring, addr & 0xfffffffc);
6214	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6215			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6216	amdgpu_ring_write(ring, lower_32_bits(seq));
6217	amdgpu_ring_write(ring, upper_32_bits(seq));
6219}
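/*
 * Illustrative note (not part of the original source): in EVENT_WRITE_EOP,
 * DATA_SEL(1) writes a 32-bit fence value, DATA_SEL(2) a 64-bit one, and
 * INT_SEL(2) raises an interrupt once the write has confirmed; the dummy
 * EOP above uses DATA_SEL(1)/INT_SEL(0) so it only primes the caches
 * without signalling anyone.
 */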
6220
6221static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
6222{
6223	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6224	uint32_t seq = ring->fence_drv.sync_seq;
6225	uint64_t addr = ring->fence_drv.gpu_addr;
6226
6227	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6228	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
6229				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
6230				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
6231	amdgpu_ring_write(ring, addr & 0xfffffffc);
6232	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
6233	amdgpu_ring_write(ring, seq);
6234	amdgpu_ring_write(ring, 0xffffffff);
6235	amdgpu_ring_write(ring, 4); /* poll interval */
6236}
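/*
 * Illustrative note (not part of the original source): this makes the CP
 * itself poll the fence write-back location until it equals sync_seq, so
 * everything previously signalled on this ring has completed before the
 * following packets execute; the 0xffffffff mask compares the full dword.
 */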
6237
6238static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
6239					unsigned vmid, uint64_t pd_addr)
6240{
6241	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6242
6243	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
6244
6245	/* wait for the invalidate to complete */
6246	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6247	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
6248				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
6249				 WAIT_REG_MEM_ENGINE(0))); /* me */
6250	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
6251	amdgpu_ring_write(ring, 0);
6252	amdgpu_ring_write(ring, 0); /* ref */
6253	amdgpu_ring_write(ring, 0); /* mask */
6254	amdgpu_ring_write(ring, 0x20); /* poll interval */
6255
6256	/* compute doesn't have PFP */
6257	if (usepfp) {
6258		/* sync PFP to ME, otherwise we might get invalid PFP reads */
6259		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
6260		amdgpu_ring_write(ring, 0x0);
6261	}
6262}
6263
6264static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
6265{
6266	return ring->adev->wb.wb[ring->wptr_offs];
6267}
6268
6269static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6270{
6271	struct amdgpu_device *adev = ring->adev;
6272
6273	/* XXX check if swapping is necessary on BE */
6274	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6275	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6276}
6277
6278static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
6279					     u64 addr, u64 seq,
6280					     unsigned flags)
6281{
6282	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6283	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6284
6285	/* RELEASE_MEM - flush caches, send int */
6286	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
6287	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6288				 EOP_TC_ACTION_EN |
6289				 EOP_TC_WB_ACTION_EN |
6290				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6291				 EVENT_INDEX(5)));
6292	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6293	amdgpu_ring_write(ring, addr & 0xfffffffc);
6294	amdgpu_ring_write(ring, upper_32_bits(addr));
6295	amdgpu_ring_write(ring, lower_32_bits(seq));
6296	amdgpu_ring_write(ring, upper_32_bits(seq));
6297}
6298
6299static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
6300					 u64 seq, unsigned int flags)
6301{
6302	/* we only allocate 32 bits for each seq write-back address */
6303	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
6304
6305	/* write fence seq to the "addr" */
6306	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6307	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6308				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6309	amdgpu_ring_write(ring, lower_32_bits(addr));
6310	amdgpu_ring_write(ring, upper_32_bits(addr));
6311	amdgpu_ring_write(ring, lower_32_bits(seq));
6312
6313	if (flags & AMDGPU_FENCE_FLAG_INT) {
6314		/* set register to trigger INT */
6315		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6316		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6317					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6318		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
6319		amdgpu_ring_write(ring, 0);
6320		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6321	}
6322}
6323
6324static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
6325{
6326	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
6327	amdgpu_ring_write(ring, 0);
6328}
6329
6330static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
6331{
6332	uint32_t dw2 = 0;
6333
6334	if (amdgpu_sriov_vf(ring->adev))
6335		gfx_v8_0_ring_emit_ce_meta(ring);
6336
6337	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
6338	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
6339		gfx_v8_0_ring_emit_vgt_flush(ring);
6340		/* set load_global_config & load_global_uconfig */
6341		dw2 |= 0x8001;
6342		/* set load_cs_sh_regs */
6343		dw2 |= 0x01000000;
6344		/* set load_per_context_state & load_gfx_sh_regs for GFX */
6345		dw2 |= 0x10002;
6346
6347		/* set load_ce_ram if a preamble is present */
6348		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
6349			dw2 |= 0x10000000;
6350	} else {
6351		/* still load_ce_ram if this is the first time the preamble is
6352		 * presented, even though no context switch happens.
6353		 */
6354		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
6355			dw2 |= 0x10000000;
6356	}
6357
6358	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6359	amdgpu_ring_write(ring, dw2);
6360	amdgpu_ring_write(ring, 0);
6361}
6362
6363static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
6364{
6365	unsigned ret;
6366
6367	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
6368	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
6369	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
6370	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
6371	ret = ring->wptr & ring->buf_mask;
6372	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
6373	return ret;
6374}
6375
6376static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
6377{
6378	unsigned cur;
6379
6380	BUG_ON(offset > ring->buf_mask);
6381	BUG_ON(ring->ring[offset] != 0x55aa55aa);
6382
6383	cur = (ring->wptr & ring->buf_mask) - 1;
6384	if (likely(cur > offset))
6385		ring->ring[offset] = cur - offset;
6386	else
6387		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
6388}
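/*
 * Illustrative example (not from the original source): with a 256-dword
 * ring (buf_mask 0xff), a COND_EXEC patched at offset 250 while wptr has
 * wrapped around to 10 gives cur = 9, so the else branch stores
 * 256 - 250 + 9 = 15, the skip distance measured across the wrap.
 */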
6389
6390static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6391				    uint32_t reg_val_offs)
6392{
6393	struct amdgpu_device *adev = ring->adev;
6394
6395	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6396	amdgpu_ring_write(ring, 0 |	/* src: register */
6397				(5 << 8) |	/* dst: memory */
6398				(1 << 20));	/* write confirm */
6399	amdgpu_ring_write(ring, reg);
6400	amdgpu_ring_write(ring, 0);
6401	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6402				reg_val_offs * 4));
6403	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6404				reg_val_offs * 4));
6405}
6406
6407static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6408				  uint32_t val)
6409{
6410	uint32_t cmd;
6411
6412	switch (ring->funcs->type) {
6413	case AMDGPU_RING_TYPE_GFX:
6414		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6415		break;
6416	case AMDGPU_RING_TYPE_KIQ:
6417		cmd = 1 << 16; /* no inc addr */
6418		break;
6419	default:
6420		cmd = WR_CONFIRM;
6421		break;
6422	}
6423
6424	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6425	amdgpu_ring_write(ring, cmd);
6426	amdgpu_ring_write(ring, reg);
6427	amdgpu_ring_write(ring, 0);
6428	amdgpu_ring_write(ring, val);
6429}
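/*
 * Illustrative note (not part of the original source): ENGINE_SEL picks
 * which CP engine performs the WRITE_DATA (1 selects the PFP, used on the
 * gfx ring; the default 0 selects ME/MEC), while the KIQ variant sets the
 * "no address increment" bit so repeated data words hit the same register.
 */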
6430
6431static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
6432{
6433	struct amdgpu_device *adev = ring->adev;
6434	uint32_t value = 0;
6435
6436	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6437	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6438	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6439	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6440	WREG32(mmSQ_CMD, value);
6441}
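/*
 * Illustrative note (not part of the original source): assuming the VI
 * SQ_CMD encoding, CMD 0x03 is a wave "kill" request and MODE 0x01
 * broadcasts it; with CHECK_VMID set, only waves owned by the passed-in
 * VMID are killed, which is what makes this a per-job soft recovery.
 */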
6442
6443static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6444						 enum amdgpu_interrupt_state state)
6445{
6446	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
6447		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6448}
6449
6450static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6451						     int me, int pipe,
6452						     enum amdgpu_interrupt_state state)
6453{
6454	u32 mec_int_cntl, mec_int_cntl_reg;
6455
6456	/*
6457	 * amdgpu controls only the first MEC. That's why this function only
6458	 * handles the setting of interrupts for this specific MEC. All other
6459	 * pipes' interrupts are set by amdkfd.
6460	 */
6461
6462	if (me == 1) {
6463		switch (pipe) {
6464		case 0:
6465			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
6466			break;
6467		case 1:
6468			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
6469			break;
6470		case 2:
6471			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
6472			break;
6473		case 3:
6474			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
6475			break;
6476		default:
6477			DRM_DEBUG("invalid pipe %d\n", pipe);
6478			return;
6479		}
6480	} else {
6481		DRM_DEBUG("invalid me %d\n", me);
6482		return;
6483	}
6484
6485	switch (state) {
6486	case AMDGPU_IRQ_STATE_DISABLE:
6487		mec_int_cntl = RREG32(mec_int_cntl_reg);
6488		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6489		WREG32(mec_int_cntl_reg, mec_int_cntl);
6490		break;
6491	case AMDGPU_IRQ_STATE_ENABLE:
6492		mec_int_cntl = RREG32(mec_int_cntl_reg);
6493		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6494		WREG32(mec_int_cntl_reg, mec_int_cntl);
6495		break;
6496	default:
6497		break;
6498	}
6499}
6500
6501static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6502					     struct amdgpu_irq_src *source,
6503					     unsigned type,
6504					     enum amdgpu_interrupt_state state)
6505{
6506	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
6507		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6508
6509	return 0;
6510}
6511
6512static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6513					      struct amdgpu_irq_src *source,
6514					      unsigned type,
6515					      enum amdgpu_interrupt_state state)
6516{
6517	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
6518		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6519
6520	return 0;
6521}
6522
6523static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6524					    struct amdgpu_irq_src *src,
6525					    unsigned type,
6526					    enum amdgpu_interrupt_state state)
6527{
6528	switch (type) {
6529	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6530		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
6531		break;
6532	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6533		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6534		break;
6535	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6536		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6537		break;
6538	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6539		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6540		break;
6541	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6542		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6543		break;
6544	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6545		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6546		break;
6547	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6548		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6549		break;
6550	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6551		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6552		break;
6553	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6554		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6555		break;
6556	default:
6557		break;
6558	}
6559	return 0;
6560}
6561
6562static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
6563					 struct amdgpu_irq_src *source,
6564					 unsigned int type,
6565					 enum amdgpu_interrupt_state state)
6566{
6567	int enable_flag;
6568
6569	switch (state) {
6570	case AMDGPU_IRQ_STATE_DISABLE:
6571		enable_flag = 0;
6572		break;
6573
6574	case AMDGPU_IRQ_STATE_ENABLE:
6575		enable_flag = 1;
6576		break;
6577
6578	default:
6579		return -EINVAL;
6580	}
6581
6582	WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6583	WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6584	WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6585	WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6586	WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6587	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6588		     enable_flag);
6589	WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6590		     enable_flag);
6591	WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6592		     enable_flag);
6593	WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6594		     enable_flag);
6595	WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6596		     enable_flag);
6597	WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6598		     enable_flag);
6599	WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6600		     enable_flag);
6601	WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6602		     enable_flag);
6603
6604	return 0;
6605}
6606
6607static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
6608				     struct amdgpu_irq_src *source,
6609				     unsigned int type,
6610				     enum amdgpu_interrupt_state state)
6611{
6612	int enable_flag;
6613
6614	switch (state) {
6615	case AMDGPU_IRQ_STATE_DISABLE:
6616		enable_flag = 1;
6617		break;
6618
6619	case AMDGPU_IRQ_STATE_ENABLE:
6620		enable_flag = 0;
6621		break;
6622
6623	default:
6624		return -EINVAL;
6625	}
6626
6627	WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
6628		     enable_flag);
6629
6630	return 0;
6631}
6632
6633static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
6634			    struct amdgpu_irq_src *source,
6635			    struct amdgpu_iv_entry *entry)
6636{
6637	int i;
6638	u8 me_id, pipe_id, queue_id;
6639	struct amdgpu_ring *ring;
6640
6641	DRM_DEBUG("IH: CP EOP\n");
6642	me_id = (entry->ring_id & 0x0c) >> 2;
6643	pipe_id = (entry->ring_id & 0x03) >> 0;
6644	queue_id = (entry->ring_id & 0x70) >> 4;
6645
6646	switch (me_id) {
6647	case 0:
6648		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6649		break;
6650	case 1:
6651	case 2:
6652		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6653			ring = &adev->gfx.compute_ring[i];
6654			/* Per-queue interrupt is supported for MEC starting from VI.
6655			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
6656			 */
6657			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6658				amdgpu_fence_process(ring);
6659		}
6660		break;
6661	}
6662	return 0;
6663}
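/*
 * Minimal sketch (not part of the original driver) of the ring_id
 * decoding used above and in gfx_v8_0_fault(); the IH entry packs
 * pipe in bits [1:0], me in [3:2] and queue in [6:4], e.g. 0x26
 * decodes to me 1, pipe 2, queue 2.
 */
static inline void gfx_v8_0_decode_ring_id(u8 ring_id, u8 *me, u8 *pipe,
					   u8 *queue)
{
	*me = (ring_id & 0x0c) >> 2;
	*pipe = ring_id & 0x03;
	*queue = (ring_id & 0x70) >> 4;
}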
6664
6665static void gfx_v8_0_fault(struct amdgpu_device *adev,
6666			   struct amdgpu_iv_entry *entry)
6667{
6668	u8 me_id, pipe_id, queue_id;
6669	struct amdgpu_ring *ring;
6670	int i;
6671
6672	me_id = (entry->ring_id & 0x0c) >> 2;
6673	pipe_id = (entry->ring_id & 0x03) >> 0;
6674	queue_id = (entry->ring_id & 0x70) >> 4;
6675
6676	switch (me_id) {
6677	case 0:
6678		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6679		break;
6680	case 1:
6681	case 2:
6682		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6683			ring = &adev->gfx.compute_ring[i];
6684			if (ring->me == me_id && ring->pipe == pipe_id &&
6685			    ring->queue == queue_id)
6686				drm_sched_fault(&ring->sched);
6687		}
6688		break;
6689	}
6690}
6691
6692static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
6693				 struct amdgpu_irq_src *source,
6694				 struct amdgpu_iv_entry *entry)
6695{
6696	DRM_ERROR("Illegal register access in command stream\n");
6697	gfx_v8_0_fault(adev, entry);
6698	return 0;
6699}
6700
6701static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
6702				  struct amdgpu_irq_src *source,
6703				  struct amdgpu_iv_entry *entry)
6704{
6705	DRM_ERROR("Illegal instruction in command stream\n");
6706	gfx_v8_0_fault(adev, entry);
6707	return 0;
6708}
6709
6710static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
6711				     struct amdgpu_irq_src *source,
6712				     struct amdgpu_iv_entry *entry)
6713{
6714	DRM_ERROR("CP EDC/ECC error detected.\n");
6715	return 0;
6716}
6717
6718static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
6719{
6720	u32 enc, se_id, sh_id, cu_id;
6721	char type[20];
6722	int sq_edc_source = -1;
6723
6724	enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6725	se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6726
6727	switch (enc) {
6728		case 0:
6729			DRM_INFO("SQ general purpose intr detected: "
6730					"se_id %d, immed_overflow %d, host_reg_overflow %d, "
6731					"host_cmd_overflow %d, cmd_timestamp %d, "
6732					"reg_timestamp %d, thread_trace_buff_full %d, "
6733					"wlt %d, thread_trace %d.\n",
6734					se_id,
6735					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6736					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6737					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6738					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6739					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6740					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6741					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6742					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6743					);
6744			break;
6745		case 1:
6746		case 2:
6747
6748			cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6749			sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6750
6751			/*
6752			 * This function can be called either directly from the ISR
6753			 * or from a BH; only in the BH case is it safe to take the
6754			 * mutex and read the SQ_EDC_INFO instance.
6755			 */
6756			if (in_task()) {
6757				mutex_lock(&adev->grbm_idx_mutex);
6758				gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
6759
6760				sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
6761
6762				gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6763				mutex_unlock(&adev->grbm_idx_mutex);
6764			}
6765
6766			if (enc == 1)
6767				sprintf(type, "instruction intr");
6768			else
6769				sprintf(type, "EDC/ECC error");
6770
6771			DRM_INFO(
6772				"SQ %s detected: "
6773					"se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
6774					"trap %s, sq_edc_info.source %s.\n",
6775					type, se_id, sh_id, cu_id,
6776					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
6777					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
6778					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
6779					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
6780					(sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
6781				);
6782			break;
6783		default:
6784			DRM_ERROR("SQ invalid encoding type.\n");
6785	}
6786}
6787
6788static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
6789{
6791	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
6792	struct sq_work *sq_work = container_of(work, struct sq_work, work);
6793
6794	gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
6795}
6796
6797static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6798			   struct amdgpu_irq_src *source,
6799			   struct amdgpu_iv_entry *entry)
6800{
6801	unsigned ih_data = entry->src_data[0];
6802
6803	/*
6804	 * Try to submit work so SQ_EDC_INFO can be accessed from
6805	 * the BH. If the previous work submission hasn't finished yet,
6806	 * just print whatever info is available directly from the ISR.
6807	 */
6808	if (work_pending(&adev->gfx.sq_work.work)) {
6809		gfx_v8_0_parse_sq_irq(adev, ih_data);
6810	} else {
6811		adev->gfx.sq_work.ih_data = ih_data;
6812		schedule_work(&adev->gfx.sq_work.work);
6813	}
6814
6815	return 0;
6816}
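/*
 * Illustrative note (not part of the original source): reading SQ_EDC_INFO
 * requires grbm_idx_mutex, which cannot be taken in interrupt context,
 * hence the handoff to the work item; the work_pending() check merely
 * avoids losing the event entirely while the bottom half is still busy.
 */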
6817
6818static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
6819{
6820	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
6821	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6822			  PACKET3_TC_ACTION_ENA |
6823			  PACKET3_SH_KCACHE_ACTION_ENA |
6824			  PACKET3_SH_ICACHE_ACTION_ENA |
6825			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
6826	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6827	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
6828	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
6829}
6830
6831static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
6832{
6833	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6834	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6835			  PACKET3_TC_ACTION_ENA |
6836			  PACKET3_SH_KCACHE_ACTION_ENA |
6837			  PACKET3_SH_ICACHE_ACTION_ENA |
6838			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
6839	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
6840	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
6841	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
6842	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
6843	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
6844}
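/*
 * Illustrative note (not part of the original source): SURFACE_SYNC (gfx)
 * and ACQUIRE_MEM (compute) request the same cache actions; ACQUIRE_MEM
 * just carries a 64-bit coherency base/size pair, hence the two extra
 * dwords. A CP_COHER_SIZE of 0xffffffff with base 0 covers the whole
 * address space.
 */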
6845
6846static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
6847	.name = "gfx_v8_0",
6848	.early_init = gfx_v8_0_early_init,
6849	.late_init = gfx_v8_0_late_init,
6850	.sw_init = gfx_v8_0_sw_init,
6851	.sw_fini = gfx_v8_0_sw_fini,
6852	.hw_init = gfx_v8_0_hw_init,
6853	.hw_fini = gfx_v8_0_hw_fini,
6854	.suspend = gfx_v8_0_suspend,
6855	.resume = gfx_v8_0_resume,
6856	.is_idle = gfx_v8_0_is_idle,
6857	.wait_for_idle = gfx_v8_0_wait_for_idle,
6858	.check_soft_reset = gfx_v8_0_check_soft_reset,
6859	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
6860	.soft_reset = gfx_v8_0_soft_reset,
6861	.post_soft_reset = gfx_v8_0_post_soft_reset,
6862	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
6863	.set_powergating_state = gfx_v8_0_set_powergating_state,
6864	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
6865};
6866
6867static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6868	.type = AMDGPU_RING_TYPE_GFX,
6869	.align_mask = 0xff,
6870	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6871	.support_64bit_ptrs = false,
6872	.get_rptr = gfx_v8_0_ring_get_rptr,
6873	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
6874	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
6875	.emit_frame_size = /* maximum 215 DWs when counting 16 IBs in */
6876		5 +  /* COND_EXEC */
6877		7 +  /* PIPELINE_SYNC */
6878		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
6879		12 +  /* FENCE for VM_FLUSH */
6880		20 + /* GDS switch */
6881		4 + /* double SWITCH_BUFFER,
6882		       the first COND_EXEC jumps to the place just
6883		       prior to this double SWITCH_BUFFER */
6884		5 + /* COND_EXEC */
6885		7 +	 /*	HDP_flush */
6886		4 +	 /*	VGT_flush */
6887		14 + /*	CE_META */
6888		31 + /*	DE_META */
6889		3 + /* CNTX_CTRL */
6890		5 + /* HDP_INVL */
6891		12 + 12 + /* FENCE x2 */
6892		2 + /* SWITCH_BUFFER */
6893		5, /* SURFACE_SYNC */
6894	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_gfx */
6895	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
6896	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
6897	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6898	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6899	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6900	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6901	.test_ring = gfx_v8_0_ring_test_ring,
6902	.test_ib = gfx_v8_0_ring_test_ib,
6903	.insert_nop = amdgpu_ring_insert_nop,
6904	.pad_ib = amdgpu_ring_generic_pad_ib,
6905	.emit_switch_buffer = gfx_v8_ring_emit_sb,
6906	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
6907	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
6908	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
6909	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6910	.soft_recovery = gfx_v8_0_ring_soft_recovery,
6911	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
6912};
6913
6914static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6915	.type = AMDGPU_RING_TYPE_COMPUTE,
6916	.align_mask = 0xff,
6917	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6918	.support_64bit_ptrs = false,
6919	.get_rptr = gfx_v8_0_ring_get_rptr,
6920	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
6921	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
6922	.emit_frame_size =
6923		20 + /* gfx_v8_0_ring_emit_gds_switch */
6924		7 + /* gfx_v8_0_ring_emit_hdp_flush */
6925		5 + /* hdp_invalidate */
6926		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6927		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
6928		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
6929		7, /* gfx_v8_0_emit_mem_sync_compute */
6930	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
6931	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
6932	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
6933	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6934	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6935	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6936	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
6937	.test_ring = gfx_v8_0_ring_test_ring,
6938	.test_ib = gfx_v8_0_ring_test_ib,
6939	.insert_nop = amdgpu_ring_insert_nop,
6940	.pad_ib = amdgpu_ring_generic_pad_ib,
6941	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6942	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
6943};
6944
6945static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
6946	.type = AMDGPU_RING_TYPE_KIQ,
6947	.align_mask = 0xff,
6948	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6949	.support_64bit_ptrs = false,
6950	.get_rptr = gfx_v8_0_ring_get_rptr,
6951	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
6952	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
6953	.emit_frame_size =
6954		20 + /* gfx_v8_0_ring_emit_gds_switch */
6955		7 + /* gfx_v8_0_ring_emit_hdp_flush */
6956		5 + /* hdp_invalidate */
6957		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6958		17 + /* gfx_v8_0_ring_emit_vm_flush */
6959		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6960	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
6961	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
6962	.test_ring = gfx_v8_0_ring_test_ring,
6963	.insert_nop = amdgpu_ring_insert_nop,
6964	.pad_ib = amdgpu_ring_generic_pad_ib,
6965	.emit_rreg = gfx_v8_0_ring_emit_rreg,
6966	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6967};
6968
6969static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
6970{
6971	int i;
6972
6973	adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
6974
6975	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6976		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
6977
6978	for (i = 0; i < adev->gfx.num_compute_rings; i++)
6979		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
6980}
6981
6982static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
6983	.set = gfx_v8_0_set_eop_interrupt_state,
6984	.process = gfx_v8_0_eop_irq,
6985};
6986
6987static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
6988	.set = gfx_v8_0_set_priv_reg_fault_state,
6989	.process = gfx_v8_0_priv_reg_irq,
6990};
6991
6992static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
6993	.set = gfx_v8_0_set_priv_inst_fault_state,
6994	.process = gfx_v8_0_priv_inst_irq,
6995};
6996
6997static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
6998	.set = gfx_v8_0_set_cp_ecc_int_state,
6999	.process = gfx_v8_0_cp_ecc_error_irq,
7000};
7001
7002static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
7003	.set = gfx_v8_0_set_sq_int_state,
7004	.process = gfx_v8_0_sq_irq,
7005};
7006
7007static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
7008{
7009	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7010	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
7011
7012	adev->gfx.priv_reg_irq.num_types = 1;
7013	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
7014
7015	adev->gfx.priv_inst_irq.num_types = 1;
7016	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
7017
7018	adev->gfx.cp_ecc_error_irq.num_types = 1;
7019	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
7020
7021	adev->gfx.sq_irq.num_types = 1;
7022	adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
7023}
7024
7025static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
7026{
7027	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
7028}
7029
7030static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
7031{
7032	/* init asic gds info */
7033	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
7034	adev->gds.gws_size = 64;
7035	adev->gds.oa_size = 16;
7036	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
7037}
7038
7039static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7040						 u32 bitmap)
7041{
7042	u32 data;
7043
7044	if (!bitmap)
7045		return;
7046
7047	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7048	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7049
7050	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
7051}
7052
7053static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7054{
7055	u32 data, mask;
7056
7057	data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
7058		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
7059
7060	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7061
7062	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
7063}
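/*
 * Illustrative example (not from the original source): with
 * max_cu_per_sh = 8 the generated mask is 0xff; if the harvested/user
 * bits read back as 0x0c, the active bitmap is ~0x0c & 0xff = 0xf3,
 * i.e. CUs 2 and 3 of this shader array are unavailable.
 */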
7064
7065static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
7066{
7067	int i, j, k, counter, active_cu_number = 0;
7068	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7069	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
7070	unsigned disable_masks[4 * 2];
7071	u32 ao_cu_num;
7072
7073	memset(cu_info, 0, sizeof(*cu_info));
7074
7075	if (adev->flags & AMD_IS_APU)
7076		ao_cu_num = 2;
7077	else
7078		ao_cu_num = adev->gfx.config.max_cu_per_sh;
7079
7080	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
7081
7082	mutex_lock(&adev->grbm_idx_mutex);
7083	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7084		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7085			mask = 1;
7086			ao_bitmap = 0;
7087			counter = 0;
7088			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
7089			if (i < 4 && j < 2)
7090				gfx_v8_0_set_user_cu_inactive_bitmap(
7091					adev, disable_masks[i * 2 + j]);
7092			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
7093			cu_info->bitmap[i][j] = bitmap;
7094
7095			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7096				if (bitmap & mask) {
7097					if (counter < ao_cu_num)
7098						ao_bitmap |= mask;
7099					counter++;
7100				}
7101				mask <<= 1;
7102			}
7103			active_cu_number += counter;
7104			if (i < 2 && j < 2)
7105				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7106			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
7107		}
7108	}
7109	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7110	mutex_unlock(&adev->grbm_idx_mutex);
7111
7112	cu_info->number = active_cu_number;
7113	cu_info->ao_cu_mask = ao_cu_mask;
7114	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7115	cu_info->max_waves_per_simd = 10;
7116	cu_info->max_scratch_slots_per_cu = 32;
7117	cu_info->wave_front_size = 64;
7118	cu_info->lds_size = 64;
7119}
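/*
 * Illustrative note (not part of the original source): ao_cu_mask packs
 * one byte of always-on CUs per shader array at bit offset i * 16 + j * 8,
 * so SE0/SH1 lands in bits 8..15 and SE1/SH0 in bits 16..23.
 */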
7120
7121const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
7122{
7123	.type = AMD_IP_BLOCK_TYPE_GFX,
7124	.major = 8,
7125	.minor = 0,
7126	.rev = 0,
7127	.funcs = &gfx_v8_0_ip_funcs,
7128};
7129
7130const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
7131{
7132	.type = AMD_IP_BLOCK_TYPE_GFX,
7133	.major = 8,
7134	.minor = 1,
7135	.rev = 0,
7136	.funcs = &gfx_v8_0_ip_funcs,
7137};
7138
7139static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
7140{
7141	uint64_t ce_payload_addr;
7142	int cnt_ce;
7143	union {
7144		struct vi_ce_ib_state regular;
7145		struct vi_ce_ib_state_chained_ib chained;
7146	} ce_payload = {};
7147
7148	if (ring->adev->virt.chained_ib_support) {
7149		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7150			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
7151		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
7152	} else {
7153		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7154			offsetof(struct vi_gfx_meta_data, ce_payload);
7155		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
7156	}
7157
7158	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
7159	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
7160				WRITE_DATA_DST_SEL(8) |
7161				WR_CONFIRM) |
7162				WRITE_DATA_CACHE_POLICY(0));
7163	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
7164	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
7165	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
7166}
7167
7168static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
7169{
7170	uint64_t de_payload_addr, gds_addr, csa_addr;
7171	int cnt_de;
7172	union {
7173		struct vi_de_ib_state regular;
7174		struct vi_de_ib_state_chained_ib chained;
7175	} de_payload = {};
7176
7177	csa_addr = amdgpu_csa_vaddr(ring->adev);
7178	gds_addr = csa_addr + 4096;
7179	if (ring->adev->virt.chained_ib_support) {
7180		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
7181		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
7182		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
7183		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
7184	} else {
7185		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
7186		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
7187		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
7188		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
7189	}
7190
7191	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
7192	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
7193				WRITE_DATA_DST_SEL(8) |
7194				WR_CONFIRM) |
7195				WRITE_DATA_CACHE_POLICY(0));
7196	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
7197	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
7198	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
7199}