Linux Audio

Check our new training course

Loading...
v4.6
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
 
 
 
  23#include <linux/firmware.h>
  24#include "drmP.h"
 
 
  25#include "amdgpu.h"
  26#include "amdgpu_gfx.h"
  27#include "vi.h"
 
  28#include "vid.h"
  29#include "amdgpu_ucode.h"
 
 
  30#include "clearstate_vi.h"
  31
  32#include "gmc/gmc_8_2_d.h"
  33#include "gmc/gmc_8_2_sh_mask.h"
  34
  35#include "oss/oss_3_0_d.h"
  36#include "oss/oss_3_0_sh_mask.h"
  37
  38#include "bif/bif_5_0_d.h"
  39#include "bif/bif_5_0_sh_mask.h"
  40
  41#include "gca/gfx_8_0_d.h"
  42#include "gca/gfx_8_0_enum.h"
  43#include "gca/gfx_8_0_sh_mask.h"
  44#include "gca/gfx_8_0_enum.h"
  45
  46#include "dce/dce_10_0_d.h"
  47#include "dce/dce_10_0_sh_mask.h"
  48
 
 
 
 
  49#define GFX8_NUM_GFX_RINGS     1
  50#define GFX8_NUM_COMPUTE_RINGS 8
  51
  52#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
  53#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
 
  54#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
  55
  56#define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
  57#define PIPE_CONFIG(x)					((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
  58#define TILE_SPLIT(x)					((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
  59#define MICRO_TILE_MODE_NEW(x)				((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
  60#define SAMPLE_SPLIT(x)					((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
  61#define BANK_WIDTH(x)					((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
  62#define BANK_HEIGHT(x)					((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
  63#define MACRO_TILE_ASPECT(x)				((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
  64#define NUM_BANKS(x)					((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
  65
  66#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
  67#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
  68#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
  69#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
  70#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
  71#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L
  72
  73/* BPM SERDES CMD */
  74#define SET_BPM_SERDES_CMD    1
  75#define CLE_BPM_SERDES_CMD    0
  76
  77/* BPM Register Address*/
  78enum {
  79	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
  80	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
  81	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
  82	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
  83	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
  84	BPM_REG_FGCG_MAX
  85};
  86
 
 
  87MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
  88MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
  89MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
  90MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
  91MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
  92MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
  93
  94MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
  95MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
  96MODULE_FIRMWARE("amdgpu/stoney_me.bin");
  97MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
  98MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
  99
 100MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
 101MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
 102MODULE_FIRMWARE("amdgpu/tonga_me.bin");
 103MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
 104MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
 105MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
 106
 107MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 108MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 109MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 110MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
 111MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 112
 113MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
 114MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
 115MODULE_FIRMWARE("amdgpu/fiji_me.bin");
 116MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
 117MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 118MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 119
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 120static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 121{
 122	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
 123	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
 124	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
 125	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
 126	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
 127	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
 128	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
 129	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
 130	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
 131	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 132	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 133	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 134	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 135	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 136	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 137	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 138};
 139
 140static const u32 golden_settings_tonga_a11[] =
 141{
 142	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
 143	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 144	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 145	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 146	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 147	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
 148	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 
 149	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 150	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 151	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 152	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 153	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
 154	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
 155	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
 156	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 157};
 158
 159static const u32 tonga_golden_common_all[] =
 160{
 161	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 162	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
 163	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
 164	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 165	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 166	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 167	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 168	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
 169};
 170
 171static const u32 tonga_mgcg_cgcg_init[] =
 172{
 173	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 174	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 175	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 176	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 177	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 178	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 179	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 180	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 181	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 182	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 183	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 184	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 185	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 186	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 187	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 188	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 189	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 190	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 191	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 192	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 193	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 194	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 195	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 196	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 197	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 198	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 199	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 200	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 201	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 202	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 203	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 204	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 205	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 206	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 207	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 208	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 209	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 210	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 211	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 212	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 213	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 214	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 215	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 216	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 217	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 218	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 219	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 220	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 221	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 222	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 223	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 224	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 225	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 226	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 227	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 228	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 229	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 230	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 231	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 232	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 233	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 234	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 235	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 236	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 237	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 238	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 239	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 240	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 241	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 242	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 243	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 244	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 245	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 246	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 247	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 248};
 249
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 250static const u32 fiji_golden_common_all[] =
 251{
 252	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 253	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
 254	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
 255	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 256	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 257	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 258	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 259	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
 260	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 261	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
 262};
 263
 264static const u32 golden_settings_fiji_a10[] =
 265{
 266	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 267	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 268	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 269	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 270	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 271	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 272	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 273	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 274	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 275	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
 276	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 277};
 278
 279static const u32 fiji_mgcg_cgcg_init[] =
 280{
 281	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 282	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 283	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 284	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 285	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 286	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 287	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 288	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 289	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 290	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 291	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 292	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 293	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 294	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 295	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 296	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 297	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 298	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 299	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 300	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 301	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 302	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 303	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 304	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 305	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 306	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 307	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 308	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 309	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 310	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 311	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 312	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 313	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 314	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 315	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 316};
 317
 318static const u32 golden_settings_iceland_a11[] =
 319{
 320	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 321	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 322	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
 323	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 324	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 325	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 326	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
 327	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 
 328	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 329	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 330	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 331	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 332	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
 333	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 334	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
 335};
 336
 337static const u32 iceland_golden_common_all[] =
 338{
 339	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 340	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 341	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 342	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 343	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 344	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 345	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 346	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
 347};
 348
 349static const u32 iceland_mgcg_cgcg_init[] =
 350{
 351	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 352	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 353	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 354	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 355	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
 356	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
 357	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
 358	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 359	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 360	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 361	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 362	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 363	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 364	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 365	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 366	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 367	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 368	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 369	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 370	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 371	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 372	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 373	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
 374	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 375	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 376	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 377	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 378	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 379	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 380	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 381	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 382	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 383	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 384	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 385	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 386	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 387	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 388	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 389	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 390	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 391	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 392	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 393	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 394	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 395	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 396	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 397	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 398	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 399	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 400	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 401	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 402	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 403	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 404	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 405	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 406	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 407	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 408	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 409	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 410	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 411	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 412	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 413	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 414	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 415};
 416
 417static const u32 cz_golden_settings_a11[] =
 418{
 419	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 420	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 421	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 422	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
 423	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 
 424	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 425	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
 
 426	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 427	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
 428	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
 429};
 430
 431static const u32 cz_golden_common_all[] =
 432{
 433	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 434	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 435	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 436	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 437	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 438	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 439	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 440	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
 441};
 442
 443static const u32 cz_mgcg_cgcg_init[] =
 444{
 445	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 446	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 447	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 448	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 449	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 450	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 451	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
 452	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 453	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 454	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 455	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 456	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 457	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 458	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 459	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 460	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 461	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 462	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 463	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 464	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 465	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 466	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 467	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 468	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 469	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 470	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 471	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 472	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 473	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 474	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 475	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 476	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 477	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 478	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 479	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 480	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 481	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 482	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 483	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 484	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 485	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 486	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 487	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 488	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 489	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 490	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 491	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 492	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 493	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 494	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 495	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 496	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 497	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 498	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 499	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 500	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 501	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 502	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 503	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 504	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 505	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 506	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 507	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 508	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 509	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 510	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 511	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 512	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 513	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 514	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 515	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 516	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 517	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 518	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 519	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 520};
 521
 522static const u32 stoney_golden_settings_a11[] =
 523{
 524	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 525	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 526	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 527	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 528	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 529	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 530  	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 531	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 532	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
 533	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
 534};
 535
 536static const u32 stoney_golden_common_all[] =
 537{
 538	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 539	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
 540	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 541	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
 542	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 543	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 544	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 545	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
 546};
 547
 548static const u32 stoney_mgcg_cgcg_init[] =
 549{
 550	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 551	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 552	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 553	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 554	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
 555	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 
 
 
 
 
 
 
 
 
 
 556};
 557
 558static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 559static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 560static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
 
 
 
 
 
 561
 562static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 563{
 564	switch (adev->asic_type) {
 565	case CHIP_TOPAZ:
 566		amdgpu_program_register_sequence(adev,
 567						 iceland_mgcg_cgcg_init,
 568						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
 569		amdgpu_program_register_sequence(adev,
 570						 golden_settings_iceland_a11,
 571						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 572		amdgpu_program_register_sequence(adev,
 573						 iceland_golden_common_all,
 574						 (const u32)ARRAY_SIZE(iceland_golden_common_all));
 575		break;
 576	case CHIP_FIJI:
 577		amdgpu_program_register_sequence(adev,
 578						 fiji_mgcg_cgcg_init,
 579						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
 580		amdgpu_program_register_sequence(adev,
 581						 golden_settings_fiji_a10,
 582						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
 583		amdgpu_program_register_sequence(adev,
 584						 fiji_golden_common_all,
 585						 (const u32)ARRAY_SIZE(fiji_golden_common_all));
 586		break;
 587
 588	case CHIP_TONGA:
 589		amdgpu_program_register_sequence(adev,
 590						 tonga_mgcg_cgcg_init,
 591						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
 592		amdgpu_program_register_sequence(adev,
 593						 golden_settings_tonga_a11,
 594						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
 595		amdgpu_program_register_sequence(adev,
 596						 tonga_golden_common_all,
 597						 (const u32)ARRAY_SIZE(tonga_golden_common_all));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 598		break;
 599	case CHIP_CARRIZO:
 600		amdgpu_program_register_sequence(adev,
 601						 cz_mgcg_cgcg_init,
 602						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
 603		amdgpu_program_register_sequence(adev,
 604						 cz_golden_settings_a11,
 605						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
 606		amdgpu_program_register_sequence(adev,
 607						 cz_golden_common_all,
 608						 (const u32)ARRAY_SIZE(cz_golden_common_all));
 609		break;
 610	case CHIP_STONEY:
 611		amdgpu_program_register_sequence(adev,
 612						 stoney_mgcg_cgcg_init,
 613						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
 614		amdgpu_program_register_sequence(adev,
 615						 stoney_golden_settings_a11,
 616						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
 617		amdgpu_program_register_sequence(adev,
 618						 stoney_golden_common_all,
 619						 (const u32)ARRAY_SIZE(stoney_golden_common_all));
 620		break;
 621	default:
 622		break;
 623	}
 624}
 625
 626static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
 627{
 628	int i;
 629
 630	adev->gfx.scratch.num_reg = 7;
 631	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 632	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
 633		adev->gfx.scratch.free[i] = true;
 634		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
 635	}
 636}
 637
 638static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 639{
 640	struct amdgpu_device *adev = ring->adev;
 641	uint32_t scratch;
 642	uint32_t tmp = 0;
 643	unsigned i;
 644	int r;
 645
 646	r = amdgpu_gfx_scratch_get(adev, &scratch);
 647	if (r) {
 648		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
 649		return r;
 650	}
 651	WREG32(scratch, 0xCAFEDEAD);
 652	r = amdgpu_ring_alloc(ring, 3);
 653	if (r) {
 654		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
 655			  ring->idx, r);
 656		amdgpu_gfx_scratch_free(adev, scratch);
 657		return r;
 658	}
 659	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 660	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 661	amdgpu_ring_write(ring, 0xDEADBEEF);
 662	amdgpu_ring_commit(ring);
 663
 664	for (i = 0; i < adev->usec_timeout; i++) {
 665		tmp = RREG32(scratch);
 666		if (tmp == 0xDEADBEEF)
 667			break;
 668		DRM_UDELAY(1);
 669	}
 670	if (i < adev->usec_timeout) {
 671		DRM_INFO("ring test on %d succeeded in %d usecs\n",
 672			 ring->idx, i);
 673	} else {
 674		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
 675			  ring->idx, scratch, tmp);
 676		r = -EINVAL;
 677	}
 
 
 
 
 
 678	amdgpu_gfx_scratch_free(adev, scratch);
 679	return r;
 680}
 681
 682static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 683{
 684	struct amdgpu_device *adev = ring->adev;
 685	struct amdgpu_ib ib;
 686	struct fence *f = NULL;
 687	uint32_t scratch;
 688	uint32_t tmp = 0;
 689	unsigned i;
 690	int r;
 691
 692	r = amdgpu_gfx_scratch_get(adev, &scratch);
 693	if (r) {
 694		DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
 
 
 
 
 695		return r;
 696	}
 697	WREG32(scratch, 0xCAFEDEAD);
 
 698	memset(&ib, 0, sizeof(ib));
 699	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 700	if (r) {
 701		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 702		goto err1;
 703	}
 704	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 705	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 706	ib.ptr[2] = 0xDEADBEEF;
 707	ib.length_dw = 3;
 
 
 708
 709	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 710	if (r)
 711		goto err2;
 712
 713	r = fence_wait(f, false);
 714	if (r) {
 715		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 716		goto err2;
 717	}
 718	for (i = 0; i < adev->usec_timeout; i++) {
 719		tmp = RREG32(scratch);
 720		if (tmp == 0xDEADBEEF)
 721			break;
 722		DRM_UDELAY(1);
 723	}
 724	if (i < adev->usec_timeout) {
 725		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
 726			 ring->idx, i);
 727		goto err2;
 728	} else {
 729		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 730			  scratch, tmp);
 731		r = -EINVAL;
 732	}
 
 
 
 
 
 
 
 733err2:
 734	fence_put(f);
 735	amdgpu_ib_free(adev, &ib, NULL);
 736	fence_put(f);
 737err1:
 738	amdgpu_gfx_scratch_free(adev, scratch);
 739	return r;
 740}
 741
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 742static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 743{
 744	const char *chip_name;
 745	char fw_name[30];
 746	int err;
 747	struct amdgpu_firmware_info *info = NULL;
 748	const struct common_firmware_header *header = NULL;
 749	const struct gfx_firmware_header_v1_0 *cp_hdr;
 
 
 750
 751	DRM_DEBUG("\n");
 752
 753	switch (adev->asic_type) {
 754	case CHIP_TOPAZ:
 755		chip_name = "topaz";
 756		break;
 757	case CHIP_TONGA:
 758		chip_name = "tonga";
 759		break;
 760	case CHIP_CARRIZO:
 761		chip_name = "carrizo";
 762		break;
 763	case CHIP_FIJI:
 764		chip_name = "fiji";
 765		break;
 766	case CHIP_STONEY:
 767		chip_name = "stoney";
 768		break;
 
 
 
 
 
 
 
 
 
 
 
 
 769	default:
 770		BUG();
 771	}
 772
 773	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
 774	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 
 
 
 
 
 
 
 
 
 775	if (err)
 776		goto out;
 777	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
 778	if (err)
 779		goto out;
 780	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
 781	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 782	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 783
 784	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
 785	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
 
 
 
 
 
 
 
 
 
 786	if (err)
 787		goto out;
 788	err = amdgpu_ucode_validate(adev->gfx.me_fw);
 789	if (err)
 790		goto out;
 791	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
 792	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 
 793	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 794
 795	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
 796	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
 
 
 
 
 
 
 
 
 
 797	if (err)
 798		goto out;
 799	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
 800	if (err)
 801		goto out;
 802	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
 803	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 804	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 805
 
 
 
 
 
 
 
 
 
 
 
 806	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
 807	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 808	if (err)
 809		goto out;
 810	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
 811	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
 812	adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 813	adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 814
 815	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 816	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 817	if (err)
 818		goto out;
 819	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
 820	if (err)
 821		goto out;
 822	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
 823	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 824	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 825
 826	if ((adev->asic_type != CHIP_STONEY) &&
 827	    (adev->asic_type != CHIP_TOPAZ)) {
 828		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
 829		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
 
 
 
 
 
 
 
 
 
 830		if (!err) {
 831			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
 832			if (err)
 833				goto out;
 834			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
 835				adev->gfx.mec2_fw->data;
 836			adev->gfx.mec2_fw_version =
 837				le32_to_cpu(cp_hdr->header.ucode_version);
 838			adev->gfx.mec2_feature_version =
 839				le32_to_cpu(cp_hdr->ucode_feature_version);
 840		} else {
 841			err = 0;
 842			adev->gfx.mec2_fw = NULL;
 843		}
 844	}
 845
 846	if (adev->firmware.smu_load) {
 847		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
 848		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
 849		info->fw = adev->gfx.pfp_fw;
 850		header = (const struct common_firmware_header *)info->fw->data;
 851		adev->firmware.fw_size +=
 852			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 853
 854		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
 855		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
 856		info->fw = adev->gfx.me_fw;
 857		header = (const struct common_firmware_header *)info->fw->data;
 858		adev->firmware.fw_size +=
 859			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 860
 861		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
 862		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
 863		info->fw = adev->gfx.ce_fw;
 864		header = (const struct common_firmware_header *)info->fw->data;
 865		adev->firmware.fw_size +=
 866			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 867
 868		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
 869		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
 870		info->fw = adev->gfx.rlc_fw;
 871		header = (const struct common_firmware_header *)info->fw->data;
 872		adev->firmware.fw_size +=
 873			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
 874
 875		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
 876		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
 877		info->fw = adev->gfx.mec_fw;
 
 878		header = (const struct common_firmware_header *)info->fw->data;
 879		adev->firmware.fw_size +=
 880			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 881
 882		if (adev->gfx.mec2_fw) {
 883			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
 884			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
 885			info->fw = adev->gfx.mec2_fw;
 886			header = (const struct common_firmware_header *)info->fw->data;
 887			adev->firmware.fw_size +=
 888				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 889		}
 890
 891	}
 892
 893out:
 894	if (err) {
 895		dev_err(adev->dev,
 896			"gfx8: Failed to load firmware \"%s\"\n",
 897			fw_name);
 898		release_firmware(adev->gfx.pfp_fw);
 899		adev->gfx.pfp_fw = NULL;
 900		release_firmware(adev->gfx.me_fw);
 901		adev->gfx.me_fw = NULL;
 902		release_firmware(adev->gfx.ce_fw);
 903		adev->gfx.ce_fw = NULL;
 904		release_firmware(adev->gfx.rlc_fw);
 905		adev->gfx.rlc_fw = NULL;
 906		release_firmware(adev->gfx.mec_fw);
 907		adev->gfx.mec_fw = NULL;
 908		release_firmware(adev->gfx.mec2_fw);
 909		adev->gfx.mec2_fw = NULL;
 910	}
 911	return err;
 912}
 913
 914static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 
 915{
 916	int r;
 
 
 917
 918	if (adev->gfx.mec.hpd_eop_obj) {
 919		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
 920		if (unlikely(r != 0))
 921			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 922		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
 923		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
 
 924
 925		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
 926		adev->gfx.mec.hpd_eop_obj = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 927	}
 
 
 
 
 
 
 
 
 
 
 
 
 928}
 929
 930#define MEC_HPD_SIZE 2048
 
 
 
 
 
 
 931
 932static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 933{
 
 934	int r;
 935	u32 *hpd;
 936
 937	/*
 938	 * we assign only 1 pipe because all other pipes will
 939	 * be handled by KFD
 940	 */
 941	adev->gfx.mec.num_mec = 1;
 942	adev->gfx.mec.num_pipe = 1;
 943	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
 944
 945	if (adev->gfx.mec.hpd_eop_obj == NULL) {
 946		r = amdgpu_bo_create(adev,
 947				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
 948				     PAGE_SIZE, true,
 949				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 950				     &adev->gfx.mec.hpd_eop_obj);
 951		if (r) {
 952			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
 953			return r;
 954		}
 955	}
 956
 957	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
 958	if (unlikely(r != 0)) {
 959		gfx_v8_0_mec_fini(adev);
 960		return r;
 961	}
 962	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
 963			  &adev->gfx.mec.hpd_eop_gpu_addr);
 964	if (r) {
 965		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
 966		gfx_v8_0_mec_fini(adev);
 967		return r;
 968	}
 969	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 970	if (r) {
 971		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
 972		gfx_v8_0_mec_fini(adev);
 973		return r;
 974	}
 975
 976	memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
 977
 978	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
 979	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
 980
 981	return 0;
 982}
 983
 984static const u32 vgpr_init_compute_shader[] =
 985{
 986	0x7e000209, 0x7e020208,
 987	0x7e040207, 0x7e060206,
 988	0x7e080205, 0x7e0a0204,
 989	0x7e0c0203, 0x7e0e0202,
 990	0x7e100201, 0x7e120200,
 991	0x7e140209, 0x7e160208,
 992	0x7e180207, 0x7e1a0206,
 993	0x7e1c0205, 0x7e1e0204,
 994	0x7e200203, 0x7e220202,
 995	0x7e240201, 0x7e260200,
 996	0x7e280209, 0x7e2a0208,
 997	0x7e2c0207, 0x7e2e0206,
 998	0x7e300205, 0x7e320204,
 999	0x7e340203, 0x7e360202,
1000	0x7e380201, 0x7e3a0200,
1001	0x7e3c0209, 0x7e3e0208,
1002	0x7e400207, 0x7e420206,
1003	0x7e440205, 0x7e460204,
1004	0x7e480203, 0x7e4a0202,
1005	0x7e4c0201, 0x7e4e0200,
1006	0x7e500209, 0x7e520208,
1007	0x7e540207, 0x7e560206,
1008	0x7e580205, 0x7e5a0204,
1009	0x7e5c0203, 0x7e5e0202,
1010	0x7e600201, 0x7e620200,
1011	0x7e640209, 0x7e660208,
1012	0x7e680207, 0x7e6a0206,
1013	0x7e6c0205, 0x7e6e0204,
1014	0x7e700203, 0x7e720202,
1015	0x7e740201, 0x7e760200,
1016	0x7e780209, 0x7e7a0208,
1017	0x7e7c0207, 0x7e7e0206,
1018	0xbf8a0000, 0xbf810000,
1019};
1020
1021static const u32 sgpr_init_compute_shader[] =
1022{
1023	0xbe8a0100, 0xbe8c0102,
1024	0xbe8e0104, 0xbe900106,
1025	0xbe920108, 0xbe940100,
1026	0xbe960102, 0xbe980104,
1027	0xbe9a0106, 0xbe9c0108,
1028	0xbe9e0100, 0xbea00102,
1029	0xbea20104, 0xbea40106,
1030	0xbea60108, 0xbea80100,
1031	0xbeaa0102, 0xbeac0104,
1032	0xbeae0106, 0xbeb00108,
1033	0xbeb20100, 0xbeb40102,
1034	0xbeb60104, 0xbeb80106,
1035	0xbeba0108, 0xbebc0100,
1036	0xbebe0102, 0xbec00104,
1037	0xbec20106, 0xbec40108,
1038	0xbec60100, 0xbec80102,
1039	0xbee60004, 0xbee70005,
1040	0xbeea0006, 0xbeeb0007,
1041	0xbee80008, 0xbee90009,
1042	0xbefc0000, 0xbf8a0000,
1043	0xbf810000, 0x00000000,
1044};
1045
1046static const u32 vgpr_init_regs[] =
1047{
1048	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1049	mmCOMPUTE_RESOURCE_LIMITS, 0,
1050	mmCOMPUTE_NUM_THREAD_X, 256*4,
1051	mmCOMPUTE_NUM_THREAD_Y, 1,
1052	mmCOMPUTE_NUM_THREAD_Z, 1,
 
1053	mmCOMPUTE_PGM_RSRC2, 20,
1054	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1055	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1056	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1057	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1058	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1059	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1060	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1061	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1062	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1063	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1064};
1065
1066static const u32 sgpr1_init_regs[] =
1067{
1068	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1069	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1070	mmCOMPUTE_NUM_THREAD_X, 256*5,
1071	mmCOMPUTE_NUM_THREAD_Y, 1,
1072	mmCOMPUTE_NUM_THREAD_Z, 1,
 
1073	mmCOMPUTE_PGM_RSRC2, 20,
1074	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1075	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1076	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1077	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1078	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1079	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1080	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1081	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1082	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1083	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1084};
1085
1086static const u32 sgpr2_init_regs[] =
1087{
1088	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
1089	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1090	mmCOMPUTE_NUM_THREAD_X, 256*5,
1091	mmCOMPUTE_NUM_THREAD_Y, 1,
1092	mmCOMPUTE_NUM_THREAD_Z, 1,
 
1093	mmCOMPUTE_PGM_RSRC2, 20,
1094	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1095	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1096	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1097	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1098	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1099	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1100	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1101	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1102	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1103	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1104};
1105
1106static const u32 sec_ded_counter_registers[] =
1107{
1108	mmCPC_EDC_ATC_CNT,
1109	mmCPC_EDC_SCRATCH_CNT,
1110	mmCPC_EDC_UCODE_CNT,
1111	mmCPF_EDC_ATC_CNT,
1112	mmCPF_EDC_ROQ_CNT,
1113	mmCPF_EDC_TAG_CNT,
1114	mmCPG_EDC_ATC_CNT,
1115	mmCPG_EDC_DMA_CNT,
1116	mmCPG_EDC_TAG_CNT,
1117	mmDC_EDC_CSINVOC_CNT,
1118	mmDC_EDC_RESTORE_CNT,
1119	mmDC_EDC_STATE_CNT,
1120	mmGDS_EDC_CNT,
1121	mmGDS_EDC_GRBM_CNT,
1122	mmGDS_EDC_OA_DED,
1123	mmSPI_EDC_CNT,
1124	mmSQC_ATC_EDC_GATCL1_CNT,
1125	mmSQC_EDC_CNT,
1126	mmSQ_EDC_DED_CNT,
1127	mmSQ_EDC_INFO,
1128	mmSQ_EDC_SEC_CNT,
1129	mmTCC_EDC_CNT,
1130	mmTCP_ATC_EDC_GATCL1_CNT,
1131	mmTCP_EDC_CNT,
1132	mmTD_EDC_CNT
1133};
1134
1135static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1136{
1137	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1138	struct amdgpu_ib ib;
1139	struct fence *f = NULL;
1140	int r, i;
1141	u32 tmp;
1142	unsigned total_size, vgpr_offset, sgpr_offset;
1143	u64 gpu_addr;
1144
1145	/* only supported on CZ */
1146	if (adev->asic_type != CHIP_CARRIZO)
1147		return 0;
1148
1149	/* bail if the compute ring is not ready */
1150	if (!ring->ready)
1151		return 0;
1152
1153	tmp = RREG32(mmGB_EDC_MODE);
1154	WREG32(mmGB_EDC_MODE, 0);
1155
1156	total_size =
1157		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1158	total_size +=
1159		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1160	total_size +=
1161		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1162	total_size = ALIGN(total_size, 256);
1163	vgpr_offset = total_size;
1164	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1165	sgpr_offset = total_size;
1166	total_size += sizeof(sgpr_init_compute_shader);
1167
1168	/* allocate an indirect buffer to put the commands in */
1169	memset(&ib, 0, sizeof(ib));
1170	r = amdgpu_ib_get(adev, NULL, total_size, &ib);
 
1171	if (r) {
1172		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1173		return r;
1174	}
1175
1176	/* load the compute shaders */
1177	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1178		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1179
1180	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1181		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1182
1183	/* init the ib length to 0 */
1184	ib.length_dw = 0;
1185
1186	/* VGPR */
1187	/* write the register state for the compute dispatch */
1188	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1189		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1190		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1191		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1192	}
1193	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1194	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1195	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1196	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1197	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1198	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1199
1200	/* write dispatch packet */
1201	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1202	ib.ptr[ib.length_dw++] = 8; /* x */
1203	ib.ptr[ib.length_dw++] = 1; /* y */
1204	ib.ptr[ib.length_dw++] = 1; /* z */
1205	ib.ptr[ib.length_dw++] =
1206		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1207
1208	/* write CS partial flush packet */
1209	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1210	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1211
1212	/* SGPR1 */
1213	/* write the register state for the compute dispatch */
1214	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1215		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1216		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1217		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1218	}
1219	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1220	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1221	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1222	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1223	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1224	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1225
1226	/* write dispatch packet */
1227	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1228	ib.ptr[ib.length_dw++] = 8; /* x */
1229	ib.ptr[ib.length_dw++] = 1; /* y */
1230	ib.ptr[ib.length_dw++] = 1; /* z */
1231	ib.ptr[ib.length_dw++] =
1232		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1233
1234	/* write CS partial flush packet */
1235	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1236	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1237
1238	/* SGPR2 */
1239	/* write the register state for the compute dispatch */
1240	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1241		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1242		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1243		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1244	}
1245	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1246	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1247	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1248	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1249	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1250	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1251
1252	/* write dispatch packet */
1253	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1254	ib.ptr[ib.length_dw++] = 8; /* x */
1255	ib.ptr[ib.length_dw++] = 1; /* y */
1256	ib.ptr[ib.length_dw++] = 1; /* z */
1257	ib.ptr[ib.length_dw++] =
1258		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1259
1260	/* write CS partial flush packet */
1261	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1262	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1263
1264	/* shedule the ib on the ring */
1265	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1266	if (r) {
1267		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1268		goto fail;
1269	}
1270
1271	/* wait for the GPU to finish processing the IB */
1272	r = fence_wait(f, false);
1273	if (r) {
1274		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1275		goto fail;
1276	}
1277
1278	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1279	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1280	WREG32(mmGB_EDC_MODE, tmp);
1281
1282	tmp = RREG32(mmCC_GC_EDC_CONFIG);
1283	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1284	WREG32(mmCC_GC_EDC_CONFIG, tmp);
1285
1286
1287	/* read back registers to clear the counters */
1288	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1289		RREG32(sec_ded_counter_registers[i]);
1290
1291fail:
1292	fence_put(f);
1293	amdgpu_ib_free(adev, &ib, NULL);
1294	fence_put(f);
1295
1296	return r;
1297}
1298
1299static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1300{
1301	u32 gb_addr_config;
1302	u32 mc_shared_chmap, mc_arb_ramcfg;
1303	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
1304	u32 tmp;
 
1305
1306	switch (adev->asic_type) {
1307	case CHIP_TOPAZ:
1308		adev->gfx.config.max_shader_engines = 1;
1309		adev->gfx.config.max_tile_pipes = 2;
1310		adev->gfx.config.max_cu_per_sh = 6;
1311		adev->gfx.config.max_sh_per_se = 1;
1312		adev->gfx.config.max_backends_per_se = 2;
1313		adev->gfx.config.max_texture_channel_caches = 2;
1314		adev->gfx.config.max_gprs = 256;
1315		adev->gfx.config.max_gs_threads = 32;
1316		adev->gfx.config.max_hw_contexts = 8;
1317
1318		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1319		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1320		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1321		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1322		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1323		break;
1324	case CHIP_FIJI:
1325		adev->gfx.config.max_shader_engines = 4;
1326		adev->gfx.config.max_tile_pipes = 16;
1327		adev->gfx.config.max_cu_per_sh = 16;
1328		adev->gfx.config.max_sh_per_se = 1;
1329		adev->gfx.config.max_backends_per_se = 4;
1330		adev->gfx.config.max_texture_channel_caches = 16;
1331		adev->gfx.config.max_gprs = 256;
1332		adev->gfx.config.max_gs_threads = 32;
1333		adev->gfx.config.max_hw_contexts = 8;
1334
1335		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1336		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1337		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1338		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1339		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1340		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1341	case CHIP_TONGA:
1342		adev->gfx.config.max_shader_engines = 4;
1343		adev->gfx.config.max_tile_pipes = 8;
1344		adev->gfx.config.max_cu_per_sh = 8;
1345		adev->gfx.config.max_sh_per_se = 1;
1346		adev->gfx.config.max_backends_per_se = 2;
1347		adev->gfx.config.max_texture_channel_caches = 8;
1348		adev->gfx.config.max_gprs = 256;
1349		adev->gfx.config.max_gs_threads = 32;
1350		adev->gfx.config.max_hw_contexts = 8;
1351
1352		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1353		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1354		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1355		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1356		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1357		break;
1358	case CHIP_CARRIZO:
1359		adev->gfx.config.max_shader_engines = 1;
1360		adev->gfx.config.max_tile_pipes = 2;
1361		adev->gfx.config.max_sh_per_se = 1;
1362		adev->gfx.config.max_backends_per_se = 2;
1363
1364		switch (adev->pdev->revision) {
1365		case 0xc4:
1366		case 0x84:
1367		case 0xc8:
1368		case 0xcc:
1369		case 0xe1:
1370		case 0xe3:
1371			/* B10 */
1372			adev->gfx.config.max_cu_per_sh = 8;
1373			break;
1374		case 0xc5:
1375		case 0x81:
1376		case 0x85:
1377		case 0xc9:
1378		case 0xcd:
1379		case 0xe2:
1380		case 0xe4:
1381			/* B8 */
1382			adev->gfx.config.max_cu_per_sh = 6;
1383			break;
1384		case 0xc6:
1385		case 0xca:
1386		case 0xce:
1387		case 0x88:
1388			/* B6 */
1389			adev->gfx.config.max_cu_per_sh = 6;
1390			break;
1391		case 0xc7:
1392		case 0x87:
1393		case 0xcb:
1394		case 0xe5:
1395		case 0x89:
1396		default:
1397			/* B4 */
1398			adev->gfx.config.max_cu_per_sh = 4;
1399			break;
1400		}
1401
1402		adev->gfx.config.max_texture_channel_caches = 2;
1403		adev->gfx.config.max_gprs = 256;
1404		adev->gfx.config.max_gs_threads = 32;
1405		adev->gfx.config.max_hw_contexts = 8;
1406
1407		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1408		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1409		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1410		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1411		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1412		break;
1413	case CHIP_STONEY:
1414		adev->gfx.config.max_shader_engines = 1;
1415		adev->gfx.config.max_tile_pipes = 2;
1416		adev->gfx.config.max_sh_per_se = 1;
1417		adev->gfx.config.max_backends_per_se = 1;
1418
1419		switch (adev->pdev->revision) {
1420		case 0xc0:
1421		case 0xc1:
1422		case 0xc2:
1423		case 0xc4:
1424		case 0xc8:
1425		case 0xc9:
1426			adev->gfx.config.max_cu_per_sh = 3;
1427			break;
1428		case 0xd0:
1429		case 0xd1:
1430		case 0xd2:
1431		default:
1432			adev->gfx.config.max_cu_per_sh = 2;
1433			break;
1434		}
1435
1436		adev->gfx.config.max_texture_channel_caches = 2;
1437		adev->gfx.config.max_gprs = 256;
1438		adev->gfx.config.max_gs_threads = 16;
1439		adev->gfx.config.max_hw_contexts = 8;
1440
1441		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1442		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1443		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1444		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1445		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1446		break;
1447	default:
1448		adev->gfx.config.max_shader_engines = 2;
1449		adev->gfx.config.max_tile_pipes = 4;
1450		adev->gfx.config.max_cu_per_sh = 2;
1451		adev->gfx.config.max_sh_per_se = 1;
1452		adev->gfx.config.max_backends_per_se = 2;
1453		adev->gfx.config.max_texture_channel_caches = 4;
1454		adev->gfx.config.max_gprs = 256;
1455		adev->gfx.config.max_gs_threads = 32;
1456		adev->gfx.config.max_hw_contexts = 8;
1457
1458		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1459		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1460		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1461		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1462		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1463		break;
1464	}
1465
1466	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
1467	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1468	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1469
 
 
 
 
 
1470	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1471	adev->gfx.config.mem_max_burst_length_bytes = 256;
1472	if (adev->flags & AMD_IS_APU) {
1473		/* Get memory bank mapping mode. */
1474		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1475		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1476		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1477
1478		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1479		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1480		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1481
1482		/* Validate settings in case only one DIMM installed. */
1483		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1484			dimm00_addr_map = 0;
1485		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1486			dimm01_addr_map = 0;
1487		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1488			dimm10_addr_map = 0;
1489		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1490			dimm11_addr_map = 0;
1491
1492		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
1493		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
1494		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1495			adev->gfx.config.mem_row_size_in_kb = 2;
1496		else
1497			adev->gfx.config.mem_row_size_in_kb = 1;
1498	} else {
1499		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1500		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1501		if (adev->gfx.config.mem_row_size_in_kb > 4)
1502			adev->gfx.config.mem_row_size_in_kb = 4;
1503	}
1504
1505	adev->gfx.config.shader_engine_tile_size = 32;
1506	adev->gfx.config.num_gpus = 1;
1507	adev->gfx.config.multi_gpu_tile_size = 64;
1508
1509	/* fix up row size */
1510	switch (adev->gfx.config.mem_row_size_in_kb) {
1511	case 1:
1512	default:
1513		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1514		break;
1515	case 2:
1516		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1517		break;
1518	case 4:
1519		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1520		break;
1521	}
1522	adev->gfx.config.gb_addr_config = gb_addr_config;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1523}
1524
 
 
1525static int gfx_v8_0_sw_init(void *handle)
1526{
1527	int i, r;
1528	struct amdgpu_ring *ring;
 
1529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1530
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1531	/* EOP Event */
1532	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
1533	if (r)
1534		return r;
1535
1536	/* Privileged reg */
1537	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
 
1538	if (r)
1539		return r;
1540
1541	/* Privileged inst */
1542	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
 
1543	if (r)
1544		return r;
1545
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1546	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1547
1548	gfx_v8_0_scratch_init(adev);
1549
1550	r = gfx_v8_0_init_microcode(adev);
1551	if (r) {
1552		DRM_ERROR("Failed to load gfx firmware!\n");
1553		return r;
1554	}
1555
 
 
 
 
 
 
1556	r = gfx_v8_0_mec_init(adev);
1557	if (r) {
1558		DRM_ERROR("Failed to init MEC BOs!\n");
1559		return r;
1560	}
1561
1562	/* set up the gfx ring */
1563	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1564		ring = &adev->gfx.gfx_ring[i];
1565		ring->ring_obj = NULL;
1566		sprintf(ring->name, "gfx");
1567		/* no gfx doorbells on iceland */
1568		if (adev->asic_type != CHIP_TOPAZ) {
1569			ring->use_doorbell = true;
1570			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
1571		}
1572
1573		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
1574				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
1575				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
1576				     AMDGPU_RING_TYPE_GFX);
1577		if (r)
1578			return r;
1579	}
1580
1581	/* set up the compute queues */
1582	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1583		unsigned irq_type;
1584
1585		/* max 32 queues per MEC */
1586		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
1587			DRM_ERROR("Too many (%d) compute rings!\n", i);
1588			break;
 
 
 
 
 
 
 
 
 
 
 
 
1589		}
1590		ring = &adev->gfx.compute_ring[i];
1591		ring->ring_obj = NULL;
1592		ring->use_doorbell = true;
1593		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
1594		ring->me = 1; /* first MEC */
1595		ring->pipe = i / 8;
1596		ring->queue = i % 8;
1597		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
1598		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
1599		/* type-2 packets are deprecated on MEC, use type-3 instead */
1600		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
1601				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
1602				     &adev->gfx.eop_irq, irq_type,
1603				     AMDGPU_RING_TYPE_COMPUTE);
1604		if (r)
1605			return r;
1606	}
1607
1608	/* reserve GDS, GWS and OA resource for gfx */
1609	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
1610			PAGE_SIZE, true,
1611			AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
1612			NULL, &adev->gds.gds_gfx_bo);
1613	if (r)
1614		return r;
 
1615
1616	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
1617		PAGE_SIZE, true,
1618		AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
1619		NULL, &adev->gds.gws_gfx_bo);
1620	if (r)
1621		return r;
1622
1623	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
1624			PAGE_SIZE, true,
1625			AMDGPU_GEM_DOMAIN_OA, 0, NULL,
1626			NULL, &adev->gds.oa_gfx_bo);
1627	if (r)
1628		return r;
1629
1630	adev->gfx.ce_ram_size = 0x8000;
1631
1632	gfx_v8_0_gpu_early_init(adev);
 
 
1633
1634	return 0;
1635}
1636
1637static int gfx_v8_0_sw_fini(void *handle)
1638{
1639	int i;
1640	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1641
1642	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
1643	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
1644	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
1645
1646	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1647		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1648	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1649		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1650
 
 
 
 
1651	gfx_v8_0_mec_fini(adev);
 
 
 
 
 
 
 
 
 
 
 
1652
1653	return 0;
1654}
1655
1656static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1657{
1658	uint32_t *modearray, *mod2array;
1659	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1660	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1661	u32 reg_offset;
1662
1663	modearray = adev->gfx.config.tile_mode_array;
1664	mod2array = adev->gfx.config.macrotile_mode_array;
1665
1666	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1667		modearray[reg_offset] = 0;
1668
1669	for (reg_offset = 0; reg_offset <  num_secondary_tile_mode_states; reg_offset++)
1670		mod2array[reg_offset] = 0;
1671
1672	switch (adev->asic_type) {
1673	case CHIP_TOPAZ:
1674		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1675				PIPE_CONFIG(ADDR_SURF_P2) |
1676				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1677				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1678		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1679				PIPE_CONFIG(ADDR_SURF_P2) |
1680				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1681				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1682		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1683				PIPE_CONFIG(ADDR_SURF_P2) |
1684				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1685				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1686		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1687				PIPE_CONFIG(ADDR_SURF_P2) |
1688				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1689				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1690		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1691				PIPE_CONFIG(ADDR_SURF_P2) |
1692				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1693				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1694		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1695				PIPE_CONFIG(ADDR_SURF_P2) |
1696				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1697				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1698		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1699				PIPE_CONFIG(ADDR_SURF_P2) |
1700				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1701				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1702		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1703				PIPE_CONFIG(ADDR_SURF_P2));
1704		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1705				PIPE_CONFIG(ADDR_SURF_P2) |
1706				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1707				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1708		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1709				 PIPE_CONFIG(ADDR_SURF_P2) |
1710				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1711				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1712		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1713				 PIPE_CONFIG(ADDR_SURF_P2) |
1714				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1715				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1716		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1717				 PIPE_CONFIG(ADDR_SURF_P2) |
1718				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1719				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1720		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1721				 PIPE_CONFIG(ADDR_SURF_P2) |
1722				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1723				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1724		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1725				 PIPE_CONFIG(ADDR_SURF_P2) |
1726				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1727				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1728		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1729				 PIPE_CONFIG(ADDR_SURF_P2) |
1730				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1731				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1732		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1733				 PIPE_CONFIG(ADDR_SURF_P2) |
1734				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1735				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1736		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1737				 PIPE_CONFIG(ADDR_SURF_P2) |
1738				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1739				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1740		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1741				 PIPE_CONFIG(ADDR_SURF_P2) |
1742				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1743				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1744		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1745				 PIPE_CONFIG(ADDR_SURF_P2) |
1746				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1747				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1748		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1749				 PIPE_CONFIG(ADDR_SURF_P2) |
1750				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1751				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1752		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1753				 PIPE_CONFIG(ADDR_SURF_P2) |
1754				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1755				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1756		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1757				 PIPE_CONFIG(ADDR_SURF_P2) |
1758				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1759				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1760		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1761				 PIPE_CONFIG(ADDR_SURF_P2) |
1762				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1763				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1764		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1765				 PIPE_CONFIG(ADDR_SURF_P2) |
1766				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1767				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1768		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1769				 PIPE_CONFIG(ADDR_SURF_P2) |
1770				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1771				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1772		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1773				 PIPE_CONFIG(ADDR_SURF_P2) |
1774				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1775				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1776
1777		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1778				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1779				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1780				NUM_BANKS(ADDR_SURF_8_BANK));
1781		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1782				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1783				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1784				NUM_BANKS(ADDR_SURF_8_BANK));
1785		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1786				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1787				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1788				NUM_BANKS(ADDR_SURF_8_BANK));
1789		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1790				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1791				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1792				NUM_BANKS(ADDR_SURF_8_BANK));
1793		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1794				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1795				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1796				NUM_BANKS(ADDR_SURF_8_BANK));
1797		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1798				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1799				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1800				NUM_BANKS(ADDR_SURF_8_BANK));
1801		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1802				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1803				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1804				NUM_BANKS(ADDR_SURF_8_BANK));
1805		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1806				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1807				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1808				NUM_BANKS(ADDR_SURF_16_BANK));
1809		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1810				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1811				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1812				NUM_BANKS(ADDR_SURF_16_BANK));
1813		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1814				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1815				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1816				 NUM_BANKS(ADDR_SURF_16_BANK));
1817		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1818				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1819				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1820				 NUM_BANKS(ADDR_SURF_16_BANK));
1821		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1822				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1823				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1824				 NUM_BANKS(ADDR_SURF_16_BANK));
1825		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1826				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1827				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1828				 NUM_BANKS(ADDR_SURF_16_BANK));
1829		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1830				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1831				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1832				 NUM_BANKS(ADDR_SURF_8_BANK));
1833
1834		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1835			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
1836			    reg_offset != 23)
1837				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
1838
1839		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1840			if (reg_offset != 7)
1841				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
1842
1843		break;
1844	case CHIP_FIJI:
 
1845		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1846				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1847				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1848				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1849		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1850				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1851				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1852				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1853		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1854				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1855				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1856				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1857		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1858				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1859				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1860				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1861		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1862				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1863				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1864				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1865		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1866				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1867				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1868				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1869		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1870				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1871				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1872				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1873		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1874				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1875				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1876				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1877		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1878				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1879		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1880				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1881				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1882				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1883		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1884				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1885				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1886				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1887		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1888				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1889				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1890				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1891		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1892				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1893				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1894				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1895		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1896				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1897				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1898				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1899		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1900				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1901				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1902				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1903		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1904				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1905				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1906				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1907		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1908				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1909				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1910				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1911		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1912				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1913				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1914				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1915		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1916				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1917				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1918				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1919		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1920				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1921				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1922				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1923		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1924				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1925				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1926				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1927		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1928				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1929				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1930				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1931		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1932				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1933				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1934				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1935		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1936				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1937				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1938				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1939		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1940				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1941				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1942				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1943		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1944				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1945				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1946				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1947		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1948				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1949				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1950				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1951		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1952				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1953				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1954				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1955		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1956				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1957				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1958				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1959		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1960				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1961				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1962				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1963		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1964				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1965				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1966				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1967
1968		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1969				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1970				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1971				NUM_BANKS(ADDR_SURF_8_BANK));
1972		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1973				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1974				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1975				NUM_BANKS(ADDR_SURF_8_BANK));
1976		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1977				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1978				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1979				NUM_BANKS(ADDR_SURF_8_BANK));
1980		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1981				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1982				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1983				NUM_BANKS(ADDR_SURF_8_BANK));
1984		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1985				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1986				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1987				NUM_BANKS(ADDR_SURF_8_BANK));
1988		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1989				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1990				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1991				NUM_BANKS(ADDR_SURF_8_BANK));
1992		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1993				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1994				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1995				NUM_BANKS(ADDR_SURF_8_BANK));
1996		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1997				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1998				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1999				NUM_BANKS(ADDR_SURF_8_BANK));
2000		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2001				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2002				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2003				NUM_BANKS(ADDR_SURF_8_BANK));
2004		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2005				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2006				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2007				 NUM_BANKS(ADDR_SURF_8_BANK));
2008		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2009				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2010				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2011				 NUM_BANKS(ADDR_SURF_8_BANK));
2012		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2013				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2014				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2015				 NUM_BANKS(ADDR_SURF_8_BANK));
2016		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2017				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2018				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2019				 NUM_BANKS(ADDR_SURF_8_BANK));
2020		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2021				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2022				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2023				 NUM_BANKS(ADDR_SURF_4_BANK));
2024
2025		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2026			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2027
2028		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2029			if (reg_offset != 7)
2030				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2031
2032		break;
2033	case CHIP_TONGA:
2034		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2035				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2036				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2037				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2038		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2039				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2040				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2041				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2042		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2043				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2044				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2045				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2046		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2047				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2048				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2049				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2050		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2051				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2052				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2053				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2054		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2055				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2056				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2057				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2058		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2059				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2060				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2061				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2062		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2063				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2064				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2065				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2066		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2067				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2068		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2069				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2070				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2071				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2072		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2073				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2074				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2075				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2076		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2077				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2078				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2079				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2080		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2081				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2082				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2083				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2084		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2085				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2086				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2087				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2088		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2089				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2090				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2091				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2092		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2093				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2094				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2095				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2096		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2097				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2098				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2099				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2100		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2101				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2102				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2103				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2104		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2105				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2106				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2107				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2108		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2109				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2110				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2111				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2112		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2113				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2114				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2115				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2116		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2117				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2118				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2119				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2120		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2121				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2122				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2123				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2124		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2125				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2126				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2127				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2128		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2129				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2130				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2131				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2132		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2133				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2134				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2135				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2136		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2137				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2138				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2139				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2140		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2141				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2142				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2143				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2144		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2145				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2146				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2147				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2148		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2149				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2150				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2151				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2152		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2153				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2154				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2155				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2156
2157		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2158				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2159				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2160				NUM_BANKS(ADDR_SURF_16_BANK));
2161		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2162				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2163				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2164				NUM_BANKS(ADDR_SURF_16_BANK));
2165		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2166				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2167				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2168				NUM_BANKS(ADDR_SURF_16_BANK));
2169		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2170				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2171				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2172				NUM_BANKS(ADDR_SURF_16_BANK));
2173		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2174				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2175				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2176				NUM_BANKS(ADDR_SURF_16_BANK));
2177		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2178				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2179				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2180				NUM_BANKS(ADDR_SURF_16_BANK));
2181		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2182				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2183				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2184				NUM_BANKS(ADDR_SURF_16_BANK));
2185		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2186				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2187				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2188				NUM_BANKS(ADDR_SURF_16_BANK));
2189		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2190				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2191				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2192				NUM_BANKS(ADDR_SURF_16_BANK));
2193		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2194				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2195				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2196				 NUM_BANKS(ADDR_SURF_16_BANK));
2197		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2198				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2199				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2200				 NUM_BANKS(ADDR_SURF_16_BANK));
2201		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2202				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2203				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2204				 NUM_BANKS(ADDR_SURF_8_BANK));
2205		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2206				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2207				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2208				 NUM_BANKS(ADDR_SURF_4_BANK));
2209		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2210				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2211				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2212				 NUM_BANKS(ADDR_SURF_4_BANK));
2213
2214		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2215			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2216
2217		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2218			if (reg_offset != 7)
2219				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2220
2221		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2222	case CHIP_STONEY:
2223		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2224				PIPE_CONFIG(ADDR_SURF_P2) |
2225				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2226				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2227		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2228				PIPE_CONFIG(ADDR_SURF_P2) |
2229				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2230				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2231		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2232				PIPE_CONFIG(ADDR_SURF_P2) |
2233				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2234				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2235		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2236				PIPE_CONFIG(ADDR_SURF_P2) |
2237				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2238				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2239		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2240				PIPE_CONFIG(ADDR_SURF_P2) |
2241				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2242				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2243		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2244				PIPE_CONFIG(ADDR_SURF_P2) |
2245				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2246				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2247		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2248				PIPE_CONFIG(ADDR_SURF_P2) |
2249				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2250				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2251		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2252				PIPE_CONFIG(ADDR_SURF_P2));
2253		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2254				PIPE_CONFIG(ADDR_SURF_P2) |
2255				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2256				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2257		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2258				 PIPE_CONFIG(ADDR_SURF_P2) |
2259				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2260				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2261		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2262				 PIPE_CONFIG(ADDR_SURF_P2) |
2263				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2264				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2265		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2266				 PIPE_CONFIG(ADDR_SURF_P2) |
2267				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2268				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2269		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2270				 PIPE_CONFIG(ADDR_SURF_P2) |
2271				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2272				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2273		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2274				 PIPE_CONFIG(ADDR_SURF_P2) |
2275				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2276				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2277		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2278				 PIPE_CONFIG(ADDR_SURF_P2) |
2279				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2280				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2281		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2282				 PIPE_CONFIG(ADDR_SURF_P2) |
2283				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2284				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2285		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2286				 PIPE_CONFIG(ADDR_SURF_P2) |
2287				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2288				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2289		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2290				 PIPE_CONFIG(ADDR_SURF_P2) |
2291				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2292				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2293		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2294				 PIPE_CONFIG(ADDR_SURF_P2) |
2295				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2296				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2297		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2298				 PIPE_CONFIG(ADDR_SURF_P2) |
2299				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2300				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2301		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2302				 PIPE_CONFIG(ADDR_SURF_P2) |
2303				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2304				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2305		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2306				 PIPE_CONFIG(ADDR_SURF_P2) |
2307				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2308				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2309		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2310				 PIPE_CONFIG(ADDR_SURF_P2) |
2311				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2312				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2313		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2314				 PIPE_CONFIG(ADDR_SURF_P2) |
2315				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2316				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2317		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2318				 PIPE_CONFIG(ADDR_SURF_P2) |
2319				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2320				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2321		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2322				 PIPE_CONFIG(ADDR_SURF_P2) |
2323				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2324				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2325
2326		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2327				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2328				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2329				NUM_BANKS(ADDR_SURF_8_BANK));
2330		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2331				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2332				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2333				NUM_BANKS(ADDR_SURF_8_BANK));
2334		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2335				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2336				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2337				NUM_BANKS(ADDR_SURF_8_BANK));
2338		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2339				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2340				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2341				NUM_BANKS(ADDR_SURF_8_BANK));
2342		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2343				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2344				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2345				NUM_BANKS(ADDR_SURF_8_BANK));
2346		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2347				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2348				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2349				NUM_BANKS(ADDR_SURF_8_BANK));
2350		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2351				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2352				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2353				NUM_BANKS(ADDR_SURF_8_BANK));
2354		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2355				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2356				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2357				NUM_BANKS(ADDR_SURF_16_BANK));
2358		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2359				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2360				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2361				NUM_BANKS(ADDR_SURF_16_BANK));
2362		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2363				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2364				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2365				 NUM_BANKS(ADDR_SURF_16_BANK));
2366		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2367				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2368				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2369				 NUM_BANKS(ADDR_SURF_16_BANK));
2370		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2371				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2372				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2373				 NUM_BANKS(ADDR_SURF_16_BANK));
2374		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2375				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2376				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2377				 NUM_BANKS(ADDR_SURF_16_BANK));
2378		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2379				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2380				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2381				 NUM_BANKS(ADDR_SURF_8_BANK));
2382
2383		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2384			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2385			    reg_offset != 23)
2386				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2387
2388		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2389			if (reg_offset != 7)
2390				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2391
2392		break;
2393	default:
2394		dev_warn(adev->dev,
2395			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
2396			 adev->asic_type);
 
2397
2398	case CHIP_CARRIZO:
2399		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2400				PIPE_CONFIG(ADDR_SURF_P2) |
2401				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2402				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2403		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2404				PIPE_CONFIG(ADDR_SURF_P2) |
2405				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2406				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2407		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2408				PIPE_CONFIG(ADDR_SURF_P2) |
2409				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2410				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2411		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2412				PIPE_CONFIG(ADDR_SURF_P2) |
2413				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2414				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2415		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2416				PIPE_CONFIG(ADDR_SURF_P2) |
2417				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2418				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2419		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2420				PIPE_CONFIG(ADDR_SURF_P2) |
2421				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2422				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2423		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2424				PIPE_CONFIG(ADDR_SURF_P2) |
2425				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2426				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2427		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2428				PIPE_CONFIG(ADDR_SURF_P2));
2429		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2430				PIPE_CONFIG(ADDR_SURF_P2) |
2431				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2432				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2433		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2434				 PIPE_CONFIG(ADDR_SURF_P2) |
2435				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2436				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2437		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2438				 PIPE_CONFIG(ADDR_SURF_P2) |
2439				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2440				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2441		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2442				 PIPE_CONFIG(ADDR_SURF_P2) |
2443				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2444				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2445		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2446				 PIPE_CONFIG(ADDR_SURF_P2) |
2447				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2448				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2449		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2450				 PIPE_CONFIG(ADDR_SURF_P2) |
2451				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2452				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2453		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2454				 PIPE_CONFIG(ADDR_SURF_P2) |
2455				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2456				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2457		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2458				 PIPE_CONFIG(ADDR_SURF_P2) |
2459				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2460				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2461		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2462				 PIPE_CONFIG(ADDR_SURF_P2) |
2463				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2464				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2465		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2466				 PIPE_CONFIG(ADDR_SURF_P2) |
2467				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2468				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2469		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2470				 PIPE_CONFIG(ADDR_SURF_P2) |
2471				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2472				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2473		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2474				 PIPE_CONFIG(ADDR_SURF_P2) |
2475				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2476				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2477		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2478				 PIPE_CONFIG(ADDR_SURF_P2) |
2479				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2480				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2481		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2482				 PIPE_CONFIG(ADDR_SURF_P2) |
2483				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2484				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2485		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2486				 PIPE_CONFIG(ADDR_SURF_P2) |
2487				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2488				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2489		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2490				 PIPE_CONFIG(ADDR_SURF_P2) |
2491				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2492				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2493		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2494				 PIPE_CONFIG(ADDR_SURF_P2) |
2495				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2496				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2497		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2498				 PIPE_CONFIG(ADDR_SURF_P2) |
2499				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2500				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2501
2502		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2503				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2504				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2505				NUM_BANKS(ADDR_SURF_8_BANK));
2506		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2507				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2508				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2509				NUM_BANKS(ADDR_SURF_8_BANK));
2510		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2511				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2512				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2513				NUM_BANKS(ADDR_SURF_8_BANK));
2514		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2515				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2516				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2517				NUM_BANKS(ADDR_SURF_8_BANK));
2518		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2519				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2520				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2521				NUM_BANKS(ADDR_SURF_8_BANK));
2522		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2523				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2524				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2525				NUM_BANKS(ADDR_SURF_8_BANK));
2526		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2527				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2528				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2529				NUM_BANKS(ADDR_SURF_8_BANK));
2530		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2531				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2532				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2533				NUM_BANKS(ADDR_SURF_16_BANK));
2534		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2535				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2536				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2537				NUM_BANKS(ADDR_SURF_16_BANK));
2538		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2539				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2540				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2541				 NUM_BANKS(ADDR_SURF_16_BANK));
2542		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2543				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2544				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2545				 NUM_BANKS(ADDR_SURF_16_BANK));
2546		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2547				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2548				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2549				 NUM_BANKS(ADDR_SURF_16_BANK));
2550		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2551				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2552				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2553				 NUM_BANKS(ADDR_SURF_16_BANK));
2554		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2555				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2556				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2557				 NUM_BANKS(ADDR_SURF_8_BANK));
2558
2559		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2560			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2561			    reg_offset != 23)
2562				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2563
2564		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2565			if (reg_offset != 7)
2566				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2567
2568		break;
2569	}
2570}
2571
2572void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
 
2573{
2574	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2575
2576	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
2577		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2578		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2579	} else if (se_num == 0xffffffff) {
2580		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
 
2581		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2582	} else if (sh_num == 0xffffffff) {
2583		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2584		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2585	} else {
 
 
 
2586		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2587		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2588	}
2589	WREG32(mmGRBM_GFX_INDEX, data);
2590}
2591
2592static u32 gfx_v8_0_create_bitmask(u32 bit_width)
 
2593{
2594	return (u32)((1ULL << bit_width) - 1);
2595}
2596
2597static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2598{
2599	u32 data, mask;
2600
2601	data = RREG32(mmCC_RB_BACKEND_DISABLE);
2602	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
2603
2604	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2605	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2606
2607	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
2608				       adev->gfx.config.max_sh_per_se);
2609
2610	return (~data) & mask;
2611}
2612
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2613static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
2614{
2615	int i, j;
2616	u32 data;
 
2617	u32 active_rbs = 0;
2618	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2619					adev->gfx.config.max_sh_per_se;
 
2620
2621	mutex_lock(&adev->grbm_idx_mutex);
2622	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2623		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2624			gfx_v8_0_select_se_sh(adev, i, j);
2625			data = gfx_v8_0_get_rb_active_bitmap(adev);
2626			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2627					       rb_bitmap_width_per_sh);
2628		}
2629	}
2630	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
2631	mutex_unlock(&adev->grbm_idx_mutex);
2632
2633	adev->gfx.config.backend_enable_mask = active_rbs;
2634	adev->gfx.config.num_rbs = hweight32(active_rbs);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2635}
2636
2637/**
2638 * gfx_v8_0_init_compute_vmid - gart enable
2639 *
2640 * @rdev: amdgpu_device pointer
2641 *
2642 * Initialize compute vmid sh_mem registers
2643 *
2644 */
2645#define DEFAULT_SH_MEM_BASES	(0x6000)
2646#define FIRST_COMPUTE_VMID	(8)
2647#define LAST_COMPUTE_VMID	(16)
2648static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
2649{
2650	int i;
2651	uint32_t sh_mem_config;
2652	uint32_t sh_mem_bases;
2653
2654	/*
2655	 * Configure apertures:
2656	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2657	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2658	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2659	 */
2660	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2661
2662	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
2663			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
2664			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2665			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
2666			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
2667			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
2668
2669	mutex_lock(&adev->srbm_mutex);
2670	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2671		vi_srbm_select(adev, 0, 0, 0, i);
2672		/* CP and shaders */
2673		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
2674		WREG32(mmSH_MEM_APE1_BASE, 1);
2675		WREG32(mmSH_MEM_APE1_LIMIT, 0);
2676		WREG32(mmSH_MEM_BASES, sh_mem_bases);
2677	}
2678	vi_srbm_select(adev, 0, 0, 0, 0);
2679	mutex_unlock(&adev->srbm_mutex);
 
 
 
 
 
 
 
 
 
2680}
2681
2682static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2683{
2684	u32 tmp;
2685	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
2686
2687	tmp = RREG32(mmGRBM_CNTL);
2688	tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
2689	WREG32(mmGRBM_CNTL, tmp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2690
 
2691	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2692	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2693	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
2694
2695	gfx_v8_0_tiling_mode_table_init(adev);
2696
2697	gfx_v8_0_setup_rb(adev);
 
 
2698
2699	/* XXX SH_MEM regs */
2700	/* where to put LDS, scratch, GPUVM in FSA64 space */
 
 
 
 
 
 
 
 
2701	mutex_lock(&adev->srbm_mutex);
2702	for (i = 0; i < 16; i++) {
2703		vi_srbm_select(adev, 0, 0, 0, i);
2704		/* CP and shaders */
2705		if (i == 0) {
2706			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
2707			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
2708			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
2709					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2710			WREG32(mmSH_MEM_CONFIG, tmp);
 
2711		} else {
2712			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
2713			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
2714			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
2715					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2716			WREG32(mmSH_MEM_CONFIG, tmp);
 
 
2717		}
2718
2719		WREG32(mmSH_MEM_APE1_BASE, 1);
2720		WREG32(mmSH_MEM_APE1_LIMIT, 0);
2721		WREG32(mmSH_MEM_BASES, 0);
2722	}
2723	vi_srbm_select(adev, 0, 0, 0, 0);
2724	mutex_unlock(&adev->srbm_mutex);
2725
2726	gfx_v8_0_init_compute_vmid(adev);
 
2727
2728	mutex_lock(&adev->grbm_idx_mutex);
2729	/*
2730	 * making sure that the following register writes will be broadcasted
2731	 * to all the shaders
2732	 */
2733	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
2734
2735	WREG32(mmPA_SC_FIFO_SIZE,
2736		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
2737			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
2738		   (adev->gfx.config.sc_prim_fifo_size_backend <<
2739			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
2740		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
2741			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
2742		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
2743			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
 
 
 
 
 
 
 
 
2744	mutex_unlock(&adev->grbm_idx_mutex);
2745
2746}
2747
2748static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2749{
2750	u32 i, j, k;
2751	u32 mask;
2752
2753	mutex_lock(&adev->grbm_idx_mutex);
2754	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2755		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2756			gfx_v8_0_select_se_sh(adev, i, j);
2757			for (k = 0; k < adev->usec_timeout; k++) {
2758				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2759					break;
2760				udelay(1);
2761			}
 
 
 
 
 
 
 
 
2762		}
2763	}
2764	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
2765	mutex_unlock(&adev->grbm_idx_mutex);
2766
2767	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2768		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2769		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2770		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2771	for (k = 0; k < adev->usec_timeout; k++) {
2772		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2773			break;
2774		udelay(1);
2775	}
2776}
2777
2778static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2779					       bool enable)
2780{
2781	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
2782
2783	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2784	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2785	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2786	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2787
2788	WREG32(mmCP_INT_CNTL_RING0, tmp);
2789}
2790
2791void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
2792{
2793	u32 tmp = RREG32(mmRLC_CNTL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2794
2795	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2796	WREG32(mmRLC_CNTL, tmp);
 
 
2797
2798	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
2799
2800	gfx_v8_0_wait_for_rlc_serdes(adev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2801}
2802
2803static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
2804{
2805	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2806
2807	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2808	WREG32(mmGRBM_SOFT_RESET, tmp);
2809	udelay(50);
2810	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2811	WREG32(mmGRBM_SOFT_RESET, tmp);
2812	udelay(50);
2813}
2814
2815static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 
 
 
 
 
2816{
2817	u32 tmp = RREG32(mmRLC_CNTL);
2818
2819	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
2820	WREG32(mmRLC_CNTL, tmp);
2821
2822	/* carrizo do enable cp interrupt after cp inited */
2823	if (!(adev->flags & AMD_IS_APU))
2824		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
 
 
 
 
 
2825
2826	udelay(50);
2827}
2828
2829static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 
2830{
2831	const struct rlc_firmware_header_v2_0 *hdr;
2832	const __le32 *fw_data;
2833	unsigned i, fw_size;
2834
2835	if (!adev->gfx.rlc_fw)
2836		return -EINVAL;
 
 
 
2837
2838	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2839	amdgpu_ucode_print_rlc_hdr(&hdr->header);
 
 
2840
2841	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2842			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2843	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2844
2845	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
2846	for (i = 0; i < fw_size; i++)
2847		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2848	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
 
 
 
 
 
 
 
 
 
 
2849
2850	return 0;
2851}
2852
2853static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
2854{
2855	int r;
2856
2857	gfx_v8_0_rlc_stop(adev);
 
 
2858
2859	/* disable CG */
2860	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
 
 
2861
2862	/* disable PG */
2863	WREG32(mmRLC_PG_CNTL, 0);
 
2864
2865	gfx_v8_0_rlc_reset(adev);
2866
2867	if (!adev->pp_enabled) {
2868		if (!adev->firmware.smu_load) {
2869			/* legacy rlc firmware loading */
2870			r = gfx_v8_0_rlc_load_microcode(adev);
2871			if (r)
2872				return r;
2873		} else {
2874			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
2875							AMDGPU_UCODE_ID_RLC_G);
2876			if (r)
2877				return -EINVAL;
2878		}
 
 
2879	}
2880
2881	gfx_v8_0_rlc_start(adev);
 
 
 
2882
2883	return 0;
2884}
2885
2886static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2887{
2888	int i;
2889	u32 tmp = RREG32(mmCP_ME_CNTL);
2890
2891	if (enable) {
2892		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
2893		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
2894		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
2895	} else {
2896		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
2897		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
2898		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
2899		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2900			adev->gfx.gfx_ring[i].ready = false;
2901	}
2902	WREG32(mmCP_ME_CNTL, tmp);
2903	udelay(50);
2904}
2905
2906static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2907{
2908	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2909	const struct gfx_firmware_header_v1_0 *ce_hdr;
2910	const struct gfx_firmware_header_v1_0 *me_hdr;
2911	const __le32 *fw_data;
2912	unsigned i, fw_size;
2913
2914	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2915		return -EINVAL;
2916
2917	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2918		adev->gfx.pfp_fw->data;
2919	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2920		adev->gfx.ce_fw->data;
2921	me_hdr = (const struct gfx_firmware_header_v1_0 *)
2922		adev->gfx.me_fw->data;
2923
2924	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2925	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2926	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2927
2928	gfx_v8_0_cp_gfx_enable(adev, false);
2929
2930	/* PFP */
2931	fw_data = (const __le32 *)
2932		(adev->gfx.pfp_fw->data +
2933		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2934	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2935	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2936	for (i = 0; i < fw_size; i++)
2937		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2938	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2939
2940	/* CE */
2941	fw_data = (const __le32 *)
2942		(adev->gfx.ce_fw->data +
2943		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2944	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2945	WREG32(mmCP_CE_UCODE_ADDR, 0);
2946	for (i = 0; i < fw_size; i++)
2947		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2948	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2949
2950	/* ME */
2951	fw_data = (const __le32 *)
2952		(adev->gfx.me_fw->data +
2953		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2954	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2955	WREG32(mmCP_ME_RAM_WADDR, 0);
2956	for (i = 0; i < fw_size; i++)
2957		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2958	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2959
2960	return 0;
2961}
2962
2963static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
2964{
2965	u32 count = 0;
2966	const struct cs_section_def *sect = NULL;
2967	const struct cs_extent_def *ext = NULL;
2968
2969	/* begin clear state */
2970	count += 2;
2971	/* context control state */
2972	count += 3;
2973
2974	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
2975		for (ext = sect->section; ext->extent != NULL; ++ext) {
2976			if (sect->id == SECT_CONTEXT)
2977				count += 2 + ext->reg_count;
2978			else
2979				return 0;
2980		}
2981	}
2982	/* pa_sc_raster_config/pa_sc_raster_config1 */
2983	count += 4;
2984	/* end clear state */
2985	count += 2;
2986	/* clear state */
2987	count += 2;
2988
2989	return count;
2990}
2991
2992static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2993{
2994	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2995	const struct cs_section_def *sect = NULL;
2996	const struct cs_extent_def *ext = NULL;
2997	int r, i;
2998
2999	/* init the CP */
3000	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3001	WREG32(mmCP_ENDIAN_SWAP, 0);
3002	WREG32(mmCP_DEVICE_ID, 1);
3003
3004	gfx_v8_0_cp_gfx_enable(adev, true);
3005
3006	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
3007	if (r) {
3008		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3009		return r;
3010	}
3011
3012	/* clear state buffer */
3013	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3014	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3015
3016	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3017	amdgpu_ring_write(ring, 0x80000000);
3018	amdgpu_ring_write(ring, 0x80000000);
3019
3020	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
3021		for (ext = sect->section; ext->extent != NULL; ++ext) {
3022			if (sect->id == SECT_CONTEXT) {
3023				amdgpu_ring_write(ring,
3024				       PACKET3(PACKET3_SET_CONTEXT_REG,
3025					       ext->reg_count));
3026				amdgpu_ring_write(ring,
3027				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3028				for (i = 0; i < ext->reg_count; i++)
3029					amdgpu_ring_write(ring, ext->extent[i]);
3030			}
3031		}
3032	}
3033
3034	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3035	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
3036	switch (adev->asic_type) {
3037	case CHIP_TONGA:
3038		amdgpu_ring_write(ring, 0x16000012);
3039		amdgpu_ring_write(ring, 0x0000002A);
3040		break;
3041	case CHIP_FIJI:
3042		amdgpu_ring_write(ring, 0x3a00161a);
3043		amdgpu_ring_write(ring, 0x0000002e);
3044		break;
3045	case CHIP_TOPAZ:
3046	case CHIP_CARRIZO:
3047		amdgpu_ring_write(ring, 0x00000002);
3048		amdgpu_ring_write(ring, 0x00000000);
3049		break;
3050	case CHIP_STONEY:
3051		amdgpu_ring_write(ring, 0x00000000);
3052		amdgpu_ring_write(ring, 0x00000000);
3053		break;
3054	default:
3055		BUG();
3056	}
3057
3058	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3059	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3060
3061	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3062	amdgpu_ring_write(ring, 0);
3063
3064	/* init the CE partitions */
3065	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3066	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3067	amdgpu_ring_write(ring, 0x8000);
3068	amdgpu_ring_write(ring, 0x8000);
3069
3070	amdgpu_ring_commit(ring);
3071
3072	return 0;
3073}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3074
3075static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
3076{
3077	struct amdgpu_ring *ring;
3078	u32 tmp;
3079	u32 rb_bufsz;
3080	u64 rb_addr, rptr_addr;
3081	int r;
3082
3083	/* Set the write pointer delay */
3084	WREG32(mmCP_RB_WPTR_DELAY, 0);
3085
3086	/* set the RB to use vmid 0 */
3087	WREG32(mmCP_RB_VMID, 0);
3088
3089	/* Set ring buffer size */
3090	ring = &adev->gfx.gfx_ring[0];
3091	rb_bufsz = order_base_2(ring->ring_size / 8);
3092	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3093	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3094	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
3095	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
3096#ifdef __BIG_ENDIAN
3097	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3098#endif
3099	WREG32(mmCP_RB0_CNTL, tmp);
3100
3101	/* Initialize the ring buffer's read and write pointers */
3102	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
3103	ring->wptr = 0;
3104	WREG32(mmCP_RB0_WPTR, ring->wptr);
3105
3106	/* set the wb address wether it's enabled or not */
3107	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3108	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3109	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
3110
 
 
 
3111	mdelay(1);
3112	WREG32(mmCP_RB0_CNTL, tmp);
3113
3114	rb_addr = ring->gpu_addr >> 8;
3115	WREG32(mmCP_RB0_BASE, rb_addr);
3116	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3117
3118	/* no gfx doorbells on iceland */
3119	if (adev->asic_type != CHIP_TOPAZ) {
3120		tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
3121		if (ring->use_doorbell) {
3122			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3123					    DOORBELL_OFFSET, ring->doorbell_index);
3124			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3125					    DOORBELL_EN, 1);
3126		} else {
3127			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3128					    DOORBELL_EN, 0);
3129		}
3130		WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
3131
3132		if (adev->asic_type == CHIP_TONGA) {
3133			tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3134					    DOORBELL_RANGE_LOWER,
3135					    AMDGPU_DOORBELL_GFX_RING0);
3136			WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3137
3138			WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
3139			       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3140		}
3141
3142	}
3143
3144	/* start the ring */
 
3145	gfx_v8_0_cp_gfx_start(adev);
3146	ring->ready = true;
3147	r = amdgpu_ring_test_ring(ring);
3148	if (r) {
3149		ring->ready = false;
3150		return r;
3151	}
3152
3153	return 0;
3154}
3155
3156static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3157{
3158	int i;
3159
3160	if (enable) {
3161		WREG32(mmCP_MEC_CNTL, 0);
3162	} else {
3163		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3164		for (i = 0; i < adev->gfx.num_compute_rings; i++)
3165			adev->gfx.compute_ring[i].ready = false;
3166	}
3167	udelay(50);
3168}
3169
3170static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 
3171{
3172	const struct gfx_firmware_header_v1_0 *mec_hdr;
3173	const __le32 *fw_data;
3174	unsigned i, fw_size;
3175
3176	if (!adev->gfx.mec_fw)
3177		return -EINVAL;
3178
3179	gfx_v8_0_cp_compute_enable(adev, false);
 
 
 
 
 
 
 
3180
3181	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3182	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
 
 
 
3183
3184	fw_data = (const __le32 *)
3185		(adev->gfx.mec_fw->data +
3186		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3187	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
3188
3189	/* MEC1 */
3190	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
3191	for (i = 0; i < fw_size; i++)
3192		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
3193	WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
 
3194
3195	/* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3196	if (adev->gfx.mec2_fw) {
3197		const struct gfx_firmware_header_v1_0 *mec2_hdr;
3198
3199		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3200		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3201
3202		fw_data = (const __le32 *)
3203			(adev->gfx.mec2_fw->data +
3204			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
3205		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
3206
3207		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
3208		for (i = 0; i < fw_size; i++)
3209			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
3210		WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
 
 
 
 
 
3211	}
3212
 
 
3213	return 0;
3214}
3215
3216struct vi_mqd {
3217	uint32_t header;  /* ordinal0 */
3218	uint32_t compute_dispatch_initiator;  /* ordinal1 */
3219	uint32_t compute_dim_x;  /* ordinal2 */
3220	uint32_t compute_dim_y;  /* ordinal3 */
3221	uint32_t compute_dim_z;  /* ordinal4 */
3222	uint32_t compute_start_x;  /* ordinal5 */
3223	uint32_t compute_start_y;  /* ordinal6 */
3224	uint32_t compute_start_z;  /* ordinal7 */
3225	uint32_t compute_num_thread_x;  /* ordinal8 */
3226	uint32_t compute_num_thread_y;  /* ordinal9 */
3227	uint32_t compute_num_thread_z;  /* ordinal10 */
3228	uint32_t compute_pipelinestat_enable;  /* ordinal11 */
3229	uint32_t compute_perfcount_enable;  /* ordinal12 */
3230	uint32_t compute_pgm_lo;  /* ordinal13 */
3231	uint32_t compute_pgm_hi;  /* ordinal14 */
3232	uint32_t compute_tba_lo;  /* ordinal15 */
3233	uint32_t compute_tba_hi;  /* ordinal16 */
3234	uint32_t compute_tma_lo;  /* ordinal17 */
3235	uint32_t compute_tma_hi;  /* ordinal18 */
3236	uint32_t compute_pgm_rsrc1;  /* ordinal19 */
3237	uint32_t compute_pgm_rsrc2;  /* ordinal20 */
3238	uint32_t compute_vmid;  /* ordinal21 */
3239	uint32_t compute_resource_limits;  /* ordinal22 */
3240	uint32_t compute_static_thread_mgmt_se0;  /* ordinal23 */
3241	uint32_t compute_static_thread_mgmt_se1;  /* ordinal24 */
3242	uint32_t compute_tmpring_size;  /* ordinal25 */
3243	uint32_t compute_static_thread_mgmt_se2;  /* ordinal26 */
3244	uint32_t compute_static_thread_mgmt_se3;  /* ordinal27 */
3245	uint32_t compute_restart_x;  /* ordinal28 */
3246	uint32_t compute_restart_y;  /* ordinal29 */
3247	uint32_t compute_restart_z;  /* ordinal30 */
3248	uint32_t compute_thread_trace_enable;  /* ordinal31 */
3249	uint32_t compute_misc_reserved;  /* ordinal32 */
3250	uint32_t compute_dispatch_id;  /* ordinal33 */
3251	uint32_t compute_threadgroup_id;  /* ordinal34 */
3252	uint32_t compute_relaunch;  /* ordinal35 */
3253	uint32_t compute_wave_restore_addr_lo;  /* ordinal36 */
3254	uint32_t compute_wave_restore_addr_hi;  /* ordinal37 */
3255	uint32_t compute_wave_restore_control;  /* ordinal38 */
3256	uint32_t reserved9;  /* ordinal39 */
3257	uint32_t reserved10;  /* ordinal40 */
3258	uint32_t reserved11;  /* ordinal41 */
3259	uint32_t reserved12;  /* ordinal42 */
3260	uint32_t reserved13;  /* ordinal43 */
3261	uint32_t reserved14;  /* ordinal44 */
3262	uint32_t reserved15;  /* ordinal45 */
3263	uint32_t reserved16;  /* ordinal46 */
3264	uint32_t reserved17;  /* ordinal47 */
3265	uint32_t reserved18;  /* ordinal48 */
3266	uint32_t reserved19;  /* ordinal49 */
3267	uint32_t reserved20;  /* ordinal50 */
3268	uint32_t reserved21;  /* ordinal51 */
3269	uint32_t reserved22;  /* ordinal52 */
3270	uint32_t reserved23;  /* ordinal53 */
3271	uint32_t reserved24;  /* ordinal54 */
3272	uint32_t reserved25;  /* ordinal55 */
3273	uint32_t reserved26;  /* ordinal56 */
3274	uint32_t reserved27;  /* ordinal57 */
3275	uint32_t reserved28;  /* ordinal58 */
3276	uint32_t reserved29;  /* ordinal59 */
3277	uint32_t reserved30;  /* ordinal60 */
3278	uint32_t reserved31;  /* ordinal61 */
3279	uint32_t reserved32;  /* ordinal62 */
3280	uint32_t reserved33;  /* ordinal63 */
3281	uint32_t reserved34;  /* ordinal64 */
3282	uint32_t compute_user_data_0;  /* ordinal65 */
3283	uint32_t compute_user_data_1;  /* ordinal66 */
3284	uint32_t compute_user_data_2;  /* ordinal67 */
3285	uint32_t compute_user_data_3;  /* ordinal68 */
3286	uint32_t compute_user_data_4;  /* ordinal69 */
3287	uint32_t compute_user_data_5;  /* ordinal70 */
3288	uint32_t compute_user_data_6;  /* ordinal71 */
3289	uint32_t compute_user_data_7;  /* ordinal72 */
3290	uint32_t compute_user_data_8;  /* ordinal73 */
3291	uint32_t compute_user_data_9;  /* ordinal74 */
3292	uint32_t compute_user_data_10;  /* ordinal75 */
3293	uint32_t compute_user_data_11;  /* ordinal76 */
3294	uint32_t compute_user_data_12;  /* ordinal77 */
3295	uint32_t compute_user_data_13;  /* ordinal78 */
3296	uint32_t compute_user_data_14;  /* ordinal79 */
3297	uint32_t compute_user_data_15;  /* ordinal80 */
3298	uint32_t cp_compute_csinvoc_count_lo;  /* ordinal81 */
3299	uint32_t cp_compute_csinvoc_count_hi;  /* ordinal82 */
3300	uint32_t reserved35;  /* ordinal83 */
3301	uint32_t reserved36;  /* ordinal84 */
3302	uint32_t reserved37;  /* ordinal85 */
3303	uint32_t cp_mqd_query_time_lo;  /* ordinal86 */
3304	uint32_t cp_mqd_query_time_hi;  /* ordinal87 */
3305	uint32_t cp_mqd_connect_start_time_lo;  /* ordinal88 */
3306	uint32_t cp_mqd_connect_start_time_hi;  /* ordinal89 */
3307	uint32_t cp_mqd_connect_end_time_lo;  /* ordinal90 */
3308	uint32_t cp_mqd_connect_end_time_hi;  /* ordinal91 */
3309	uint32_t cp_mqd_connect_end_wf_count;  /* ordinal92 */
3310	uint32_t cp_mqd_connect_end_pq_rptr;  /* ordinal93 */
3311	uint32_t cp_mqd_connect_end_pq_wptr;  /* ordinal94 */
3312	uint32_t cp_mqd_connect_end_ib_rptr;  /* ordinal95 */
3313	uint32_t reserved38;  /* ordinal96 */
3314	uint32_t reserved39;  /* ordinal97 */
3315	uint32_t cp_mqd_save_start_time_lo;  /* ordinal98 */
3316	uint32_t cp_mqd_save_start_time_hi;  /* ordinal99 */
3317	uint32_t cp_mqd_save_end_time_lo;  /* ordinal100 */
3318	uint32_t cp_mqd_save_end_time_hi;  /* ordinal101 */
3319	uint32_t cp_mqd_restore_start_time_lo;  /* ordinal102 */
3320	uint32_t cp_mqd_restore_start_time_hi;  /* ordinal103 */
3321	uint32_t cp_mqd_restore_end_time_lo;  /* ordinal104 */
3322	uint32_t cp_mqd_restore_end_time_hi;  /* ordinal105 */
3323	uint32_t reserved40;  /* ordinal106 */
3324	uint32_t reserved41;  /* ordinal107 */
3325	uint32_t gds_cs_ctxsw_cnt0;  /* ordinal108 */
3326	uint32_t gds_cs_ctxsw_cnt1;  /* ordinal109 */
3327	uint32_t gds_cs_ctxsw_cnt2;  /* ordinal110 */
3328	uint32_t gds_cs_ctxsw_cnt3;  /* ordinal111 */
3329	uint32_t reserved42;  /* ordinal112 */
3330	uint32_t reserved43;  /* ordinal113 */
3331	uint32_t cp_pq_exe_status_lo;  /* ordinal114 */
3332	uint32_t cp_pq_exe_status_hi;  /* ordinal115 */
3333	uint32_t cp_packet_id_lo;  /* ordinal116 */
3334	uint32_t cp_packet_id_hi;  /* ordinal117 */
3335	uint32_t cp_packet_exe_status_lo;  /* ordinal118 */
3336	uint32_t cp_packet_exe_status_hi;  /* ordinal119 */
3337	uint32_t gds_save_base_addr_lo;  /* ordinal120 */
3338	uint32_t gds_save_base_addr_hi;  /* ordinal121 */
3339	uint32_t gds_save_mask_lo;  /* ordinal122 */
3340	uint32_t gds_save_mask_hi;  /* ordinal123 */
3341	uint32_t ctx_save_base_addr_lo;  /* ordinal124 */
3342	uint32_t ctx_save_base_addr_hi;  /* ordinal125 */
3343	uint32_t reserved44;  /* ordinal126 */
3344	uint32_t reserved45;  /* ordinal127 */
3345	uint32_t cp_mqd_base_addr_lo;  /* ordinal128 */
3346	uint32_t cp_mqd_base_addr_hi;  /* ordinal129 */
3347	uint32_t cp_hqd_active;  /* ordinal130 */
3348	uint32_t cp_hqd_vmid;  /* ordinal131 */
3349	uint32_t cp_hqd_persistent_state;  /* ordinal132 */
3350	uint32_t cp_hqd_pipe_priority;  /* ordinal133 */
3351	uint32_t cp_hqd_queue_priority;  /* ordinal134 */
3352	uint32_t cp_hqd_quantum;  /* ordinal135 */
3353	uint32_t cp_hqd_pq_base_lo;  /* ordinal136 */
3354	uint32_t cp_hqd_pq_base_hi;  /* ordinal137 */
3355	uint32_t cp_hqd_pq_rptr;  /* ordinal138 */
3356	uint32_t cp_hqd_pq_rptr_report_addr_lo;  /* ordinal139 */
3357	uint32_t cp_hqd_pq_rptr_report_addr_hi;  /* ordinal140 */
3358	uint32_t cp_hqd_pq_wptr_poll_addr;  /* ordinal141 */
3359	uint32_t cp_hqd_pq_wptr_poll_addr_hi;  /* ordinal142 */
3360	uint32_t cp_hqd_pq_doorbell_control;  /* ordinal143 */
3361	uint32_t cp_hqd_pq_wptr;  /* ordinal144 */
3362	uint32_t cp_hqd_pq_control;  /* ordinal145 */
3363	uint32_t cp_hqd_ib_base_addr_lo;  /* ordinal146 */
3364	uint32_t cp_hqd_ib_base_addr_hi;  /* ordinal147 */
3365	uint32_t cp_hqd_ib_rptr;  /* ordinal148 */
3366	uint32_t cp_hqd_ib_control;  /* ordinal149 */
3367	uint32_t cp_hqd_iq_timer;  /* ordinal150 */
3368	uint32_t cp_hqd_iq_rptr;  /* ordinal151 */
3369	uint32_t cp_hqd_dequeue_request;  /* ordinal152 */
3370	uint32_t cp_hqd_dma_offload;  /* ordinal153 */
3371	uint32_t cp_hqd_sema_cmd;  /* ordinal154 */
3372	uint32_t cp_hqd_msg_type;  /* ordinal155 */
3373	uint32_t cp_hqd_atomic0_preop_lo;  /* ordinal156 */
3374	uint32_t cp_hqd_atomic0_preop_hi;  /* ordinal157 */
3375	uint32_t cp_hqd_atomic1_preop_lo;  /* ordinal158 */
3376	uint32_t cp_hqd_atomic1_preop_hi;  /* ordinal159 */
3377	uint32_t cp_hqd_hq_status0;  /* ordinal160 */
3378	uint32_t cp_hqd_hq_control0;  /* ordinal161 */
3379	uint32_t cp_mqd_control;  /* ordinal162 */
3380	uint32_t cp_hqd_hq_status1;  /* ordinal163 */
3381	uint32_t cp_hqd_hq_control1;  /* ordinal164 */
3382	uint32_t cp_hqd_eop_base_addr_lo;  /* ordinal165 */
3383	uint32_t cp_hqd_eop_base_addr_hi;  /* ordinal166 */
3384	uint32_t cp_hqd_eop_control;  /* ordinal167 */
3385	uint32_t cp_hqd_eop_rptr;  /* ordinal168 */
3386	uint32_t cp_hqd_eop_wptr;  /* ordinal169 */
3387	uint32_t cp_hqd_eop_done_events;  /* ordinal170 */
3388	uint32_t cp_hqd_ctx_save_base_addr_lo;  /* ordinal171 */
3389	uint32_t cp_hqd_ctx_save_base_addr_hi;  /* ordinal172 */
3390	uint32_t cp_hqd_ctx_save_control;  /* ordinal173 */
3391	uint32_t cp_hqd_cntl_stack_offset;  /* ordinal174 */
3392	uint32_t cp_hqd_cntl_stack_size;  /* ordinal175 */
3393	uint32_t cp_hqd_wg_state_offset;  /* ordinal176 */
3394	uint32_t cp_hqd_ctx_save_size;  /* ordinal177 */
3395	uint32_t cp_hqd_gds_resource_state;  /* ordinal178 */
3396	uint32_t cp_hqd_error;  /* ordinal179 */
3397	uint32_t cp_hqd_eop_wptr_mem;  /* ordinal180 */
3398	uint32_t cp_hqd_eop_dones;  /* ordinal181 */
3399	uint32_t reserved46;  /* ordinal182 */
3400	uint32_t reserved47;  /* ordinal183 */
3401	uint32_t reserved48;  /* ordinal184 */
3402	uint32_t reserved49;  /* ordinal185 */
3403	uint32_t reserved50;  /* ordinal186 */
3404	uint32_t reserved51;  /* ordinal187 */
3405	uint32_t reserved52;  /* ordinal188 */
3406	uint32_t reserved53;  /* ordinal189 */
3407	uint32_t reserved54;  /* ordinal190 */
3408	uint32_t reserved55;  /* ordinal191 */
3409	uint32_t iqtimer_pkt_header;  /* ordinal192 */
3410	uint32_t iqtimer_pkt_dw0;  /* ordinal193 */
3411	uint32_t iqtimer_pkt_dw1;  /* ordinal194 */
3412	uint32_t iqtimer_pkt_dw2;  /* ordinal195 */
3413	uint32_t iqtimer_pkt_dw3;  /* ordinal196 */
3414	uint32_t iqtimer_pkt_dw4;  /* ordinal197 */
3415	uint32_t iqtimer_pkt_dw5;  /* ordinal198 */
3416	uint32_t iqtimer_pkt_dw6;  /* ordinal199 */
3417	uint32_t iqtimer_pkt_dw7;  /* ordinal200 */
3418	uint32_t iqtimer_pkt_dw8;  /* ordinal201 */
3419	uint32_t iqtimer_pkt_dw9;  /* ordinal202 */
3420	uint32_t iqtimer_pkt_dw10;  /* ordinal203 */
3421	uint32_t iqtimer_pkt_dw11;  /* ordinal204 */
3422	uint32_t iqtimer_pkt_dw12;  /* ordinal205 */
3423	uint32_t iqtimer_pkt_dw13;  /* ordinal206 */
3424	uint32_t iqtimer_pkt_dw14;  /* ordinal207 */
3425	uint32_t iqtimer_pkt_dw15;  /* ordinal208 */
3426	uint32_t iqtimer_pkt_dw16;  /* ordinal209 */
3427	uint32_t iqtimer_pkt_dw17;  /* ordinal210 */
3428	uint32_t iqtimer_pkt_dw18;  /* ordinal211 */
3429	uint32_t iqtimer_pkt_dw19;  /* ordinal212 */
3430	uint32_t iqtimer_pkt_dw20;  /* ordinal213 */
3431	uint32_t iqtimer_pkt_dw21;  /* ordinal214 */
3432	uint32_t iqtimer_pkt_dw22;  /* ordinal215 */
3433	uint32_t iqtimer_pkt_dw23;  /* ordinal216 */
3434	uint32_t iqtimer_pkt_dw24;  /* ordinal217 */
3435	uint32_t iqtimer_pkt_dw25;  /* ordinal218 */
3436	uint32_t iqtimer_pkt_dw26;  /* ordinal219 */
3437	uint32_t iqtimer_pkt_dw27;  /* ordinal220 */
3438	uint32_t iqtimer_pkt_dw28;  /* ordinal221 */
3439	uint32_t iqtimer_pkt_dw29;  /* ordinal222 */
3440	uint32_t iqtimer_pkt_dw30;  /* ordinal223 */
3441	uint32_t iqtimer_pkt_dw31;  /* ordinal224 */
3442	uint32_t reserved56;  /* ordinal225 */
3443	uint32_t reserved57;  /* ordinal226 */
3444	uint32_t reserved58;  /* ordinal227 */
3445	uint32_t set_resources_header;  /* ordinal228 */
3446	uint32_t set_resources_dw1;  /* ordinal229 */
3447	uint32_t set_resources_dw2;  /* ordinal230 */
3448	uint32_t set_resources_dw3;  /* ordinal231 */
3449	uint32_t set_resources_dw4;  /* ordinal232 */
3450	uint32_t set_resources_dw5;  /* ordinal233 */
3451	uint32_t set_resources_dw6;  /* ordinal234 */
3452	uint32_t set_resources_dw7;  /* ordinal235 */
3453	uint32_t reserved59;  /* ordinal236 */
3454	uint32_t reserved60;  /* ordinal237 */
3455	uint32_t reserved61;  /* ordinal238 */
3456	uint32_t reserved62;  /* ordinal239 */
3457	uint32_t reserved63;  /* ordinal240 */
3458	uint32_t reserved64;  /* ordinal241 */
3459	uint32_t reserved65;  /* ordinal242 */
3460	uint32_t reserved66;  /* ordinal243 */
3461	uint32_t reserved67;  /* ordinal244 */
3462	uint32_t reserved68;  /* ordinal245 */
3463	uint32_t reserved69;  /* ordinal246 */
3464	uint32_t reserved70;  /* ordinal247 */
3465	uint32_t reserved71;  /* ordinal248 */
3466	uint32_t reserved72;  /* ordinal249 */
3467	uint32_t reserved73;  /* ordinal250 */
3468	uint32_t reserved74;  /* ordinal251 */
3469	uint32_t reserved75;  /* ordinal252 */
3470	uint32_t reserved76;  /* ordinal253 */
3471	uint32_t reserved77;  /* ordinal254 */
3472	uint32_t reserved78;  /* ordinal255 */
3473
3474	uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
3475};
3476
3477static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
3478{
3479	int i, r;
3480
3481	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3482		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
 
 
 
 
 
 
 
 
 
 
3483
3484		if (ring->mqd_obj) {
3485			r = amdgpu_bo_reserve(ring->mqd_obj, false);
3486			if (unlikely(r != 0))
3487				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
3488
3489			amdgpu_bo_unpin(ring->mqd_obj);
3490			amdgpu_bo_unreserve(ring->mqd_obj);
 
3491
3492			amdgpu_bo_unref(&ring->mqd_obj);
3493			ring->mqd_obj = NULL;
 
 
 
3494		}
3495	}
3496}
3497
3498static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3499{
3500	int r, i, j;
3501	u32 tmp;
3502	bool use_doorbell = true;
3503	u64 hqd_gpu_addr;
3504	u64 mqd_gpu_addr;
3505	u64 eop_gpu_addr;
3506	u64 wb_gpu_addr;
3507	u32 *buf;
3508	struct vi_mqd *mqd;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3509
3510	/* init the pipes */
3511	mutex_lock(&adev->srbm_mutex);
3512	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
3513		int me = (i < 4) ? 1 : 2;
3514		int pipe = (i < 4) ? i : (i - 4);
3515
3516		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
3517		eop_gpu_addr >>= 8;
3518
3519		vi_srbm_select(adev, me, pipe, 0, 0);
3520
3521		/* write the EOP addr */
3522		WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
3523		WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
3524
3525		/* set the VMID assigned */
3526		WREG32(mmCP_HQD_VMID, 0);
3527
3528		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3529		tmp = RREG32(mmCP_HQD_EOP_CONTROL);
3530		tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3531				    (order_base_2(MEC_HPD_SIZE / 4) - 1));
3532		WREG32(mmCP_HQD_EOP_CONTROL, tmp);
3533	}
3534	vi_srbm_select(adev, 0, 0, 0, 0);
3535	mutex_unlock(&adev->srbm_mutex);
3536
3537	/* init the queues.  Just two for now. */
3538	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3539		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3540
3541		if (ring->mqd_obj == NULL) {
3542			r = amdgpu_bo_create(adev,
3543					     sizeof(struct vi_mqd),
3544					     PAGE_SIZE, true,
3545					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
3546					     NULL, &ring->mqd_obj);
3547			if (r) {
3548				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3549				return r;
3550			}
3551		}
3552
3553		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3554		if (unlikely(r != 0)) {
3555			gfx_v8_0_cp_compute_fini(adev);
3556			return r;
3557		}
3558		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
3559				  &mqd_gpu_addr);
3560		if (r) {
3561			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
3562			gfx_v8_0_cp_compute_fini(adev);
3563			return r;
3564		}
3565		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
3566		if (r) {
3567			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
3568			gfx_v8_0_cp_compute_fini(adev);
3569			return r;
3570		}
3571
3572		/* init the mqd struct */
3573		memset(buf, 0, sizeof(struct vi_mqd));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3574
3575		mqd = (struct vi_mqd *)buf;
3576		mqd->header = 0xC0310800;
3577		mqd->compute_pipelinestat_enable = 0x00000001;
3578		mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3579		mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3580		mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3581		mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3582		mqd->compute_misc_reserved = 0x00000003;
3583
3584		mutex_lock(&adev->srbm_mutex);
3585		vi_srbm_select(adev, ring->me,
3586			       ring->pipe,
3587			       ring->queue, 0);
3588
3589		/* disable wptr polling */
3590		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3591		tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3592		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3593
3594		mqd->cp_hqd_eop_base_addr_lo =
3595			RREG32(mmCP_HQD_EOP_BASE_ADDR);
3596		mqd->cp_hqd_eop_base_addr_hi =
3597			RREG32(mmCP_HQD_EOP_BASE_ADDR_HI);
3598
3599		/* enable doorbell? */
3600		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3601		if (use_doorbell) {
3602			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
3603		} else {
3604			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);
3605		}
3606		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
3607		mqd->cp_hqd_pq_doorbell_control = tmp;
3608
3609		/* disable the queue if it's active */
3610		mqd->cp_hqd_dequeue_request = 0;
3611		mqd->cp_hqd_pq_rptr = 0;
3612		mqd->cp_hqd_pq_wptr= 0;
3613		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
3614			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
3615			for (j = 0; j < adev->usec_timeout; j++) {
3616				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
3617					break;
3618				udelay(1);
3619			}
3620			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
3621			WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
3622			WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
3623		}
3624
3625		/* set the pointer to the MQD */
3626		mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
3627		mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3628		WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
3629		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3630
3631		/* set MQD vmid to 0 */
3632		tmp = RREG32(mmCP_MQD_CONTROL);
3633		tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3634		WREG32(mmCP_MQD_CONTROL, tmp);
3635		mqd->cp_mqd_control = tmp;
3636
3637		/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3638		hqd_gpu_addr = ring->gpu_addr >> 8;
3639		mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3640		mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3641		WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
3642		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
3643
3644		/* set up the HQD, this is similar to CP_RB0_CNTL */
3645		tmp = RREG32(mmCP_HQD_PQ_CONTROL);
3646		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3647				    (order_base_2(ring->ring_size / 4) - 1));
3648		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3649			       ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3650#ifdef __BIG_ENDIAN
3651		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3652#endif
3653		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3654		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3655		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3656		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3657		WREG32(mmCP_HQD_PQ_CONTROL, tmp);
3658		mqd->cp_hqd_pq_control = tmp;
3659
3660		/* set the wb address wether it's enabled or not */
3661		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3662		mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3663		mqd->cp_hqd_pq_rptr_report_addr_hi =
3664			upper_32_bits(wb_gpu_addr) & 0xffff;
3665		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3666		       mqd->cp_hqd_pq_rptr_report_addr_lo);
3667		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3668		       mqd->cp_hqd_pq_rptr_report_addr_hi);
3669
3670		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3671		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3672		mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3673		mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3674		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
3675		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3676		       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3677
3678		/* enable the doorbell if requested */
3679		if (use_doorbell) {
3680			if ((adev->asic_type == CHIP_CARRIZO) ||
3681			    (adev->asic_type == CHIP_FIJI) ||
3682			    (adev->asic_type == CHIP_STONEY)) {
3683				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3684				       AMDGPU_DOORBELL_KIQ << 2);
3685				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
3686				       AMDGPU_DOORBELL_MEC_RING7 << 2);
3687			}
3688			tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3689			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3690					    DOORBELL_OFFSET, ring->doorbell_index);
3691			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
3692			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0);
3693			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0);
3694			mqd->cp_hqd_pq_doorbell_control = tmp;
3695
3696		} else {
3697			mqd->cp_hqd_pq_doorbell_control = 0;
3698		}
3699		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3700		       mqd->cp_hqd_pq_doorbell_control);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3701
3702		/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3703		ring->wptr = 0;
3704		mqd->cp_hqd_pq_wptr = ring->wptr;
3705		WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
3706		mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3707
3708		/* set the vmid for the queue */
3709		mqd->cp_hqd_vmid = 0;
3710		WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3711
3712		tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
3713		tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3714		WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
3715		mqd->cp_hqd_persistent_state = tmp;
3716		if (adev->asic_type == CHIP_STONEY) {
3717			tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
3718			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
3719			WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
3720		}
3721
3722		/* activate the queue */
3723		mqd->cp_hqd_active = 1;
3724		WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
 
 
 
 
 
 
 
 
 
3725
 
 
 
 
 
 
 
3726		vi_srbm_select(adev, 0, 0, 0, 0);
3727		mutex_unlock(&adev->srbm_mutex);
3728
3729		amdgpu_bo_kunmap(ring->mqd_obj);
3730		amdgpu_bo_unreserve(ring->mqd_obj);
 
 
 
 
 
 
 
 
 
3731	}
 
 
3732
3733	if (use_doorbell) {
3734		tmp = RREG32(mmCP_PQ_STATUS);
3735		tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3736		WREG32(mmCP_PQ_STATUS, tmp);
 
3737	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3738
3739	gfx_v8_0_cp_compute_enable(adev, true);
3740
3741	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3742		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3743
3744		ring->ready = true;
3745		r = amdgpu_ring_test_ring(ring);
 
 
 
 
 
 
 
 
3746		if (r)
3747			ring->ready = false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3748	}
3749
3750	return 0;
3751}
3752
3753static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
3754{
3755	int r;
3756
3757	if (!(adev->flags & AMD_IS_APU))
3758		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
3759
3760	if (!adev->pp_enabled) {
3761		if (!adev->firmware.smu_load) {
3762			/* legacy firmware loading */
3763			r = gfx_v8_0_cp_gfx_load_microcode(adev);
3764			if (r)
3765				return r;
3766
3767			r = gfx_v8_0_cp_compute_load_microcode(adev);
3768			if (r)
3769				return r;
3770		} else {
3771			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3772							AMDGPU_UCODE_ID_CP_CE);
3773			if (r)
3774				return -EINVAL;
3775
3776			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3777							AMDGPU_UCODE_ID_CP_PFP);
3778			if (r)
3779				return -EINVAL;
3780
3781			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3782							AMDGPU_UCODE_ID_CP_ME);
3783			if (r)
3784				return -EINVAL;
3785
3786			if (adev->asic_type == CHIP_TOPAZ) {
3787				r = gfx_v8_0_cp_compute_load_microcode(adev);
3788				if (r)
3789					return r;
3790			} else {
3791				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3792										 AMDGPU_UCODE_ID_CP_MEC1);
3793				if (r)
3794					return -EINVAL;
3795			}
3796		}
3797	}
3798
3799	r = gfx_v8_0_cp_gfx_resume(adev);
3800	if (r)
3801		return r;
3802
3803	r = gfx_v8_0_cp_compute_resume(adev);
 
 
 
 
3804	if (r)
3805		return r;
3806
3807	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
3808
3809	return 0;
3810}
3811
3812static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
3813{
3814	gfx_v8_0_cp_gfx_enable(adev, enable);
3815	gfx_v8_0_cp_compute_enable(adev, enable);
3816}
3817
3818static int gfx_v8_0_hw_init(void *handle)
3819{
3820	int r;
3821	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3822
3823	gfx_v8_0_init_golden_registers(adev);
 
3824
3825	gfx_v8_0_gpu_init(adev);
3826
3827	r = gfx_v8_0_rlc_resume(adev);
3828	if (r)
3829		return r;
3830
3831	r = gfx_v8_0_cp_resume(adev);
3832	if (r)
3833		return r;
3834
3835	return r;
3836}
3837
3838static int gfx_v8_0_hw_fini(void *handle)
3839{
3840	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
3841
3842	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3843	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3844	gfx_v8_0_cp_enable(adev, false);
3845	gfx_v8_0_rlc_stop(adev);
3846	gfx_v8_0_cp_compute_fini(adev);
3847
3848	return 0;
3849}
3850
3851static int gfx_v8_0_suspend(void *handle)
3852{
3853	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
 
 
 
 
 
 
 
 
 
3854
3855	return gfx_v8_0_hw_fini(adev);
3856}
3857
3858static int gfx_v8_0_resume(void *handle)
3859{
3860	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3861
3862	return gfx_v8_0_hw_init(adev);
 
 
 
 
3863}
3864
3865static bool gfx_v8_0_is_idle(void *handle)
3866{
3867	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3868
3869	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
3870		return false;
3871	else
3872		return true;
3873}
3874
3875static int gfx_v8_0_wait_for_idle(void *handle)
3876{
3877	unsigned i;
3878	u32 tmp;
3879	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3880
3881	for (i = 0; i < adev->usec_timeout; i++) {
3882		/* read MC_STATUS */
3883		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
3884
3885		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3886			return 0;
 
3887		udelay(1);
3888	}
3889	return -ETIMEDOUT;
3890}
3891
3892static void gfx_v8_0_print_status(void *handle)
3893{
3894	int i;
3895	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3896
3897	dev_info(adev->dev, "GFX 8.x registers\n");
3898	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
3899		 RREG32(mmGRBM_STATUS));
3900	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
3901		 RREG32(mmGRBM_STATUS2));
3902	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
3903		 RREG32(mmGRBM_STATUS_SE0));
3904	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
3905		 RREG32(mmGRBM_STATUS_SE1));
3906	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
3907		 RREG32(mmGRBM_STATUS_SE2));
3908	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
3909		 RREG32(mmGRBM_STATUS_SE3));
3910	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
3911	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
3912		 RREG32(mmCP_STALLED_STAT1));
3913	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
3914		 RREG32(mmCP_STALLED_STAT2));
3915	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
3916		 RREG32(mmCP_STALLED_STAT3));
3917	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
3918		 RREG32(mmCP_CPF_BUSY_STAT));
3919	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
3920		 RREG32(mmCP_CPF_STALLED_STAT1));
3921	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
3922	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
3923	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
3924		 RREG32(mmCP_CPC_STALLED_STAT1));
3925	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
3926
3927	for (i = 0; i < 32; i++) {
3928		dev_info(adev->dev, "  GB_TILE_MODE%d=0x%08X\n",
3929			 i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
3930	}
3931	for (i = 0; i < 16; i++) {
3932		dev_info(adev->dev, "  GB_MACROTILE_MODE%d=0x%08X\n",
3933			 i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
3934	}
3935	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3936		dev_info(adev->dev, "  se: %d\n", i);
3937		gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
3938		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG=0x%08X\n",
3939			 RREG32(mmPA_SC_RASTER_CONFIG));
3940		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG_1=0x%08X\n",
3941			 RREG32(mmPA_SC_RASTER_CONFIG_1));
3942	}
3943	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
3944
3945	dev_info(adev->dev, "  GB_ADDR_CONFIG=0x%08X\n",
3946		 RREG32(mmGB_ADDR_CONFIG));
3947	dev_info(adev->dev, "  HDP_ADDR_CONFIG=0x%08X\n",
3948		 RREG32(mmHDP_ADDR_CONFIG));
3949	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
3950		 RREG32(mmDMIF_ADDR_CALC));
3951
3952	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
3953		 RREG32(mmCP_MEQ_THRESHOLDS));
3954	dev_info(adev->dev, "  SX_DEBUG_1=0x%08X\n",
3955		 RREG32(mmSX_DEBUG_1));
3956	dev_info(adev->dev, "  TA_CNTL_AUX=0x%08X\n",
3957		 RREG32(mmTA_CNTL_AUX));
3958	dev_info(adev->dev, "  SPI_CONFIG_CNTL=0x%08X\n",
3959		 RREG32(mmSPI_CONFIG_CNTL));
3960	dev_info(adev->dev, "  SQ_CONFIG=0x%08X\n",
3961		 RREG32(mmSQ_CONFIG));
3962	dev_info(adev->dev, "  DB_DEBUG=0x%08X\n",
3963		 RREG32(mmDB_DEBUG));
3964	dev_info(adev->dev, "  DB_DEBUG2=0x%08X\n",
3965		 RREG32(mmDB_DEBUG2));
3966	dev_info(adev->dev, "  DB_DEBUG3=0x%08X\n",
3967		 RREG32(mmDB_DEBUG3));
3968	dev_info(adev->dev, "  CB_HW_CONTROL=0x%08X\n",
3969		 RREG32(mmCB_HW_CONTROL));
3970	dev_info(adev->dev, "  SPI_CONFIG_CNTL_1=0x%08X\n",
3971		 RREG32(mmSPI_CONFIG_CNTL_1));
3972	dev_info(adev->dev, "  PA_SC_FIFO_SIZE=0x%08X\n",
3973		 RREG32(mmPA_SC_FIFO_SIZE));
3974	dev_info(adev->dev, "  VGT_NUM_INSTANCES=0x%08X\n",
3975		 RREG32(mmVGT_NUM_INSTANCES));
3976	dev_info(adev->dev, "  CP_PERFMON_CNTL=0x%08X\n",
3977		 RREG32(mmCP_PERFMON_CNTL));
3978	dev_info(adev->dev, "  PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
3979		 RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
3980	dev_info(adev->dev, "  VGT_CACHE_INVALIDATION=0x%08X\n",
3981		 RREG32(mmVGT_CACHE_INVALIDATION));
3982	dev_info(adev->dev, "  VGT_GS_VERTEX_REUSE=0x%08X\n",
3983		 RREG32(mmVGT_GS_VERTEX_REUSE));
3984	dev_info(adev->dev, "  PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
3985		 RREG32(mmPA_SC_LINE_STIPPLE_STATE));
3986	dev_info(adev->dev, "  PA_CL_ENHANCE=0x%08X\n",
3987		 RREG32(mmPA_CL_ENHANCE));
3988	dev_info(adev->dev, "  PA_SC_ENHANCE=0x%08X\n",
3989		 RREG32(mmPA_SC_ENHANCE));
3990
3991	dev_info(adev->dev, "  CP_ME_CNTL=0x%08X\n",
3992		 RREG32(mmCP_ME_CNTL));
3993	dev_info(adev->dev, "  CP_MAX_CONTEXT=0x%08X\n",
3994		 RREG32(mmCP_MAX_CONTEXT));
3995	dev_info(adev->dev, "  CP_ENDIAN_SWAP=0x%08X\n",
3996		 RREG32(mmCP_ENDIAN_SWAP));
3997	dev_info(adev->dev, "  CP_DEVICE_ID=0x%08X\n",
3998		 RREG32(mmCP_DEVICE_ID));
3999
4000	dev_info(adev->dev, "  CP_SEM_WAIT_TIMER=0x%08X\n",
4001		 RREG32(mmCP_SEM_WAIT_TIMER));
4002
4003	dev_info(adev->dev, "  CP_RB_WPTR_DELAY=0x%08X\n",
4004		 RREG32(mmCP_RB_WPTR_DELAY));
4005	dev_info(adev->dev, "  CP_RB_VMID=0x%08X\n",
4006		 RREG32(mmCP_RB_VMID));
4007	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
4008		 RREG32(mmCP_RB0_CNTL));
4009	dev_info(adev->dev, "  CP_RB0_WPTR=0x%08X\n",
4010		 RREG32(mmCP_RB0_WPTR));
4011	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR=0x%08X\n",
4012		 RREG32(mmCP_RB0_RPTR_ADDR));
4013	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR_HI=0x%08X\n",
4014		 RREG32(mmCP_RB0_RPTR_ADDR_HI));
4015	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
4016		 RREG32(mmCP_RB0_CNTL));
4017	dev_info(adev->dev, "  CP_RB0_BASE=0x%08X\n",
4018		 RREG32(mmCP_RB0_BASE));
4019	dev_info(adev->dev, "  CP_RB0_BASE_HI=0x%08X\n",
4020		 RREG32(mmCP_RB0_BASE_HI));
4021	dev_info(adev->dev, "  CP_MEC_CNTL=0x%08X\n",
4022		 RREG32(mmCP_MEC_CNTL));
4023	dev_info(adev->dev, "  CP_CPF_DEBUG=0x%08X\n",
4024		 RREG32(mmCP_CPF_DEBUG));
4025
4026	dev_info(adev->dev, "  SCRATCH_ADDR=0x%08X\n",
4027		 RREG32(mmSCRATCH_ADDR));
4028	dev_info(adev->dev, "  SCRATCH_UMSK=0x%08X\n",
4029		 RREG32(mmSCRATCH_UMSK));
4030
4031	dev_info(adev->dev, "  CP_INT_CNTL_RING0=0x%08X\n",
4032		 RREG32(mmCP_INT_CNTL_RING0));
4033	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
4034		 RREG32(mmRLC_LB_CNTL));
4035	dev_info(adev->dev, "  RLC_CNTL=0x%08X\n",
4036		 RREG32(mmRLC_CNTL));
4037	dev_info(adev->dev, "  RLC_CGCG_CGLS_CTRL=0x%08X\n",
4038		 RREG32(mmRLC_CGCG_CGLS_CTRL));
4039	dev_info(adev->dev, "  RLC_LB_CNTR_INIT=0x%08X\n",
4040		 RREG32(mmRLC_LB_CNTR_INIT));
4041	dev_info(adev->dev, "  RLC_LB_CNTR_MAX=0x%08X\n",
4042		 RREG32(mmRLC_LB_CNTR_MAX));
4043	dev_info(adev->dev, "  RLC_LB_INIT_CU_MASK=0x%08X\n",
4044		 RREG32(mmRLC_LB_INIT_CU_MASK));
4045	dev_info(adev->dev, "  RLC_LB_PARAMS=0x%08X\n",
4046		 RREG32(mmRLC_LB_PARAMS));
4047	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
4048		 RREG32(mmRLC_LB_CNTL));
4049	dev_info(adev->dev, "  RLC_MC_CNTL=0x%08X\n",
4050		 RREG32(mmRLC_MC_CNTL));
4051	dev_info(adev->dev, "  RLC_UCODE_CNTL=0x%08X\n",
4052		 RREG32(mmRLC_UCODE_CNTL));
4053
4054	mutex_lock(&adev->srbm_mutex);
4055	for (i = 0; i < 16; i++) {
4056		vi_srbm_select(adev, 0, 0, 0, i);
4057		dev_info(adev->dev, "  VM %d:\n", i);
4058		dev_info(adev->dev, "  SH_MEM_CONFIG=0x%08X\n",
4059			 RREG32(mmSH_MEM_CONFIG));
4060		dev_info(adev->dev, "  SH_MEM_APE1_BASE=0x%08X\n",
4061			 RREG32(mmSH_MEM_APE1_BASE));
4062		dev_info(adev->dev, "  SH_MEM_APE1_LIMIT=0x%08X\n",
4063			 RREG32(mmSH_MEM_APE1_LIMIT));
4064		dev_info(adev->dev, "  SH_MEM_BASES=0x%08X\n",
4065			 RREG32(mmSH_MEM_BASES));
 
 
 
 
 
4066	}
4067	vi_srbm_select(adev, 0, 0, 0, 0);
4068	mutex_unlock(&adev->srbm_mutex);
 
 
 
 
 
 
 
 
 
 
4069}
4070
4071static int gfx_v8_0_soft_reset(void *handle)
4072{
 
 
 
 
 
 
 
 
 
 
 
4073	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4074	u32 tmp;
4075	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4076
4077	/* GRBM_STATUS */
4078	tmp = RREG32(mmGRBM_STATUS);
4079	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4080		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4081		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4082		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4083		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4084		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
 
4085		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4086						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4087		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4088						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4089	}
4090
4091	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4092		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4093						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4094		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4095						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4096	}
4097
4098	/* GRBM_STATUS2 */
4099	tmp = RREG32(mmGRBM_STATUS2);
4100	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4101		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4102						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4103
 
 
 
 
 
 
 
 
 
 
 
 
 
4104	/* SRBM_STATUS */
4105	tmp = RREG32(mmSRBM_STATUS);
4106	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
4107		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4108						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
 
 
 
4109
4110	if (grbm_soft_reset || srbm_soft_reset) {
4111		gfx_v8_0_print_status((void *)adev);
4112		/* stop the rlc */
4113		gfx_v8_0_rlc_stop(adev);
 
 
 
 
 
 
 
 
 
 
 
4114
 
 
 
 
 
 
 
 
 
 
 
4115		/* Disable GFX parsing/prefetching */
4116		gfx_v8_0_cp_gfx_enable(adev, false);
4117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4118		/* Disable MEC parsing/prefetching */
4119		gfx_v8_0_cp_compute_enable(adev, false);
 
4120
4121		if (grbm_soft_reset || srbm_soft_reset) {
4122			tmp = RREG32(mmGMCON_DEBUG);
4123			tmp = REG_SET_FIELD(tmp,
4124					    GMCON_DEBUG, GFX_STALL, 1);
4125			tmp = REG_SET_FIELD(tmp,
4126					    GMCON_DEBUG, GFX_CLEAR, 1);
4127			WREG32(mmGMCON_DEBUG, tmp);
4128
4129			udelay(50);
4130		}
 
 
 
4131
4132		if (grbm_soft_reset) {
4133			tmp = RREG32(mmGRBM_SOFT_RESET);
4134			tmp |= grbm_soft_reset;
4135			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4136			WREG32(mmGRBM_SOFT_RESET, tmp);
4137			tmp = RREG32(mmGRBM_SOFT_RESET);
4138
4139			udelay(50);
 
4140
4141			tmp &= ~grbm_soft_reset;
4142			WREG32(mmGRBM_SOFT_RESET, tmp);
4143			tmp = RREG32(mmGRBM_SOFT_RESET);
4144		}
 
 
 
4145
4146		if (srbm_soft_reset) {
4147			tmp = RREG32(mmSRBM_SOFT_RESET);
4148			tmp |= srbm_soft_reset;
4149			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4150			WREG32(mmSRBM_SOFT_RESET, tmp);
4151			tmp = RREG32(mmSRBM_SOFT_RESET);
4152
4153			udelay(50);
4154
4155			tmp &= ~srbm_soft_reset;
4156			WREG32(mmSRBM_SOFT_RESET, tmp);
4157			tmp = RREG32(mmSRBM_SOFT_RESET);
4158		}
4159
4160		if (grbm_soft_reset || srbm_soft_reset) {
4161			tmp = RREG32(mmGMCON_DEBUG);
4162			tmp = REG_SET_FIELD(tmp,
4163					    GMCON_DEBUG, GFX_STALL, 0);
4164			tmp = REG_SET_FIELD(tmp,
4165					    GMCON_DEBUG, GFX_CLEAR, 0);
4166			WREG32(mmGMCON_DEBUG, tmp);
4167		}
4168
4169		/* Wait a little for things to settle down */
4170		udelay(50);
4171		gfx_v8_0_print_status((void *)adev);
 
 
 
4172	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4173	return 0;
4174}
4175
4176/**
4177 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
4178 *
4179 * @adev: amdgpu_device pointer
4180 *
4181 * Fetches a GPU clock counter snapshot.
4182 * Returns the 64 bit clock counter snapshot.
4183 */
4184uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4185{
4186	uint64_t clock;
4187
4188	mutex_lock(&adev->gfx.gpu_clock_mutex);
4189	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4190	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4191		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4192	mutex_unlock(&adev->gfx.gpu_clock_mutex);
4193	return clock;
4194}
4195
4196static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4197					  uint32_t vmid,
4198					  uint32_t gds_base, uint32_t gds_size,
4199					  uint32_t gws_base, uint32_t gws_size,
4200					  uint32_t oa_base, uint32_t oa_size)
4201{
4202	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
4203	gds_size = gds_size >> AMDGPU_GDS_SHIFT;
4204
4205	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
4206	gws_size = gws_size >> AMDGPU_GWS_SHIFT;
4207
4208	oa_base = oa_base >> AMDGPU_OA_SHIFT;
4209	oa_size = oa_size >> AMDGPU_OA_SHIFT;
4210
4211	/* GDS Base */
4212	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4213	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4214				WRITE_DATA_DST_SEL(0)));
4215	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4216	amdgpu_ring_write(ring, 0);
4217	amdgpu_ring_write(ring, gds_base);
4218
4219	/* GDS Size */
4220	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4221	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4222				WRITE_DATA_DST_SEL(0)));
4223	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4224	amdgpu_ring_write(ring, 0);
4225	amdgpu_ring_write(ring, gds_size);
4226
4227	/* GWS */
4228	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4229	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4230				WRITE_DATA_DST_SEL(0)));
4231	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4232	amdgpu_ring_write(ring, 0);
4233	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4234
4235	/* OA */
4236	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4237	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4238				WRITE_DATA_DST_SEL(0)));
4239	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4240	amdgpu_ring_write(ring, 0);
4241	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4242}
4243
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4244static int gfx_v8_0_early_init(void *handle)
4245{
4246	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4247
4248	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
4249	adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
 
4250	gfx_v8_0_set_ring_funcs(adev);
4251	gfx_v8_0_set_irq_funcs(adev);
4252	gfx_v8_0_set_gds_init(adev);
 
4253
4254	return 0;
4255}
4256
4257static int gfx_v8_0_late_init(void *handle)
4258{
4259	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4260	int r;
4261
4262	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4263	if (r)
4264		return r;
4265
4266	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4267	if (r)
4268		return r;
4269
4270	/* requires IBs so do in late init after IB pool is initialized */
4271	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
4272	if (r)
4273		return r;
4274
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4275	return 0;
4276}
4277
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4278static int gfx_v8_0_set_powergating_state(void *handle,
4279					  enum amd_powergating_state state)
4280{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4281	return 0;
4282}
4283
4284static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
4285		uint32_t reg_addr, uint32_t cmd)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4286{
4287	uint32_t data;
4288
4289	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
4290
4291	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
4292	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
4293
4294	data = RREG32(mmRLC_SERDES_WR_CTRL);
4295	data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
4296			RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
4297			RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
4298			RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
4299			RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
4300			RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
4301			RLC_SERDES_WR_CTRL__POWER_UP_MASK |
4302			RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
4303			RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
4304			RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
4305			RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
 
 
 
 
 
 
 
 
 
 
 
4306	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
4307			(cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
4308			(reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
4309			(0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
4310
4311	WREG32(mmRLC_SERDES_WR_CTRL, data);
4312}
4313
4314static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4315		bool enable)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4316{
4317	uint32_t temp, data;
4318
 
 
4319	/* It is disabled by HW by default */
4320	if (enable) {
4321		/* 1 - RLC memory Light sleep */
4322		temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
4323		data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4324		if (temp != data)
4325			WREG32(mmRLC_MEM_SLP_CNTL, data);
4326
4327		/* 2 - CP memory Light sleep */
4328		temp = data = RREG32(mmCP_MEM_SLP_CNTL);
4329		data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4330		if (temp != data)
4331			WREG32(mmCP_MEM_SLP_CNTL, data);
4332
4333		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
4334		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4335		data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
4336				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
4337				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
4338				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
 
 
 
 
 
4339
4340		if (temp != data)
4341			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
4342
4343		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4344		gfx_v8_0_wait_for_rlc_serdes(adev);
4345
4346		/* 5 - clear mgcg override */
4347		fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
4348
4349		/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
4350		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
4351		data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
4352		data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
4353		data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
4354		data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
4355		data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
4356		data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
4357		data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
4358		if (temp != data)
4359			WREG32(mmCGTS_SM_CTRL_REG, data);
 
 
 
 
4360		udelay(50);
4361
4362		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4363		gfx_v8_0_wait_for_rlc_serdes(adev);
4364	} else {
4365		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
4366		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4367		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
4368				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
4369				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
4370				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
4371		if (temp != data)
4372			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
4373
4374		/* 2 - disable MGLS in RLC */
4375		data = RREG32(mmRLC_MEM_SLP_CNTL);
4376		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4377			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4378			WREG32(mmRLC_MEM_SLP_CNTL, data);
4379		}
4380
4381		/* 3 - disable MGLS in CP */
4382		data = RREG32(mmCP_MEM_SLP_CNTL);
4383		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4384			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4385			WREG32(mmCP_MEM_SLP_CNTL, data);
4386		}
4387
4388		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
4389		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
4390		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
4391				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
4392		if (temp != data)
4393			WREG32(mmCGTS_SM_CTRL_REG, data);
4394
4395		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4396		gfx_v8_0_wait_for_rlc_serdes(adev);
4397
4398		/* 6 - set mgcg override */
4399		fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
4400
4401		udelay(50);
4402
4403		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4404		gfx_v8_0_wait_for_rlc_serdes(adev);
4405	}
 
 
4406}
4407
4408static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4409		bool enable)
4410{
4411	uint32_t temp, temp1, data, data1;
4412
4413	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
4414
4415	if (enable) {
4416		/* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
4417		 * Cmp_busy/GFX_Idle interrupts
4418		 */
4419		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4420
 
4421		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4422		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
4423		if (temp1 != data1)
4424			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4425
4426		/* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4427		gfx_v8_0_wait_for_rlc_serdes(adev);
4428
4429		/* 3 - clear cgcg override */
4430		fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
4431
4432		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4433		gfx_v8_0_wait_for_rlc_serdes(adev);
4434
4435		/* 4 - write cmd to set CGLS */
4436		fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
4437
4438		/* 5 - enable cgcg */
4439		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4440
4441		/* enable cgls*/
4442		data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
 
4443
4444		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4445		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
4446
4447		if (temp1 != data1)
4448			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
 
 
 
4449
4450		if (temp != data)
4451			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
 
 
 
 
 
4452	} else {
4453		/* disable cntx_empty_int_enable & GFX Idle interrupt */
4454		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4455
4456		/* TEST CGCG */
4457		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4458		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
4459				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
4460		if (temp1 != data1)
4461			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4462
4463		/* read gfx register to wake up cgcg */
4464		RREG32(mmCB_CGTT_SCLK_CTRL);
4465		RREG32(mmCB_CGTT_SCLK_CTRL);
4466		RREG32(mmCB_CGTT_SCLK_CTRL);
4467		RREG32(mmCB_CGTT_SCLK_CTRL);
4468
4469		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4470		gfx_v8_0_wait_for_rlc_serdes(adev);
4471
4472		/* write cmd to Set CGCG Overrride */
4473		fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
4474
4475		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4476		gfx_v8_0_wait_for_rlc_serdes(adev);
4477
4478		/* write cmd to Clear CGLS */
4479		fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
4480
4481		/* disable cgcg, cgls should be disabled too. */
4482		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
4483				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4484		if (temp != data)
4485			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
 
 
4486	}
 
 
 
 
4487}
4488static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev,
4489		bool enable)
4490{
4491	if (enable) {
4492		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
4493		 * ===  MGCG + MGLS + TS(CG/LS) ===
4494		 */
4495		fiji_update_medium_grain_clock_gating(adev, enable);
4496		fiji_update_coarse_grain_clock_gating(adev, enable);
4497	} else {
4498		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
4499		 * ===  CGCG + CGLS ===
4500		 */
4501		fiji_update_coarse_grain_clock_gating(adev, enable);
4502		fiji_update_medium_grain_clock_gating(adev, enable);
4503	}
4504	return 0;
4505}
4506
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4507static int gfx_v8_0_set_clockgating_state(void *handle,
4508					  enum amd_clockgating_state state)
4509{
4510	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4511
 
 
 
4512	switch (adev->asic_type) {
4513	case CHIP_FIJI:
4514		fiji_update_gfx_clock_gating(adev,
4515				state == AMD_CG_STATE_GATE ? true : false);
 
 
 
 
 
 
 
 
 
 
 
4516		break;
4517	default:
4518		break;
4519	}
4520	return 0;
4521}
4522
4523static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4524{
4525	u32 rptr;
4526
4527	rptr = ring->adev->wb.wb[ring->rptr_offs];
4528
4529	return rptr;
4530}
4531
4532static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4533{
4534	struct amdgpu_device *adev = ring->adev;
4535	u32 wptr;
4536
4537	if (ring->use_doorbell)
4538		/* XXX check if swapping is necessary on BE */
4539		wptr = ring->adev->wb.wb[ring->wptr_offs];
4540	else
4541		wptr = RREG32(mmCP_RB0_WPTR);
4542
4543	return wptr;
4544}
4545
4546static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4547{
4548	struct amdgpu_device *adev = ring->adev;
4549
4550	if (ring->use_doorbell) {
4551		/* XXX check if swapping is necessary on BE */
4552		adev->wb.wb[ring->wptr_offs] = ring->wptr;
4553		WDOORBELL32(ring->doorbell_index, ring->wptr);
4554	} else {
4555		WREG32(mmCP_RB0_WPTR, ring->wptr);
4556		(void)RREG32(mmCP_RB0_WPTR);
4557	}
4558}
4559
4560static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4561{
4562	u32 ref_and_mask, reg_mem_engine;
4563
4564	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
 
4565		switch (ring->me) {
4566		case 1:
4567			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
4568			break;
4569		case 2:
4570			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
4571			break;
4572		default:
4573			return;
4574		}
4575		reg_mem_engine = 0;
4576	} else {
4577		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
4578		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
4579	}
4580
4581	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4582	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
4583				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
4584				 reg_mem_engine));
4585	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
4586	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
4587	amdgpu_ring_write(ring, ref_and_mask);
4588	amdgpu_ring_write(ring, ref_and_mask);
4589	amdgpu_ring_write(ring, 0x20); /* poll interval */
4590}
4591
4592static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
4593{
4594	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4595	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4596				 WRITE_DATA_DST_SEL(0) |
4597				 WR_CONFIRM));
4598	amdgpu_ring_write(ring, mmHDP_DEBUG0);
4599	amdgpu_ring_write(ring, 0);
4600	amdgpu_ring_write(ring, 1);
4601
4602}
4603
4604static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4605				  struct amdgpu_ib *ib)
 
 
4606{
4607	bool need_ctx_switch = ring->current_ctx != ib->ctx;
4608	u32 header, control = 0;
4609	u32 next_rptr = ring->wptr + 5;
4610
4611	/* drop the CE preamble IB for the same context */
4612	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
4613		return;
4614
4615	if (need_ctx_switch)
4616		next_rptr += 2;
4617
4618	next_rptr += 4;
4619	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4620	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
4621	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
4622	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
4623	amdgpu_ring_write(ring, next_rptr);
4624
4625	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
4626	if (need_ctx_switch) {
4627		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4628		amdgpu_ring_write(ring, 0);
4629	}
4630
4631	if (ib->flags & AMDGPU_IB_FLAG_CE)
4632		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4633	else
4634		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4635
4636	control |= ib->length_dw | (ib->vm_id << 24);
 
 
 
 
 
 
 
4637
4638	amdgpu_ring_write(ring, header);
4639	amdgpu_ring_write(ring,
4640#ifdef __BIG_ENDIAN
4641			  (2 << 0) |
4642#endif
4643			  (ib->gpu_addr & 0xFFFFFFFC));
4644	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4645	amdgpu_ring_write(ring, control);
4646}
4647
4648static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4649				  struct amdgpu_ib *ib)
4650{
4651	u32 header, control = 0;
4652	u32 next_rptr = ring->wptr + 5;
4653
4654	control |= INDIRECT_BUFFER_VALID;
4655
4656	next_rptr += 4;
4657	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4658	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
4659	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
4660	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
4661	amdgpu_ring_write(ring, next_rptr);
4662
4663	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4664
4665	control |= ib->length_dw | (ib->vm_id << 24);
 
 
 
 
 
4666
4667	amdgpu_ring_write(ring, header);
4668	amdgpu_ring_write(ring,
4669#ifdef __BIG_ENDIAN
4670					  (2 << 0) |
4671#endif
4672					  (ib->gpu_addr & 0xFFFFFFFC));
4673	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4674	amdgpu_ring_write(ring, control);
4675}
4676
4677static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
4678					 u64 seq, unsigned flags)
4679{
4680	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4681	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4682
4683	/* EVENT_WRITE_EOP - flush caches, send int */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4684	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
4685	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
4686				 EOP_TC_ACTION_EN |
 
4687				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4688				 EVENT_INDEX(5)));
4689	amdgpu_ring_write(ring, addr & 0xfffffffc);
4690	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
4691			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4692	amdgpu_ring_write(ring, lower_32_bits(seq));
4693	amdgpu_ring_write(ring, upper_32_bits(seq));
4694
4695}
4696
4697static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4698{
4699	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4700	uint32_t seq = ring->fence_drv.sync_seq;
4701	uint64_t addr = ring->fence_drv.gpu_addr;
4702
4703	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4704	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4705				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
4706				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
4707	amdgpu_ring_write(ring, addr & 0xfffffffc);
4708	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4709	amdgpu_ring_write(ring, seq);
4710	amdgpu_ring_write(ring, 0xffffffff);
4711	amdgpu_ring_write(ring, 4); /* poll interval */
4712
4713	if (usepfp) {
4714		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
4715		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4716		amdgpu_ring_write(ring, 0);
4717		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4718		amdgpu_ring_write(ring, 0);
4719	}
4720}
4721
4722static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4723					unsigned vm_id, uint64_t pd_addr)
4724{
4725	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4726
4727	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4728	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
4729				 WRITE_DATA_DST_SEL(0)) |
4730				 WR_CONFIRM);
4731	if (vm_id < 8) {
4732		amdgpu_ring_write(ring,
4733				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
4734	} else {
4735		amdgpu_ring_write(ring,
4736				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
4737	}
4738	amdgpu_ring_write(ring, 0);
4739	amdgpu_ring_write(ring, pd_addr >> 12);
4740
4741	/* bits 0-15 are the VM contexts0-15 */
4742	/* invalidate the cache */
4743	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4744	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4745				 WRITE_DATA_DST_SEL(0)));
4746	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
4747	amdgpu_ring_write(ring, 0);
4748	amdgpu_ring_write(ring, 1 << vm_id);
4749
4750	/* wait for the invalidate to complete */
4751	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4752	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
4753				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
4754				 WAIT_REG_MEM_ENGINE(0))); /* me */
4755	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
4756	amdgpu_ring_write(ring, 0);
4757	amdgpu_ring_write(ring, 0); /* ref */
4758	amdgpu_ring_write(ring, 0); /* mask */
4759	amdgpu_ring_write(ring, 0x20); /* poll interval */
4760
4761	/* compute doesn't have PFP */
4762	if (usepfp) {
4763		/* sync PFP to ME, otherwise we might get invalid PFP reads */
4764		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4765		amdgpu_ring_write(ring, 0x0);
4766		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4767		amdgpu_ring_write(ring, 0);
4768		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4769		amdgpu_ring_write(ring, 0);
4770	}
4771}
4772
4773static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4774{
4775	return ring->adev->wb.wb[ring->rptr_offs];
4776}
4777
4778static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4779{
4780	return ring->adev->wb.wb[ring->wptr_offs];
4781}
4782
4783static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4784{
4785	struct amdgpu_device *adev = ring->adev;
4786
4787	/* XXX check if swapping is necessary on BE */
4788	adev->wb.wb[ring->wptr_offs] = ring->wptr;
4789	WDOORBELL32(ring->doorbell_index, ring->wptr);
4790}
4791
4792static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
4793					     u64 addr, u64 seq,
4794					     unsigned flags)
4795{
4796	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4797	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4798
4799	/* RELEASE_MEM - flush caches, send int */
4800	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
4801	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
4802				 EOP_TC_ACTION_EN |
4803				 EOP_TC_WB_ACTION_EN |
4804				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4805				 EVENT_INDEX(5)));
4806	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4807	amdgpu_ring_write(ring, addr & 0xfffffffc);
4808	amdgpu_ring_write(ring, upper_32_bits(addr));
4809	amdgpu_ring_write(ring, lower_32_bits(seq));
4810	amdgpu_ring_write(ring, upper_32_bits(seq));
4811}
4812
4813static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4814						 enum amdgpu_interrupt_state state)
4815{
4816	u32 cp_int_cntl;
 
4817
4818	switch (state) {
4819	case AMDGPU_IRQ_STATE_DISABLE:
4820		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4821		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4822					    TIME_STAMP_INT_ENABLE, 0);
4823		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4824		break;
4825	case AMDGPU_IRQ_STATE_ENABLE:
4826		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4827		cp_int_cntl =
4828			REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4829				      TIME_STAMP_INT_ENABLE, 1);
4830		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4831		break;
4832	default:
 
4833		break;
4834	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4835}
4836
4837static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4838						     int me, int pipe,
4839						     enum amdgpu_interrupt_state state)
4840{
4841	u32 mec_int_cntl, mec_int_cntl_reg;
4842
4843	/*
4844	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4845	 * handles the setting of interrupts for this specific pipe. All other
4846	 * pipes' interrupts are set by amdkfd.
4847	 */
4848
4849	if (me == 1) {
4850		switch (pipe) {
4851		case 0:
4852			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4853			break;
 
 
 
 
 
 
 
 
 
4854		default:
4855			DRM_DEBUG("invalid pipe %d\n", pipe);
4856			return;
4857		}
4858	} else {
4859		DRM_DEBUG("invalid me %d\n", me);
4860		return;
4861	}
4862
4863	switch (state) {
4864	case AMDGPU_IRQ_STATE_DISABLE:
4865		mec_int_cntl = RREG32(mec_int_cntl_reg);
4866		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4867					     TIME_STAMP_INT_ENABLE, 0);
4868		WREG32(mec_int_cntl_reg, mec_int_cntl);
4869		break;
4870	case AMDGPU_IRQ_STATE_ENABLE:
4871		mec_int_cntl = RREG32(mec_int_cntl_reg);
4872		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4873					     TIME_STAMP_INT_ENABLE, 1);
4874		WREG32(mec_int_cntl_reg, mec_int_cntl);
4875		break;
4876	default:
4877		break;
4878	}
4879}
4880
4881static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4882					     struct amdgpu_irq_src *source,
4883					     unsigned type,
4884					     enum amdgpu_interrupt_state state)
4885{
4886	u32 cp_int_cntl;
4887
4888	switch (state) {
4889	case AMDGPU_IRQ_STATE_DISABLE:
4890		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4891		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4892					    PRIV_REG_INT_ENABLE, 0);
4893		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4894		break;
4895	case AMDGPU_IRQ_STATE_ENABLE:
4896		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4897		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4898					    PRIV_REG_INT_ENABLE, 1);
4899		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4900		break;
4901	default:
4902		break;
4903	}
4904
4905	return 0;
4906}
4907
4908static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4909					      struct amdgpu_irq_src *source,
4910					      unsigned type,
4911					      enum amdgpu_interrupt_state state)
4912{
4913	u32 cp_int_cntl;
4914
4915	switch (state) {
4916	case AMDGPU_IRQ_STATE_DISABLE:
4917		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4918		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4919					    PRIV_INSTR_INT_ENABLE, 0);
4920		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4921		break;
4922	case AMDGPU_IRQ_STATE_ENABLE:
4923		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4924		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4925					    PRIV_INSTR_INT_ENABLE, 1);
4926		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4927		break;
4928	default:
4929		break;
4930	}
4931
4932	return 0;
4933}
4934
4935static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4936					    struct amdgpu_irq_src *src,
4937					    unsigned type,
4938					    enum amdgpu_interrupt_state state)
4939{
4940	switch (type) {
4941	case AMDGPU_CP_IRQ_GFX_EOP:
4942		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
4943		break;
4944	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4945		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4946		break;
4947	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4948		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4949		break;
4950	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4951		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4952		break;
4953	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4954		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4955		break;
4956	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4957		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4958		break;
4959	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4960		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4961		break;
4962	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4963		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4964		break;
4965	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4966		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4967		break;
4968	default:
4969		break;
4970	}
4971	return 0;
4972}
4973
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4974static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
4975			    struct amdgpu_irq_src *source,
4976			    struct amdgpu_iv_entry *entry)
4977{
4978	int i;
4979	u8 me_id, pipe_id, queue_id;
4980	struct amdgpu_ring *ring;
4981
4982	DRM_DEBUG("IH: CP EOP\n");
4983	me_id = (entry->ring_id & 0x0c) >> 2;
4984	pipe_id = (entry->ring_id & 0x03) >> 0;
4985	queue_id = (entry->ring_id & 0x70) >> 4;
4986
4987	switch (me_id) {
4988	case 0:
4989		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4990		break;
4991	case 1:
4992	case 2:
4993		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4994			ring = &adev->gfx.compute_ring[i];
4995			/* Per-queue interrupt is supported for MEC starting from VI.
4996			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
4997			  */
4998			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4999				amdgpu_fence_process(ring);
5000		}
5001		break;
5002	}
5003	return 0;
5004}
5005
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5006static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
5007				 struct amdgpu_irq_src *source,
5008				 struct amdgpu_iv_entry *entry)
5009{
5010	DRM_ERROR("Illegal register access in command stream\n");
5011	schedule_work(&adev->reset_work);
5012	return 0;
5013}
5014
5015static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
5016				  struct amdgpu_irq_src *source,
5017				  struct amdgpu_iv_entry *entry)
5018{
5019	DRM_ERROR("Illegal instruction in command stream\n");
5020	schedule_work(&adev->reset_work);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5021	return 0;
5022}
5023
5024const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5025	.early_init = gfx_v8_0_early_init,
5026	.late_init = gfx_v8_0_late_init,
5027	.sw_init = gfx_v8_0_sw_init,
5028	.sw_fini = gfx_v8_0_sw_fini,
5029	.hw_init = gfx_v8_0_hw_init,
5030	.hw_fini = gfx_v8_0_hw_fini,
5031	.suspend = gfx_v8_0_suspend,
5032	.resume = gfx_v8_0_resume,
5033	.is_idle = gfx_v8_0_is_idle,
5034	.wait_for_idle = gfx_v8_0_wait_for_idle,
 
 
5035	.soft_reset = gfx_v8_0_soft_reset,
5036	.print_status = gfx_v8_0_print_status,
5037	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
5038	.set_powergating_state = gfx_v8_0_set_powergating_state,
 
5039};
5040
5041static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
5042	.get_rptr = gfx_v8_0_ring_get_rptr_gfx,
 
 
 
 
5043	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
5044	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
5045	.parse_cs = NULL,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5046	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
5047	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
5048	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
5049	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
5050	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
5051	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
5052	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
5053	.test_ring = gfx_v8_0_ring_test_ring,
5054	.test_ib = gfx_v8_0_ring_test_ib,
5055	.insert_nop = amdgpu_ring_insert_nop,
5056	.pad_ib = amdgpu_ring_generic_pad_ib,
 
 
 
 
 
 
 
5057};
5058
5059static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
5060	.get_rptr = gfx_v8_0_ring_get_rptr_compute,
 
 
 
 
5061	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
5062	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
5063	.parse_cs = NULL,
 
 
 
 
 
 
 
 
5064	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
5065	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
5066	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
5067	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
5068	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
5069	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
5070	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
5071	.test_ring = gfx_v8_0_ring_test_ring,
5072	.test_ib = gfx_v8_0_ring_test_ib,
5073	.insert_nop = amdgpu_ring_insert_nop,
5074	.pad_ib = amdgpu_ring_generic_pad_ib,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5075};
5076
5077static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
5078{
5079	int i;
5080
 
 
5081	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5082		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
5083
5084	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5085		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
5086}
5087
5088static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
5089	.set = gfx_v8_0_set_eop_interrupt_state,
5090	.process = gfx_v8_0_eop_irq,
5091};
5092
5093static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
5094	.set = gfx_v8_0_set_priv_reg_fault_state,
5095	.process = gfx_v8_0_priv_reg_irq,
5096};
5097
5098static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
5099	.set = gfx_v8_0_set_priv_inst_fault_state,
5100	.process = gfx_v8_0_priv_inst_irq,
5101};
5102
 
 
 
 
 
 
 
 
 
 
5103static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
5104{
5105	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5106	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
5107
5108	adev->gfx.priv_reg_irq.num_types = 1;
5109	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
5110
5111	adev->gfx.priv_inst_irq.num_types = 1;
5112	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
 
 
 
 
 
 
 
 
 
 
 
5113}
5114
5115static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
5116{
5117	/* init asci gds info */
5118	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5119	adev->gds.gws.total_size = 64;
5120	adev->gds.oa.total_size = 16;
5121
5122	if (adev->gds.mem.total_size == 64 * 1024) {
5123		adev->gds.mem.gfx_partition_size = 4096;
5124		adev->gds.mem.cs_partition_size = 4096;
5125
5126		adev->gds.gws.gfx_partition_size = 4;
5127		adev->gds.gws.cs_partition_size = 4;
 
 
5128
5129		adev->gds.oa.gfx_partition_size = 4;
5130		adev->gds.oa.cs_partition_size = 1;
5131	} else {
5132		adev->gds.mem.gfx_partition_size = 1024;
5133		adev->gds.mem.cs_partition_size = 1024;
5134
5135		adev->gds.gws.gfx_partition_size = 16;
5136		adev->gds.gws.cs_partition_size = 16;
5137
5138		adev->gds.oa.gfx_partition_size = 4;
5139		adev->gds.oa.cs_partition_size = 4;
5140	}
5141}
5142
5143static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
5144{
5145	u32 data, mask;
5146
5147	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
5148	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
5149
5150	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
5151	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
5152
5153	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
5154
5155	return (~data) & mask;
5156}
5157
5158int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
5159			 struct amdgpu_cu_info *cu_info)
5160{
5161	int i, j, k, counter, active_cu_number = 0;
5162	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5163
5164	if (!adev || !cu_info)
5165		return -EINVAL;
5166
5167	memset(cu_info, 0, sizeof(*cu_info));
5168
 
 
 
 
 
 
 
5169	mutex_lock(&adev->grbm_idx_mutex);
5170	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5171		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5172			mask = 1;
5173			ao_bitmap = 0;
5174			counter = 0;
5175			gfx_v8_0_select_se_sh(adev, i, j);
 
 
 
5176			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
5177			cu_info->bitmap[i][j] = bitmap;
5178
5179			for (k = 0; k < 16; k ++) {
5180				if (bitmap & mask) {
5181					if (counter < 2)
5182						ao_bitmap |= mask;
5183					counter ++;
5184				}
5185				mask <<= 1;
5186			}
5187			active_cu_number += counter;
5188			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
 
 
5189		}
5190	}
5191	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
5192	mutex_unlock(&adev->grbm_idx_mutex);
5193
5194	cu_info->number = active_cu_number;
5195	cu_info->ao_cu_mask = ao_cu_mask;
 
 
 
 
 
 
5196
5197	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5198}
v5.9
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/delay.h>
  25#include <linux/kernel.h>
  26#include <linux/firmware.h>
  27#include <linux/module.h>
  28#include <linux/pci.h>
  29
  30#include "amdgpu.h"
  31#include "amdgpu_gfx.h"
  32#include "vi.h"
  33#include "vi_structs.h"
  34#include "vid.h"
  35#include "amdgpu_ucode.h"
  36#include "amdgpu_atombios.h"
  37#include "atombios_i2c.h"
  38#include "clearstate_vi.h"
  39
  40#include "gmc/gmc_8_2_d.h"
  41#include "gmc/gmc_8_2_sh_mask.h"
  42
  43#include "oss/oss_3_0_d.h"
  44#include "oss/oss_3_0_sh_mask.h"
  45
  46#include "bif/bif_5_0_d.h"
  47#include "bif/bif_5_0_sh_mask.h"
 
  48#include "gca/gfx_8_0_d.h"
  49#include "gca/gfx_8_0_enum.h"
  50#include "gca/gfx_8_0_sh_mask.h"
 
  51
  52#include "dce/dce_10_0_d.h"
  53#include "dce/dce_10_0_sh_mask.h"
  54
  55#include "smu/smu_7_1_3_d.h"
  56
  57#include "ivsrcid/ivsrcid_vislands30.h"
  58
  59#define GFX8_NUM_GFX_RINGS     1
  60#define GFX8_MEC_HPD_SIZE 4096
  61
  62#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
  63#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
  64#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
  65#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
  66
  67#define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
  68#define PIPE_CONFIG(x)					((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
  69#define TILE_SPLIT(x)					((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
  70#define MICRO_TILE_MODE_NEW(x)				((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
  71#define SAMPLE_SPLIT(x)					((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
  72#define BANK_WIDTH(x)					((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
  73#define BANK_HEIGHT(x)					((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
  74#define MACRO_TILE_ASPECT(x)				((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
  75#define NUM_BANKS(x)					((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
  76
  77#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
  78#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
  79#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
  80#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
  81#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
  82#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L
  83
  84/* BPM SERDES CMD */
  85#define SET_BPM_SERDES_CMD    1
  86#define CLE_BPM_SERDES_CMD    0
  87
  88/* BPM Register Address*/
  89enum {
  90	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
  91	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
  92	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
  93	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
  94	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
  95	BPM_REG_FGCG_MAX
  96};
  97
  98#define RLC_FormatDirectRegListLength        14
  99
 100MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
 101MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
 102MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
 103MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
 104MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
 105MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
 106
 107MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
 108MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
 109MODULE_FIRMWARE("amdgpu/stoney_me.bin");
 110MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
 111MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
 112
 113MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
 114MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
 115MODULE_FIRMWARE("amdgpu/tonga_me.bin");
 116MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
 117MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
 118MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
 119
 120MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 121MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 122MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 123MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
 124MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 125
 126MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
 127MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
 128MODULE_FIRMWARE("amdgpu/fiji_me.bin");
 129MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
 130MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 131MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 132
 133MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
 134MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
 135MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
 136MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
 137MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
 138MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
 139MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
 140MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
 141MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
 142MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
 143MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
 144
 145MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
 146MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
 147MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
 148MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
 149MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
 150MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
 151MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
 152MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
 153MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
 154MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
 155MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
 156
 157MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
 158MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
 159MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
 160MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
 161MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
 162MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
 163MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
 164MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
 165MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
 166MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
 167MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
 168
 169MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
 170MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
 171MODULE_FIRMWARE("amdgpu/vegam_me.bin");
 172MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
 173MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
 174MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
 175
 176static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 177{
 178	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
 179	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
 180	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
 181	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
 182	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
 183	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
 184	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
 185	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
 186	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
 187	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 188	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 189	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 190	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 191	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 192	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 193	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 194};
 195
 196static const u32 golden_settings_tonga_a11[] =
 197{
 198	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
 199	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 200	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 201	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 202	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 203	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
 204	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 205	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 206	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 207	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 208	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 209	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 210	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
 211	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
 212	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
 213	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 214};
 215
 216static const u32 tonga_golden_common_all[] =
 217{
 218	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 219	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
 220	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
 221	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 222	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 223	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 224	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 225	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 226};
 227
 228static const u32 tonga_mgcg_cgcg_init[] =
 229{
 230	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 231	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 232	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 233	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 234	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 235	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 236	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 237	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 238	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 239	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 240	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 241	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 242	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 243	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 244	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 245	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 246	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 247	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 248	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 249	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 250	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 251	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 252	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 253	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 254	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 255	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 256	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 257	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 258	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 259	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 260	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 261	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 262	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 263	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 264	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 265	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 266	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 267	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 268	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 269	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 270	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 271	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 272	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 273	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 274	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 275	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 276	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 277	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 278	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 279	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 280	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 281	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 282	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 283	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 284	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 285	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 286	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 287	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 288	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 289	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 290	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 291	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 292	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 293	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 294	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 295	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 296	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 297	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 298	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 299	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 300	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 301	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 302	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 303	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 304	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 305};
 306
 307static const u32 golden_settings_vegam_a11[] =
 308{
 309	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
 310	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
 311	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 312	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 313	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 314	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 315	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
 316	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
 317	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 318	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 319	mmSQ_CONFIG, 0x07f80000, 0x01180000,
 320	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 321	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 322	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
 323	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 324	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
 325	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 326};
 327
 328static const u32 vegam_golden_common_all[] =
 329{
 330	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 331	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 332	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 333	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 334	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 335	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 336};
 337
 338static const u32 golden_settings_polaris11_a11[] =
 339{
 340	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
 341	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
 342	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 343	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 344	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 345	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 346	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
 347	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 348	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 349	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 350	mmSQ_CONFIG, 0x07f80000, 0x01180000,
 351	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 352	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 353	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
 354	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 355	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
 356	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 357};
 358
 359static const u32 polaris11_golden_common_all[] =
 360{
 361	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 362	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
 363	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 364	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 365	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 366	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 367};
 368
 369static const u32 golden_settings_polaris10_a11[] =
 370{
 371	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
 372	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
 373	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
 374	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 375	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 376	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 377	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 378	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
 379	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
 380	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 381	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 382	mmSQ_CONFIG, 0x07f80000, 0x07180000,
 383	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 384	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 385	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
 386	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 387	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 388};
 389
 390static const u32 polaris10_golden_common_all[] =
 391{
 392	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 393	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
 394	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
 395	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 396	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 397	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 398	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 399	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 400};
 401
 402static const u32 fiji_golden_common_all[] =
 403{
 404	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 405	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
 406	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
 407	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 408	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 409	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 410	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 411	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 412	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 413	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
 414};
 415
 416static const u32 golden_settings_fiji_a10[] =
 417{
 418	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 419	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 420	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 421	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 422	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 423	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 424	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 425	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 426	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 427	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
 428	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 429};
 430
 431static const u32 fiji_mgcg_cgcg_init[] =
 432{
 433	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 434	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 435	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 436	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 437	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 438	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 439	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 440	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 441	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 442	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 443	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 444	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 445	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 446	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 447	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 448	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 449	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 450	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 451	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 452	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 453	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 454	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 455	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 456	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 457	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 458	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 459	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 460	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 461	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 462	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 463	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 464	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 465	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 466	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 467	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 468};
 469
 470static const u32 golden_settings_iceland_a11[] =
 471{
 472	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 473	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 474	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
 475	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 476	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 477	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 478	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
 479	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 480	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 481	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 482	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 483	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 484	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 485	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
 486	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 487	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
 488};
 489
 490static const u32 iceland_golden_common_all[] =
 491{
 492	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 493	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 494	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 495	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 496	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 497	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 498	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 499	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 500};
 501
 502static const u32 iceland_mgcg_cgcg_init[] =
 503{
 504	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 505	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 506	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 507	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 508	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
 509	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
 510	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
 511	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 512	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 513	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 514	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 515	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 516	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 517	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 518	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 519	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 520	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 521	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 522	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 523	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 524	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 525	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 526	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
 527	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 528	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 529	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 530	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 531	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 532	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 533	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 534	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 535	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 536	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 537	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 538	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 539	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 540	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 541	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 542	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 543	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 544	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 545	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 546	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 547	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 548	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 549	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 550	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 551	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 552	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 553	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 554	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 555	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 556	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 557	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 558	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 559	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 560	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 561	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 562	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 563	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 564	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 565	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 566	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 567	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 568};
 569
 570static const u32 cz_golden_settings_a11[] =
 571{
 572	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 573	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 574	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 575	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
 576	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 577	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 578	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 579	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
 580	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 581	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 582	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
 583	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
 584};
 585
 586static const u32 cz_golden_common_all[] =
 587{
 588	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 589	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 590	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 591	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 592	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 593	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 594	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 595	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 596};
 597
 598static const u32 cz_mgcg_cgcg_init[] =
 599{
 600	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 601	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 602	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 603	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 604	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 605	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 606	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
 607	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 608	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 609	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 610	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 611	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 612	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 613	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 614	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 615	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 616	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 617	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 618	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 619	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 620	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 621	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 622	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 623	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 624	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 625	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 626	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 627	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 628	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 629	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 630	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 631	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 632	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 633	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 634	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 635	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 636	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 637	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 638	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 639	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 640	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 641	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 642	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 643	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 644	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 645	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 646	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 647	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 648	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 649	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 650	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 651	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 652	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 653	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 654	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 655	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 656	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 657	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 658	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 659	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 660	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 661	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 662	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 663	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 664	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 665	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 666	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 667	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 668	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 669	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 670	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 671	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 672	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 673	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 674	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 675};
 676
 677static const u32 stoney_golden_settings_a11[] =
 678{
 679	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 680	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 681	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 682	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 683	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 684	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 685	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 686	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 687	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
 688	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
 689};
 690
 691static const u32 stoney_golden_common_all[] =
 692{
 693	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 694	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
 695	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 696	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
 697	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 698	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 699	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
 700	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 701};
 702
 703static const u32 stoney_mgcg_cgcg_init[] =
 704{
 705	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 706	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 707	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 708	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 709	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
 710};
 711
 712
 713static const char * const sq_edc_source_names[] = {
 714	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
 715	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
 716	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
 717	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
 718	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
 719	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
 720	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
 721};
 722
 723static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 724static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 725static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
 726static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
 727static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
 728static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
 729static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
 730static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 731
 732static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 733{
 734	switch (adev->asic_type) {
 735	case CHIP_TOPAZ:
 736		amdgpu_device_program_register_sequence(adev,
 737							iceland_mgcg_cgcg_init,
 738							ARRAY_SIZE(iceland_mgcg_cgcg_init));
 739		amdgpu_device_program_register_sequence(adev,
 740							golden_settings_iceland_a11,
 741							ARRAY_SIZE(golden_settings_iceland_a11));
 742		amdgpu_device_program_register_sequence(adev,
 743							iceland_golden_common_all,
 744							ARRAY_SIZE(iceland_golden_common_all));
 745		break;
 746	case CHIP_FIJI:
 747		amdgpu_device_program_register_sequence(adev,
 748							fiji_mgcg_cgcg_init,
 749							ARRAY_SIZE(fiji_mgcg_cgcg_init));
 750		amdgpu_device_program_register_sequence(adev,
 751							golden_settings_fiji_a10,
 752							ARRAY_SIZE(golden_settings_fiji_a10));
 753		amdgpu_device_program_register_sequence(adev,
 754							fiji_golden_common_all,
 755							ARRAY_SIZE(fiji_golden_common_all));
 756		break;
 757
 758	case CHIP_TONGA:
 759		amdgpu_device_program_register_sequence(adev,
 760							tonga_mgcg_cgcg_init,
 761							ARRAY_SIZE(tonga_mgcg_cgcg_init));
 762		amdgpu_device_program_register_sequence(adev,
 763							golden_settings_tonga_a11,
 764							ARRAY_SIZE(golden_settings_tonga_a11));
 765		amdgpu_device_program_register_sequence(adev,
 766							tonga_golden_common_all,
 767							ARRAY_SIZE(tonga_golden_common_all));
 768		break;
 769	case CHIP_VEGAM:
 770		amdgpu_device_program_register_sequence(adev,
 771							golden_settings_vegam_a11,
 772							ARRAY_SIZE(golden_settings_vegam_a11));
 773		amdgpu_device_program_register_sequence(adev,
 774							vegam_golden_common_all,
 775							ARRAY_SIZE(vegam_golden_common_all));
 776		break;
 777	case CHIP_POLARIS11:
 778	case CHIP_POLARIS12:
 779		amdgpu_device_program_register_sequence(adev,
 780							golden_settings_polaris11_a11,
 781							ARRAY_SIZE(golden_settings_polaris11_a11));
 782		amdgpu_device_program_register_sequence(adev,
 783							polaris11_golden_common_all,
 784							ARRAY_SIZE(polaris11_golden_common_all));
 785		break;
 786	case CHIP_POLARIS10:
 787		amdgpu_device_program_register_sequence(adev,
 788							golden_settings_polaris10_a11,
 789							ARRAY_SIZE(golden_settings_polaris10_a11));
 790		amdgpu_device_program_register_sequence(adev,
 791							polaris10_golden_common_all,
 792							ARRAY_SIZE(polaris10_golden_common_all));
 793		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
 794		if (adev->pdev->revision == 0xc7 &&
 795		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
 796		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
 797		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
 798			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
 799			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
 800		}
 801		break;
 802	case CHIP_CARRIZO:
 803		amdgpu_device_program_register_sequence(adev,
 804							cz_mgcg_cgcg_init,
 805							ARRAY_SIZE(cz_mgcg_cgcg_init));
 806		amdgpu_device_program_register_sequence(adev,
 807							cz_golden_settings_a11,
 808							ARRAY_SIZE(cz_golden_settings_a11));
 809		amdgpu_device_program_register_sequence(adev,
 810							cz_golden_common_all,
 811							ARRAY_SIZE(cz_golden_common_all));
 812		break;
 813	case CHIP_STONEY:
 814		amdgpu_device_program_register_sequence(adev,
 815							stoney_mgcg_cgcg_init,
 816							ARRAY_SIZE(stoney_mgcg_cgcg_init));
 817		amdgpu_device_program_register_sequence(adev,
 818							stoney_golden_settings_a11,
 819							ARRAY_SIZE(stoney_golden_settings_a11));
 820		amdgpu_device_program_register_sequence(adev,
 821							stoney_golden_common_all,
 822							ARRAY_SIZE(stoney_golden_common_all));
 823		break;
 824	default:
 825		break;
 826	}
 827}
 828
 829static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
 830{
 831	adev->gfx.scratch.num_reg = 8;
 
 
 832	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 833	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
 
 
 
 834}
 835
 836static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 837{
 838	struct amdgpu_device *adev = ring->adev;
 839	uint32_t scratch;
 840	uint32_t tmp = 0;
 841	unsigned i;
 842	int r;
 843
 844	r = amdgpu_gfx_scratch_get(adev, &scratch);
 845	if (r)
 
 846		return r;
 847
 848	WREG32(scratch, 0xCAFEDEAD);
 849	r = amdgpu_ring_alloc(ring, 3);
 850	if (r)
 851		goto error_free_scratch;
 852
 
 
 
 853	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 854	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 855	amdgpu_ring_write(ring, 0xDEADBEEF);
 856	amdgpu_ring_commit(ring);
 857
 858	for (i = 0; i < adev->usec_timeout; i++) {
 859		tmp = RREG32(scratch);
 860		if (tmp == 0xDEADBEEF)
 861			break;
 862		udelay(1);
 
 
 
 
 
 
 
 
 863	}
 864
 865	if (i >= adev->usec_timeout)
 866		r = -ETIMEDOUT;
 867
 868error_free_scratch:
 869	amdgpu_gfx_scratch_free(adev, scratch);
 870	return r;
 871}
 872
 873static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 874{
 875	struct amdgpu_device *adev = ring->adev;
 876	struct amdgpu_ib ib;
 877	struct dma_fence *f = NULL;
 
 
 
 
 878
 879	unsigned int index;
 880	uint64_t gpu_addr;
 881	uint32_t tmp;
 882	long r;
 883
 884	r = amdgpu_device_wb_get(adev, &index);
 885	if (r)
 886		return r;
 887
 888	gpu_addr = adev->wb.gpu_addr + (index * 4);
 889	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 890	memset(&ib, 0, sizeof(ib));
 891	r = amdgpu_ib_get(adev, NULL, 16,
 892					AMDGPU_IB_POOL_DIRECT, &ib);
 893	if (r)
 894		goto err1;
 895
 896	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
 897	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
 898	ib.ptr[2] = lower_32_bits(gpu_addr);
 899	ib.ptr[3] = upper_32_bits(gpu_addr);
 900	ib.ptr[4] = 0xDEADBEEF;
 901	ib.length_dw = 5;
 902
 903	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 904	if (r)
 905		goto err2;
 906
 907	r = dma_fence_wait_timeout(f, false, timeout);
 908	if (r == 0) {
 909		r = -ETIMEDOUT;
 910		goto err2;
 911	} else if (r < 0) {
 
 
 
 
 
 
 
 
 
 912		goto err2;
 
 
 
 
 913	}
 914
 915	tmp = adev->wb.wb[index];
 916	if (tmp == 0xDEADBEEF)
 917		r = 0;
 918	else
 919		r = -EINVAL;
 920
 921err2:
 
 922	amdgpu_ib_free(adev, &ib, NULL);
 923	dma_fence_put(f);
 924err1:
 925	amdgpu_device_wb_free(adev, index);
 926	return r;
 927}
 928
 929
 930static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
 931{
 932	release_firmware(adev->gfx.pfp_fw);
 933	adev->gfx.pfp_fw = NULL;
 934	release_firmware(adev->gfx.me_fw);
 935	adev->gfx.me_fw = NULL;
 936	release_firmware(adev->gfx.ce_fw);
 937	adev->gfx.ce_fw = NULL;
 938	release_firmware(adev->gfx.rlc_fw);
 939	adev->gfx.rlc_fw = NULL;
 940	release_firmware(adev->gfx.mec_fw);
 941	adev->gfx.mec_fw = NULL;
 942	if ((adev->asic_type != CHIP_STONEY) &&
 943	    (adev->asic_type != CHIP_TOPAZ))
 944		release_firmware(adev->gfx.mec2_fw);
 945	adev->gfx.mec2_fw = NULL;
 946
 947	kfree(adev->gfx.rlc.register_list_format);
 948}
 949
 950static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 951{
 952	const char *chip_name;
 953	char fw_name[30];
 954	int err;
 955	struct amdgpu_firmware_info *info = NULL;
 956	const struct common_firmware_header *header = NULL;
 957	const struct gfx_firmware_header_v1_0 *cp_hdr;
 958	const struct rlc_firmware_header_v2_0 *rlc_hdr;
 959	unsigned int *tmp = NULL, i;
 960
 961	DRM_DEBUG("\n");
 962
 963	switch (adev->asic_type) {
 964	case CHIP_TOPAZ:
 965		chip_name = "topaz";
 966		break;
 967	case CHIP_TONGA:
 968		chip_name = "tonga";
 969		break;
 970	case CHIP_CARRIZO:
 971		chip_name = "carrizo";
 972		break;
 973	case CHIP_FIJI:
 974		chip_name = "fiji";
 975		break;
 976	case CHIP_STONEY:
 977		chip_name = "stoney";
 978		break;
 979	case CHIP_POLARIS10:
 980		chip_name = "polaris10";
 981		break;
 982	case CHIP_POLARIS11:
 983		chip_name = "polaris11";
 984		break;
 985	case CHIP_POLARIS12:
 986		chip_name = "polaris12";
 987		break;
 988	case CHIP_VEGAM:
 989		chip_name = "vegam";
 990		break;
 991	default:
 992		BUG();
 993	}
 994
 995	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
 996		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
 997		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 998		if (err == -ENOENT) {
 999			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1000			err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1001		}
1002	} else {
1003		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1004		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1005	}
1006	if (err)
1007		goto out;
1008	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1009	if (err)
1010		goto out;
1011	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1012	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1013	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1014
1015	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1016		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
1017		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1018		if (err == -ENOENT) {
1019			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1020			err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1021		}
1022	} else {
1023		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1024		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1025	}
1026	if (err)
1027		goto out;
1028	err = amdgpu_ucode_validate(adev->gfx.me_fw);
1029	if (err)
1030		goto out;
1031	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1032	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1033
1034	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1035
1036	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1037		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
1038		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1039		if (err == -ENOENT) {
1040			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1041			err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1042		}
1043	} else {
1044		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1045		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1046	}
1047	if (err)
1048		goto out;
1049	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1050	if (err)
1051		goto out;
1052	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1053	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1054	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1055
1056	/*
1057	 * Support for MCBP/Virtualization in combination with chained IBs is
1058	 * formal released on feature version #46
1059	 */
1060	if (adev->gfx.ce_feature_version >= 46 &&
1061	    adev->gfx.pfp_feature_version >= 46) {
1062		adev->virt.chained_ib_support = true;
1063		DRM_INFO("Chained IB support enabled!\n");
1064	} else
1065		adev->virt.chained_ib_support = false;
1066
1067	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1068	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1069	if (err)
1070		goto out;
1071	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
1072	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1073	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1074	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1075
1076	adev->gfx.rlc.save_and_restore_offset =
1077			le32_to_cpu(rlc_hdr->save_and_restore_offset);
1078	adev->gfx.rlc.clear_state_descriptor_offset =
1079			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1080	adev->gfx.rlc.avail_scratch_ram_locations =
1081			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1082	adev->gfx.rlc.reg_restore_list_size =
1083			le32_to_cpu(rlc_hdr->reg_restore_list_size);
1084	adev->gfx.rlc.reg_list_format_start =
1085			le32_to_cpu(rlc_hdr->reg_list_format_start);
1086	adev->gfx.rlc.reg_list_format_separate_start =
1087			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1088	adev->gfx.rlc.starting_offsets_start =
1089			le32_to_cpu(rlc_hdr->starting_offsets_start);
1090	adev->gfx.rlc.reg_list_format_size_bytes =
1091			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1092	adev->gfx.rlc.reg_list_size_bytes =
1093			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1094
1095	adev->gfx.rlc.register_list_format =
1096			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1097					adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1098
1099	if (!adev->gfx.rlc.register_list_format) {
1100		err = -ENOMEM;
1101		goto out;
1102	}
1103
1104	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1105			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1106	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1107		adev->gfx.rlc.register_list_format[i] =	le32_to_cpu(tmp[i]);
1108
1109	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1110
1111	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1112			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1113	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1114		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1115
1116	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1117		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
1118		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1119		if (err == -ENOENT) {
1120			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1121			err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1122		}
1123	} else {
1124		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1125		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1126	}
1127	if (err)
1128		goto out;
1129	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1130	if (err)
1131		goto out;
1132	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1133	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1134	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1135
1136	if ((adev->asic_type != CHIP_STONEY) &&
1137	    (adev->asic_type != CHIP_TOPAZ)) {
1138		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1139			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
1140			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1141			if (err == -ENOENT) {
1142				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1143				err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1144			}
1145		} else {
1146			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1147			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1148		}
1149		if (!err) {
1150			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1151			if (err)
1152				goto out;
1153			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1154				adev->gfx.mec2_fw->data;
1155			adev->gfx.mec2_fw_version =
1156				le32_to_cpu(cp_hdr->header.ucode_version);
1157			adev->gfx.mec2_feature_version =
1158				le32_to_cpu(cp_hdr->ucode_feature_version);
1159		} else {
1160			err = 0;
1161			adev->gfx.mec2_fw = NULL;
1162		}
1163	}
1164
1165	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1166	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1167	info->fw = adev->gfx.pfp_fw;
1168	header = (const struct common_firmware_header *)info->fw->data;
1169	adev->firmware.fw_size +=
1170		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1171
1172	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1173	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1174	info->fw = adev->gfx.me_fw;
1175	header = (const struct common_firmware_header *)info->fw->data;
1176	adev->firmware.fw_size +=
1177		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1178
1179	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1180	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1181	info->fw = adev->gfx.ce_fw;
1182	header = (const struct common_firmware_header *)info->fw->data;
1183	adev->firmware.fw_size +=
1184		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1185
1186	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1187	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1188	info->fw = adev->gfx.rlc_fw;
1189	header = (const struct common_firmware_header *)info->fw->data;
1190	adev->firmware.fw_size +=
1191		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1192
1193	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1194	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1195	info->fw = adev->gfx.mec_fw;
1196	header = (const struct common_firmware_header *)info->fw->data;
1197	adev->firmware.fw_size +=
1198		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1199
1200	/* we need account JT in */
1201	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1202	adev->firmware.fw_size +=
1203		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
 
 
1204
1205	if (amdgpu_sriov_vf(adev)) {
1206		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
1207		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
1208		info->fw = adev->gfx.mec_fw;
1209		adev->firmware.fw_size +=
1210			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
1211	}
1212
1213	if (adev->gfx.mec2_fw) {
1214		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1215		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1216		info->fw = adev->gfx.mec2_fw;
1217		header = (const struct common_firmware_header *)info->fw->data;
1218		adev->firmware.fw_size +=
1219			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
 
 
 
 
 
 
 
 
 
1220	}
1221
1222out:
1223	if (err) {
1224		dev_err(adev->dev,
1225			"gfx8: Failed to load firmware \"%s\"\n",
1226			fw_name);
1227		release_firmware(adev->gfx.pfp_fw);
1228		adev->gfx.pfp_fw = NULL;
1229		release_firmware(adev->gfx.me_fw);
1230		adev->gfx.me_fw = NULL;
1231		release_firmware(adev->gfx.ce_fw);
1232		adev->gfx.ce_fw = NULL;
1233		release_firmware(adev->gfx.rlc_fw);
1234		adev->gfx.rlc_fw = NULL;
1235		release_firmware(adev->gfx.mec_fw);
1236		adev->gfx.mec_fw = NULL;
1237		release_firmware(adev->gfx.mec2_fw);
1238		adev->gfx.mec2_fw = NULL;
1239	}
1240	return err;
1241}
1242
1243static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
1244				    volatile u32 *buffer)
1245{
1246	u32 count = 0, i;
1247	const struct cs_section_def *sect = NULL;
1248	const struct cs_extent_def *ext = NULL;
1249
1250	if (adev->gfx.rlc.cs_data == NULL)
1251		return;
1252	if (buffer == NULL)
1253		return;
1254
1255	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1256	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1257
1258	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1259	buffer[count++] = cpu_to_le32(0x80000000);
1260	buffer[count++] = cpu_to_le32(0x80000000);
1261
1262	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1263		for (ext = sect->section; ext->extent != NULL; ++ext) {
1264			if (sect->id == SECT_CONTEXT) {
1265				buffer[count++] =
1266					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1267				buffer[count++] = cpu_to_le32(ext->reg_index -
1268						PACKET3_SET_CONTEXT_REG_START);
1269				for (i = 0; i < ext->reg_count; i++)
1270					buffer[count++] = cpu_to_le32(ext->extent[i]);
1271			} else {
1272				return;
1273			}
1274		}
1275	}
1276
1277	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
1278	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
1279			PACKET3_SET_CONTEXT_REG_START);
1280	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
1281	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
1282
1283	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1284	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1285
1286	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1287	buffer[count++] = cpu_to_le32(0);
1288}
1289
1290static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
1291{
1292	if (adev->asic_type == CHIP_CARRIZO)
1293		return 5;
1294	else
1295		return 4;
1296}
1297
1298static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1299{
1300	const struct cs_section_def *cs_data;
1301	int r;
 
1302
1303	adev->gfx.rlc.cs_data = vi_cs_data;
1304
1305	cs_data = adev->gfx.rlc.cs_data;
1306
1307	if (cs_data) {
1308		/* init clear state block */
1309		r = amdgpu_gfx_rlc_init_csb(adev);
1310		if (r)
 
 
 
 
 
 
 
 
1311			return r;
 
1312	}
1313
1314	if ((adev->asic_type == CHIP_CARRIZO) ||
1315	    (adev->asic_type == CHIP_STONEY)) {
1316		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1317		r = amdgpu_gfx_rlc_init_cpt(adev);
1318		if (r)
1319			return r;
 
 
 
 
 
1320	}
1321
1322	/* init spm vmid with 0xf */
1323	if (adev->gfx.rlc.funcs->update_spm_vmid)
1324		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1325
1326	return 0;
1327}
1328
1329static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
1330{
1331	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1332}
1333
1334static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
1335{
1336	int r;
1337	u32 *hpd;
1338	size_t mec_hpd_size;
1339
1340	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1341
1342	/* take ownership of the relevant compute queues */
1343	amdgpu_gfx_compute_queue_acquire(adev);
1344
1345	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
1346
1347	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1348				      AMDGPU_GEM_DOMAIN_VRAM,
1349				      &adev->gfx.mec.hpd_eop_obj,
1350				      &adev->gfx.mec.hpd_eop_gpu_addr,
1351				      (void **)&hpd);
1352	if (r) {
1353		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
 
1354		return r;
1355	}
1356
1357	memset(hpd, 0, mec_hpd_size);
1358
1359	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1360	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1361
1362	return 0;
1363}
1364
1365static const u32 vgpr_init_compute_shader[] =
1366{
1367	0x7e000209, 0x7e020208,
1368	0x7e040207, 0x7e060206,
1369	0x7e080205, 0x7e0a0204,
1370	0x7e0c0203, 0x7e0e0202,
1371	0x7e100201, 0x7e120200,
1372	0x7e140209, 0x7e160208,
1373	0x7e180207, 0x7e1a0206,
1374	0x7e1c0205, 0x7e1e0204,
1375	0x7e200203, 0x7e220202,
1376	0x7e240201, 0x7e260200,
1377	0x7e280209, 0x7e2a0208,
1378	0x7e2c0207, 0x7e2e0206,
1379	0x7e300205, 0x7e320204,
1380	0x7e340203, 0x7e360202,
1381	0x7e380201, 0x7e3a0200,
1382	0x7e3c0209, 0x7e3e0208,
1383	0x7e400207, 0x7e420206,
1384	0x7e440205, 0x7e460204,
1385	0x7e480203, 0x7e4a0202,
1386	0x7e4c0201, 0x7e4e0200,
1387	0x7e500209, 0x7e520208,
1388	0x7e540207, 0x7e560206,
1389	0x7e580205, 0x7e5a0204,
1390	0x7e5c0203, 0x7e5e0202,
1391	0x7e600201, 0x7e620200,
1392	0x7e640209, 0x7e660208,
1393	0x7e680207, 0x7e6a0206,
1394	0x7e6c0205, 0x7e6e0204,
1395	0x7e700203, 0x7e720202,
1396	0x7e740201, 0x7e760200,
1397	0x7e780209, 0x7e7a0208,
1398	0x7e7c0207, 0x7e7e0206,
1399	0xbf8a0000, 0xbf810000,
1400};
1401
1402static const u32 sgpr_init_compute_shader[] =
1403{
1404	0xbe8a0100, 0xbe8c0102,
1405	0xbe8e0104, 0xbe900106,
1406	0xbe920108, 0xbe940100,
1407	0xbe960102, 0xbe980104,
1408	0xbe9a0106, 0xbe9c0108,
1409	0xbe9e0100, 0xbea00102,
1410	0xbea20104, 0xbea40106,
1411	0xbea60108, 0xbea80100,
1412	0xbeaa0102, 0xbeac0104,
1413	0xbeae0106, 0xbeb00108,
1414	0xbeb20100, 0xbeb40102,
1415	0xbeb60104, 0xbeb80106,
1416	0xbeba0108, 0xbebc0100,
1417	0xbebe0102, 0xbec00104,
1418	0xbec20106, 0xbec40108,
1419	0xbec60100, 0xbec80102,
1420	0xbee60004, 0xbee70005,
1421	0xbeea0006, 0xbeeb0007,
1422	0xbee80008, 0xbee90009,
1423	0xbefc0000, 0xbf8a0000,
1424	0xbf810000, 0x00000000,
1425};
1426
1427static const u32 vgpr_init_regs[] =
1428{
1429	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1430	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1431	mmCOMPUTE_NUM_THREAD_X, 256*4,
1432	mmCOMPUTE_NUM_THREAD_Y, 1,
1433	mmCOMPUTE_NUM_THREAD_Z, 1,
1434	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1435	mmCOMPUTE_PGM_RSRC2, 20,
1436	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1437	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1438	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1439	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1440	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1441	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1442	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1443	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1444	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1445	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1446};
1447
1448static const u32 sgpr1_init_regs[] =
1449{
1450	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1451	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1452	mmCOMPUTE_NUM_THREAD_X, 256*5,
1453	mmCOMPUTE_NUM_THREAD_Y, 1,
1454	mmCOMPUTE_NUM_THREAD_Z, 1,
1455	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1456	mmCOMPUTE_PGM_RSRC2, 20,
1457	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1458	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1459	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1460	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1461	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1462	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1463	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1464	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1465	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1466	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1467};
1468
1469static const u32 sgpr2_init_regs[] =
1470{
1471	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
1472	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1473	mmCOMPUTE_NUM_THREAD_X, 256*5,
1474	mmCOMPUTE_NUM_THREAD_Y, 1,
1475	mmCOMPUTE_NUM_THREAD_Z, 1,
1476	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1477	mmCOMPUTE_PGM_RSRC2, 20,
1478	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1479	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1480	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1481	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1482	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1483	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1484	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1485	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1486	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1487	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1488};
1489
1490static const u32 sec_ded_counter_registers[] =
1491{
1492	mmCPC_EDC_ATC_CNT,
1493	mmCPC_EDC_SCRATCH_CNT,
1494	mmCPC_EDC_UCODE_CNT,
1495	mmCPF_EDC_ATC_CNT,
1496	mmCPF_EDC_ROQ_CNT,
1497	mmCPF_EDC_TAG_CNT,
1498	mmCPG_EDC_ATC_CNT,
1499	mmCPG_EDC_DMA_CNT,
1500	mmCPG_EDC_TAG_CNT,
1501	mmDC_EDC_CSINVOC_CNT,
1502	mmDC_EDC_RESTORE_CNT,
1503	mmDC_EDC_STATE_CNT,
1504	mmGDS_EDC_CNT,
1505	mmGDS_EDC_GRBM_CNT,
1506	mmGDS_EDC_OA_DED,
1507	mmSPI_EDC_CNT,
1508	mmSQC_ATC_EDC_GATCL1_CNT,
1509	mmSQC_EDC_CNT,
1510	mmSQ_EDC_DED_CNT,
1511	mmSQ_EDC_INFO,
1512	mmSQ_EDC_SEC_CNT,
1513	mmTCC_EDC_CNT,
1514	mmTCP_ATC_EDC_GATCL1_CNT,
1515	mmTCP_EDC_CNT,
1516	mmTD_EDC_CNT
1517};
1518
1519static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1520{
1521	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1522	struct amdgpu_ib ib;
1523	struct dma_fence *f = NULL;
1524	int r, i;
1525	u32 tmp;
1526	unsigned total_size, vgpr_offset, sgpr_offset;
1527	u64 gpu_addr;
1528
1529	/* only supported on CZ */
1530	if (adev->asic_type != CHIP_CARRIZO)
1531		return 0;
1532
1533	/* bail if the compute ring is not ready */
1534	if (!ring->sched.ready)
1535		return 0;
1536
1537	tmp = RREG32(mmGB_EDC_MODE);
1538	WREG32(mmGB_EDC_MODE, 0);
1539
1540	total_size =
1541		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1542	total_size +=
1543		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1544	total_size +=
1545		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1546	total_size = ALIGN(total_size, 256);
1547	vgpr_offset = total_size;
1548	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1549	sgpr_offset = total_size;
1550	total_size += sizeof(sgpr_init_compute_shader);
1551
1552	/* allocate an indirect buffer to put the commands in */
1553	memset(&ib, 0, sizeof(ib));
1554	r = amdgpu_ib_get(adev, NULL, total_size,
1555					AMDGPU_IB_POOL_DIRECT, &ib);
1556	if (r) {
1557		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1558		return r;
1559	}
1560
1561	/* load the compute shaders */
1562	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1563		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1564
1565	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1566		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1567
1568	/* init the ib length to 0 */
1569	ib.length_dw = 0;
1570
1571	/* VGPR */
1572	/* write the register state for the compute dispatch */
1573	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1574		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1575		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1576		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1577	}
1578	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1579	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1580	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1581	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1582	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1583	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1584
1585	/* write dispatch packet */
1586	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1587	ib.ptr[ib.length_dw++] = 8; /* x */
1588	ib.ptr[ib.length_dw++] = 1; /* y */
1589	ib.ptr[ib.length_dw++] = 1; /* z */
1590	ib.ptr[ib.length_dw++] =
1591		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1592
1593	/* write CS partial flush packet */
1594	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1595	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1596
1597	/* SGPR1 */
1598	/* write the register state for the compute dispatch */
1599	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1600		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1601		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1602		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1603	}
1604	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1605	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1606	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1607	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1608	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1609	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1610
1611	/* write dispatch packet */
1612	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1613	ib.ptr[ib.length_dw++] = 8; /* x */
1614	ib.ptr[ib.length_dw++] = 1; /* y */
1615	ib.ptr[ib.length_dw++] = 1; /* z */
1616	ib.ptr[ib.length_dw++] =
1617		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1618
1619	/* write CS partial flush packet */
1620	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1621	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1622
1623	/* SGPR2 */
1624	/* write the register state for the compute dispatch */
1625	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1626		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1627		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1628		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1629	}
1630	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1631	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1632	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1633	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1634	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1635	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1636
1637	/* write dispatch packet */
1638	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1639	ib.ptr[ib.length_dw++] = 8; /* x */
1640	ib.ptr[ib.length_dw++] = 1; /* y */
1641	ib.ptr[ib.length_dw++] = 1; /* z */
1642	ib.ptr[ib.length_dw++] =
1643		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1644
1645	/* write CS partial flush packet */
1646	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1647	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1648
1649	/* shedule the ib on the ring */
1650	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1651	if (r) {
1652		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1653		goto fail;
1654	}
1655
1656	/* wait for the GPU to finish processing the IB */
1657	r = dma_fence_wait(f, false);
1658	if (r) {
1659		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1660		goto fail;
1661	}
1662
1663	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1664	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1665	WREG32(mmGB_EDC_MODE, tmp);
1666
1667	tmp = RREG32(mmCC_GC_EDC_CONFIG);
1668	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1669	WREG32(mmCC_GC_EDC_CONFIG, tmp);
1670
1671
1672	/* read back registers to clear the counters */
1673	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1674		RREG32(sec_ded_counter_registers[i]);
1675
1676fail:
 
1677	amdgpu_ib_free(adev, &ib, NULL);
1678	dma_fence_put(f);
1679
1680	return r;
1681}
1682
1683static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1684{
1685	u32 gb_addr_config;
1686	u32 mc_arb_ramcfg;
1687	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
1688	u32 tmp;
1689	int ret;
1690
1691	switch (adev->asic_type) {
1692	case CHIP_TOPAZ:
1693		adev->gfx.config.max_shader_engines = 1;
1694		adev->gfx.config.max_tile_pipes = 2;
1695		adev->gfx.config.max_cu_per_sh = 6;
1696		adev->gfx.config.max_sh_per_se = 1;
1697		adev->gfx.config.max_backends_per_se = 2;
1698		adev->gfx.config.max_texture_channel_caches = 2;
1699		adev->gfx.config.max_gprs = 256;
1700		adev->gfx.config.max_gs_threads = 32;
1701		adev->gfx.config.max_hw_contexts = 8;
1702
1703		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1704		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1705		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1706		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1707		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1708		break;
1709	case CHIP_FIJI:
1710		adev->gfx.config.max_shader_engines = 4;
1711		adev->gfx.config.max_tile_pipes = 16;
1712		adev->gfx.config.max_cu_per_sh = 16;
1713		adev->gfx.config.max_sh_per_se = 1;
1714		adev->gfx.config.max_backends_per_se = 4;
1715		adev->gfx.config.max_texture_channel_caches = 16;
1716		adev->gfx.config.max_gprs = 256;
1717		adev->gfx.config.max_gs_threads = 32;
1718		adev->gfx.config.max_hw_contexts = 8;
1719
1720		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1721		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1722		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1723		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1724		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1725		break;
1726	case CHIP_POLARIS11:
1727	case CHIP_POLARIS12:
1728		ret = amdgpu_atombios_get_gfx_info(adev);
1729		if (ret)
1730			return ret;
1731		adev->gfx.config.max_gprs = 256;
1732		adev->gfx.config.max_gs_threads = 32;
1733		adev->gfx.config.max_hw_contexts = 8;
1734
1735		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1736		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1737		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1738		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1739		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
1740		break;
1741	case CHIP_POLARIS10:
1742	case CHIP_VEGAM:
1743		ret = amdgpu_atombios_get_gfx_info(adev);
1744		if (ret)
1745			return ret;
1746		adev->gfx.config.max_gprs = 256;
1747		adev->gfx.config.max_gs_threads = 32;
1748		adev->gfx.config.max_hw_contexts = 8;
1749
1750		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1751		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1752		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1753		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1754		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1755		break;
1756	case CHIP_TONGA:
1757		adev->gfx.config.max_shader_engines = 4;
1758		adev->gfx.config.max_tile_pipes = 8;
1759		adev->gfx.config.max_cu_per_sh = 8;
1760		adev->gfx.config.max_sh_per_se = 1;
1761		adev->gfx.config.max_backends_per_se = 2;
1762		adev->gfx.config.max_texture_channel_caches = 8;
1763		adev->gfx.config.max_gprs = 256;
1764		adev->gfx.config.max_gs_threads = 32;
1765		adev->gfx.config.max_hw_contexts = 8;
1766
1767		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1768		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1769		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1770		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1771		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1772		break;
1773	case CHIP_CARRIZO:
1774		adev->gfx.config.max_shader_engines = 1;
1775		adev->gfx.config.max_tile_pipes = 2;
1776		adev->gfx.config.max_sh_per_se = 1;
1777		adev->gfx.config.max_backends_per_se = 2;
1778		adev->gfx.config.max_cu_per_sh = 8;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1779		adev->gfx.config.max_texture_channel_caches = 2;
1780		adev->gfx.config.max_gprs = 256;
1781		adev->gfx.config.max_gs_threads = 32;
1782		adev->gfx.config.max_hw_contexts = 8;
1783
1784		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1785		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1786		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1787		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1788		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1789		break;
1790	case CHIP_STONEY:
1791		adev->gfx.config.max_shader_engines = 1;
1792		adev->gfx.config.max_tile_pipes = 2;
1793		adev->gfx.config.max_sh_per_se = 1;
1794		adev->gfx.config.max_backends_per_se = 1;
1795		adev->gfx.config.max_cu_per_sh = 3;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1796		adev->gfx.config.max_texture_channel_caches = 2;
1797		adev->gfx.config.max_gprs = 256;
1798		adev->gfx.config.max_gs_threads = 16;
1799		adev->gfx.config.max_hw_contexts = 8;
1800
1801		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1802		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1803		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1804		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1805		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1806		break;
1807	default:
1808		adev->gfx.config.max_shader_engines = 2;
1809		adev->gfx.config.max_tile_pipes = 4;
1810		adev->gfx.config.max_cu_per_sh = 2;
1811		adev->gfx.config.max_sh_per_se = 1;
1812		adev->gfx.config.max_backends_per_se = 2;
1813		adev->gfx.config.max_texture_channel_caches = 4;
1814		adev->gfx.config.max_gprs = 256;
1815		adev->gfx.config.max_gs_threads = 32;
1816		adev->gfx.config.max_hw_contexts = 8;
1817
1818		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1819		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1820		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1821		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1822		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1823		break;
1824	}
1825
 
1826	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1827	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1828
1829	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
1830				MC_ARB_RAMCFG, NOOFBANK);
1831	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
1832				MC_ARB_RAMCFG, NOOFRANKS);
1833
1834	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1835	adev->gfx.config.mem_max_burst_length_bytes = 256;
1836	if (adev->flags & AMD_IS_APU) {
1837		/* Get memory bank mapping mode. */
1838		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1839		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1840		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1841
1842		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1843		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1844		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1845
1846		/* Validate settings in case only one DIMM installed. */
1847		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1848			dimm00_addr_map = 0;
1849		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1850			dimm01_addr_map = 0;
1851		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1852			dimm10_addr_map = 0;
1853		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1854			dimm11_addr_map = 0;
1855
1856		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
1857		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
1858		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1859			adev->gfx.config.mem_row_size_in_kb = 2;
1860		else
1861			adev->gfx.config.mem_row_size_in_kb = 1;
1862	} else {
1863		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1864		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1865		if (adev->gfx.config.mem_row_size_in_kb > 4)
1866			adev->gfx.config.mem_row_size_in_kb = 4;
1867	}
1868
1869	adev->gfx.config.shader_engine_tile_size = 32;
1870	adev->gfx.config.num_gpus = 1;
1871	adev->gfx.config.multi_gpu_tile_size = 64;
1872
1873	/* fix up row size */
1874	switch (adev->gfx.config.mem_row_size_in_kb) {
1875	case 1:
1876	default:
1877		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1878		break;
1879	case 2:
1880		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1881		break;
1882	case 4:
1883		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1884		break;
1885	}
1886	adev->gfx.config.gb_addr_config = gb_addr_config;
1887
1888	return 0;
1889}
1890
1891static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1892					int mec, int pipe, int queue)
1893{
1894	int r;
1895	unsigned irq_type;
1896	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1897	unsigned int hw_prio;
1898
1899	ring = &adev->gfx.compute_ring[ring_id];
1900
1901	/* mec0 is me1 */
1902	ring->me = mec + 1;
1903	ring->pipe = pipe;
1904	ring->queue = queue;
1905
1906	ring->ring_obj = NULL;
1907	ring->use_doorbell = true;
1908	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
1909	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1910				+ (ring_id * GFX8_MEC_HPD_SIZE);
1911	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1912
1913	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1914		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1915		+ ring->pipe;
1916
1917	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
1918			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
1919	/* type-2 packets are deprecated on MEC, use type-3 instead */
1920	r = amdgpu_ring_init(adev, ring, 1024,
1921			     &adev->gfx.eop_irq, irq_type, hw_prio);
1922	if (r)
1923		return r;
1924
1925
1926	return 0;
1927}
1928
1929static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
1930
1931static int gfx_v8_0_sw_init(void *handle)
1932{
1933	int i, j, k, r, ring_id;
1934	struct amdgpu_ring *ring;
1935	struct amdgpu_kiq *kiq;
1936	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1937
1938	switch (adev->asic_type) {
1939	case CHIP_TONGA:
1940	case CHIP_CARRIZO:
1941	case CHIP_FIJI:
1942	case CHIP_POLARIS10:
1943	case CHIP_POLARIS11:
1944	case CHIP_POLARIS12:
1945	case CHIP_VEGAM:
1946		adev->gfx.mec.num_mec = 2;
1947		break;
1948	case CHIP_TOPAZ:
1949	case CHIP_STONEY:
1950	default:
1951		adev->gfx.mec.num_mec = 1;
1952		break;
1953	}
1954
1955	adev->gfx.mec.num_pipe_per_mec = 4;
1956	adev->gfx.mec.num_queue_per_pipe = 8;
1957
1958	/* EOP Event */
1959	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
1960	if (r)
1961		return r;
1962
1963	/* Privileged reg */
1964	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
1965			      &adev->gfx.priv_reg_irq);
1966	if (r)
1967		return r;
1968
1969	/* Privileged inst */
1970	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
1971			      &adev->gfx.priv_inst_irq);
1972	if (r)
1973		return r;
1974
1975	/* Add CP EDC/ECC irq  */
1976	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
1977			      &adev->gfx.cp_ecc_error_irq);
1978	if (r)
1979		return r;
1980
1981	/* SQ interrupts. */
1982	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
1983			      &adev->gfx.sq_irq);
1984	if (r) {
1985		DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
1986		return r;
1987	}
1988
1989	INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
1990
1991	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1992
1993	gfx_v8_0_scratch_init(adev);
1994
1995	r = gfx_v8_0_init_microcode(adev);
1996	if (r) {
1997		DRM_ERROR("Failed to load gfx firmware!\n");
1998		return r;
1999	}
2000
2001	r = adev->gfx.rlc.funcs->init(adev);
2002	if (r) {
2003		DRM_ERROR("Failed to init rlc BOs!\n");
2004		return r;
2005	}
2006
2007	r = gfx_v8_0_mec_init(adev);
2008	if (r) {
2009		DRM_ERROR("Failed to init MEC BOs!\n");
2010		return r;
2011	}
2012
2013	/* set up the gfx ring */
2014	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2015		ring = &adev->gfx.gfx_ring[i];
2016		ring->ring_obj = NULL;
2017		sprintf(ring->name, "gfx");
2018		/* no gfx doorbells on iceland */
2019		if (adev->asic_type != CHIP_TOPAZ) {
2020			ring->use_doorbell = true;
2021			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
2022		}
2023
2024		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2025				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2026				     AMDGPU_RING_PRIO_DEFAULT);
 
2027		if (r)
2028			return r;
2029	}
2030
 
 
 
2031
2032	/* set up the compute queues - allocate horizontally across pipes */
2033	ring_id = 0;
2034	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2035		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2036			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2037				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2038					continue;
2039
2040				r = gfx_v8_0_compute_ring_init(adev,
2041								ring_id,
2042								i, k, j);
2043				if (r)
2044					return r;
2045
2046				ring_id++;
2047			}
2048		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2049	}
2050
2051	r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE);
2052	if (r) {
2053		DRM_ERROR("Failed to init KIQ BOs!\n");
 
 
 
2054		return r;
2055	}
2056
2057	kiq = &adev->gfx.kiq;
2058	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
 
 
2059	if (r)
2060		return r;
2061
2062	/* create MQD for all compute queues as well as KIQ for SRIOV case */
2063	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
 
 
2064	if (r)
2065		return r;
2066
2067	adev->gfx.ce_ram_size = 0x8000;
2068
2069	r = gfx_v8_0_gpu_early_init(adev);
2070	if (r)
2071		return r;
2072
2073	return 0;
2074}
2075
2076static int gfx_v8_0_sw_fini(void *handle)
2077{
 
2078	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2079	int i;
 
 
 
2080
2081	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2082		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2083	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2084		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2085
2086	amdgpu_gfx_mqd_sw_fini(adev);
2087	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2088	amdgpu_gfx_kiq_fini(adev);
2089
2090	gfx_v8_0_mec_fini(adev);
2091	amdgpu_gfx_rlc_fini(adev);
2092	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2093				&adev->gfx.rlc.clear_state_gpu_addr,
2094				(void **)&adev->gfx.rlc.cs_ptr);
2095	if ((adev->asic_type == CHIP_CARRIZO) ||
2096	    (adev->asic_type == CHIP_STONEY)) {
2097		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2098				&adev->gfx.rlc.cp_table_gpu_addr,
2099				(void **)&adev->gfx.rlc.cp_table_ptr);
2100	}
2101	gfx_v8_0_free_microcode(adev);
2102
2103	return 0;
2104}
2105
2106static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2107{
2108	uint32_t *modearray, *mod2array;
2109	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2110	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2111	u32 reg_offset;
2112
2113	modearray = adev->gfx.config.tile_mode_array;
2114	mod2array = adev->gfx.config.macrotile_mode_array;
2115
2116	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2117		modearray[reg_offset] = 0;
2118
2119	for (reg_offset = 0; reg_offset <  num_secondary_tile_mode_states; reg_offset++)
2120		mod2array[reg_offset] = 0;
2121
2122	switch (adev->asic_type) {
2123	case CHIP_TOPAZ:
2124		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2125				PIPE_CONFIG(ADDR_SURF_P2) |
2126				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2127				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2128		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2129				PIPE_CONFIG(ADDR_SURF_P2) |
2130				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2131				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2132		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2133				PIPE_CONFIG(ADDR_SURF_P2) |
2134				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2135				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2136		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2137				PIPE_CONFIG(ADDR_SURF_P2) |
2138				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2139				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2140		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2141				PIPE_CONFIG(ADDR_SURF_P2) |
2142				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2143				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2144		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2145				PIPE_CONFIG(ADDR_SURF_P2) |
2146				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2147				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2148		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2149				PIPE_CONFIG(ADDR_SURF_P2) |
2150				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2151				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2152		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2153				PIPE_CONFIG(ADDR_SURF_P2));
2154		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2155				PIPE_CONFIG(ADDR_SURF_P2) |
2156				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2157				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2158		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2159				 PIPE_CONFIG(ADDR_SURF_P2) |
2160				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2161				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2162		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2163				 PIPE_CONFIG(ADDR_SURF_P2) |
2164				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2165				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2166		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2167				 PIPE_CONFIG(ADDR_SURF_P2) |
2168				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2169				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2170		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2171				 PIPE_CONFIG(ADDR_SURF_P2) |
2172				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2173				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2174		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2175				 PIPE_CONFIG(ADDR_SURF_P2) |
2176				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2177				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2178		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2179				 PIPE_CONFIG(ADDR_SURF_P2) |
2180				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2181				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2182		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2183				 PIPE_CONFIG(ADDR_SURF_P2) |
2184				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2185				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2186		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2187				 PIPE_CONFIG(ADDR_SURF_P2) |
2188				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2189				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2190		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2191				 PIPE_CONFIG(ADDR_SURF_P2) |
2192				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2193				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2194		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2195				 PIPE_CONFIG(ADDR_SURF_P2) |
2196				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2197				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2198		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2199				 PIPE_CONFIG(ADDR_SURF_P2) |
2200				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2201				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2202		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2203				 PIPE_CONFIG(ADDR_SURF_P2) |
2204				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2205				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2206		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2207				 PIPE_CONFIG(ADDR_SURF_P2) |
2208				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2209				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2210		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2211				 PIPE_CONFIG(ADDR_SURF_P2) |
2212				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2213				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2214		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2215				 PIPE_CONFIG(ADDR_SURF_P2) |
2216				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2217				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2218		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2219				 PIPE_CONFIG(ADDR_SURF_P2) |
2220				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2221				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2222		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2223				 PIPE_CONFIG(ADDR_SURF_P2) |
2224				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2225				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2226
2227		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2228				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2229				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2230				NUM_BANKS(ADDR_SURF_8_BANK));
2231		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2232				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2233				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2234				NUM_BANKS(ADDR_SURF_8_BANK));
2235		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2236				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2237				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2238				NUM_BANKS(ADDR_SURF_8_BANK));
2239		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2240				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2241				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2242				NUM_BANKS(ADDR_SURF_8_BANK));
2243		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2244				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2245				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2246				NUM_BANKS(ADDR_SURF_8_BANK));
2247		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2248				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2249				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2250				NUM_BANKS(ADDR_SURF_8_BANK));
2251		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2252				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2253				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2254				NUM_BANKS(ADDR_SURF_8_BANK));
2255		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2256				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2257				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2258				NUM_BANKS(ADDR_SURF_16_BANK));
2259		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2260				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2261				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2262				NUM_BANKS(ADDR_SURF_16_BANK));
2263		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2264				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2265				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2266				 NUM_BANKS(ADDR_SURF_16_BANK));
2267		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2268				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2269				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2270				 NUM_BANKS(ADDR_SURF_16_BANK));
2271		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2272				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2273				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2274				 NUM_BANKS(ADDR_SURF_16_BANK));
2275		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2276				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2277				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2278				 NUM_BANKS(ADDR_SURF_16_BANK));
2279		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2280				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2281				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2282				 NUM_BANKS(ADDR_SURF_8_BANK));
2283
2284		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2285			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2286			    reg_offset != 23)
2287				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2288
2289		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2290			if (reg_offset != 7)
2291				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2292
2293		break;
2294	case CHIP_FIJI:
2295	case CHIP_VEGAM:
2296		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2297				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2298				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2299				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2300		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2301				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2302				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2303				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2304		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2305				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2306				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2307				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2308		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2309				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2310				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2311				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2312		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2313				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2314				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2315				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2316		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2317				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2318				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2319				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2320		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2321				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2322				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2323				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2324		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2325				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2326				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2327				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2328		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2329				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2330		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2331				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2332				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2333				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2334		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2335				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2336				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2337				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2338		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2339				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2340				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2341				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2342		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2343				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2344				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2345				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2346		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2347				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2348				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2349				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2350		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2351				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2352				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2353				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2354		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2355				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2356				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2357				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2358		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2359				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2360				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2361				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2362		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2363				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2364				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2365				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2366		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2367				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2368				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2369				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2370		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2371				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2372				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2373				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2374		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2375				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2376				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2377				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2378		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2379				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2380				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2381				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2382		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2383				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2384				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2385				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2386		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2387				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2388				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2389				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2390		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2391				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2392				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2393				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2394		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2395				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2396				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2397				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2398		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2399				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2400				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2401				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2402		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2403				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2404				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2405				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2406		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2407				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2408				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2409				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2410		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2411				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2412				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2413				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2414		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2415				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2416				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2417				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2418
2419		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2420				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2421				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2422				NUM_BANKS(ADDR_SURF_8_BANK));
2423		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2424				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2425				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2426				NUM_BANKS(ADDR_SURF_8_BANK));
2427		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2428				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2429				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2430				NUM_BANKS(ADDR_SURF_8_BANK));
2431		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2432				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2433				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2434				NUM_BANKS(ADDR_SURF_8_BANK));
2435		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2436				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2437				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2438				NUM_BANKS(ADDR_SURF_8_BANK));
2439		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2440				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2441				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2442				NUM_BANKS(ADDR_SURF_8_BANK));
2443		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2444				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2445				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2446				NUM_BANKS(ADDR_SURF_8_BANK));
2447		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2448				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2449				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2450				NUM_BANKS(ADDR_SURF_8_BANK));
2451		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2452				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2453				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2454				NUM_BANKS(ADDR_SURF_8_BANK));
2455		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2456				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2457				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2458				 NUM_BANKS(ADDR_SURF_8_BANK));
2459		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2460				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2461				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2462				 NUM_BANKS(ADDR_SURF_8_BANK));
2463		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2464				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2465				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2466				 NUM_BANKS(ADDR_SURF_8_BANK));
2467		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2468				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2469				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2470				 NUM_BANKS(ADDR_SURF_8_BANK));
2471		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2472				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2473				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2474				 NUM_BANKS(ADDR_SURF_4_BANK));
2475
2476		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2477			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2478
2479		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2480			if (reg_offset != 7)
2481				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2482
2483		break;
2484	case CHIP_TONGA:
2485		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2486				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2487				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2488				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2489		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2490				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2491				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2492				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2493		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2494				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2495				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2496				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2497		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2498				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2499				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2500				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2501		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2502				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2503				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2504				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2505		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2506				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2507				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2508				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2509		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2510				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2511				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2512				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2513		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2514				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2515				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2516				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2517		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2518				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2519		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2520				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2521				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2522				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2523		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2524				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2525				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2526				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2527		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2528				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2529				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2530				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2531		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2532				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2533				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2534				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2535		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2536				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2537				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2538				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2539		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2540				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2541				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2542				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2543		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2544				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2545				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2546				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2547		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2548				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2549				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2550				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2551		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2552				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2553				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2554				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2555		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2556				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2557				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2558				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2559		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2560				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2561				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2562				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2563		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2564				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2565				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2566				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2567		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2568				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2569				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2570				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2571		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2572				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2573				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2574				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2575		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2576				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2577				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2578				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2579		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2580				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2581				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2582				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2583		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2584				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2585				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2586				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2587		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2588				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2589				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2590				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2591		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2592				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2593				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2594				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2595		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2596				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2597				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2598				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2599		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2600				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2601				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2602				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2603		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2604				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2605				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2606				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2607
2608		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2609				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2610				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2611				NUM_BANKS(ADDR_SURF_16_BANK));
2612		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2613				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2614				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2615				NUM_BANKS(ADDR_SURF_16_BANK));
2616		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2617				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2618				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2619				NUM_BANKS(ADDR_SURF_16_BANK));
2620		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2621				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2622				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2623				NUM_BANKS(ADDR_SURF_16_BANK));
2624		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2625				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2626				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2627				NUM_BANKS(ADDR_SURF_16_BANK));
2628		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2629				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2630				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2631				NUM_BANKS(ADDR_SURF_16_BANK));
2632		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2633				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2634				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2635				NUM_BANKS(ADDR_SURF_16_BANK));
2636		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2637				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2638				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2639				NUM_BANKS(ADDR_SURF_16_BANK));
2640		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2641				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2642				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2643				NUM_BANKS(ADDR_SURF_16_BANK));
2644		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2645				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2646				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2647				 NUM_BANKS(ADDR_SURF_16_BANK));
2648		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2649				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2650				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2651				 NUM_BANKS(ADDR_SURF_16_BANK));
2652		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2653				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2654				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2655				 NUM_BANKS(ADDR_SURF_8_BANK));
2656		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2657				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2658				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2659				 NUM_BANKS(ADDR_SURF_4_BANK));
2660		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2661				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2662				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2663				 NUM_BANKS(ADDR_SURF_4_BANK));
2664
2665		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2666			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2667
2668		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2669			if (reg_offset != 7)
2670				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2671
2672		break;
2673	case CHIP_POLARIS11:
2674	case CHIP_POLARIS12:
2675		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2676				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2677				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2678				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2679		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2680				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2681				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2682				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2683		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2684				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2685				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2686				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2687		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2688				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2689				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2690				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2691		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2692				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2693				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2694				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2695		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2696				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2697				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2698				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2699		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2700				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2701				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2702				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2703		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2704				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2705				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2706				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2707		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2708				PIPE_CONFIG(ADDR_SURF_P4_16x16));
2709		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2710				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2711				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2712				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2713		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2714				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2715				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2716				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2717		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2718				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2719				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2720				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2721		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2722				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2723				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2724				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2725		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2726				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2727				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2728				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2729		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2730				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2731				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2732				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2733		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2734				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2735				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2736				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2737		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2738				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2739				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2740				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2741		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2742				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2743				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2744				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2745		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2746				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2747				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2748				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2749		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2750				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2751				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2752				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2753		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2754				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2755				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2756				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2757		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2758				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2759				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2760				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2761		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2762				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2763				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2764				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2765		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2766				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2767				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2768				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2769		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2770				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2771				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2772				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2773		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2774				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2775				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2776				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2777		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2778				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2779				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2780				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2781		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2782				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2783				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2784				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2785		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2786				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2787				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2788				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2789		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2790				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2791				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2792				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2793		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2794				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2795				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2796				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2797
2798		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2799				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2800				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2801				NUM_BANKS(ADDR_SURF_16_BANK));
2802
2803		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2804				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2805				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2806				NUM_BANKS(ADDR_SURF_16_BANK));
2807
2808		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2809				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2810				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2811				NUM_BANKS(ADDR_SURF_16_BANK));
2812
2813		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2814				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2815				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2816				NUM_BANKS(ADDR_SURF_16_BANK));
2817
2818		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2819				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2820				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2821				NUM_BANKS(ADDR_SURF_16_BANK));
2822
2823		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2824				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2825				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2826				NUM_BANKS(ADDR_SURF_16_BANK));
2827
2828		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2829				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2830				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2831				NUM_BANKS(ADDR_SURF_16_BANK));
2832
2833		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2834				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2835				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2836				NUM_BANKS(ADDR_SURF_16_BANK));
2837
2838		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2839				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2840				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2841				NUM_BANKS(ADDR_SURF_16_BANK));
2842
2843		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2844				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2845				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2846				NUM_BANKS(ADDR_SURF_16_BANK));
2847
2848		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2849				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2850				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2851				NUM_BANKS(ADDR_SURF_16_BANK));
2852
2853		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2854				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2855				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2856				NUM_BANKS(ADDR_SURF_16_BANK));
2857
2858		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2859				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2860				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2861				NUM_BANKS(ADDR_SURF_8_BANK));
2862
2863		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2864				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2865				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2866				NUM_BANKS(ADDR_SURF_4_BANK));
2867
2868		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2869			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2870
2871		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2872			if (reg_offset != 7)
2873				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2874
2875		break;
2876	case CHIP_POLARIS10:
2877		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2878				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2879				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2880				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2881		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2882				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2883				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2884				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2885		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2886				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2887				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2888				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2889		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2890				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2891				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2892				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2893		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2894				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2895				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2896				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2897		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2898				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2899				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2900				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2901		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2902				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2903				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2904				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2905		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2906				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2907				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2908				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2909		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2910				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2911		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2912				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2913				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2914				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2915		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2916				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2917				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2918				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2919		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2920				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2921				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2922				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2923		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2924				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2925				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2926				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2927		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2928				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2929				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2930				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2931		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2932				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2933				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2934				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2935		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2936				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2937				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2938				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2939		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2940				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2941				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2942				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2943		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2944				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2945				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2946				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2947		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2948				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2949				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2950				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2951		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2952				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2953				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2954				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2955		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2956				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2957				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2958				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2959		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2960				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2961				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2962				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2963		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2964				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2965				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2966				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2967		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2968				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2969				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2970				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2971		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2972				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2973				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2974				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2975		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2976				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2977				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2978				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2979		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2980				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2981				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2982				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2983		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2984				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2985				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2986				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2987		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2988				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2989				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2990				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2991		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2992				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2993				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2994				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2995		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2996				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2997				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2998				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2999
3000		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3001				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3002				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3003				NUM_BANKS(ADDR_SURF_16_BANK));
3004
3005		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3006				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3007				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3008				NUM_BANKS(ADDR_SURF_16_BANK));
3009
3010		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3011				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3012				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3013				NUM_BANKS(ADDR_SURF_16_BANK));
3014
3015		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3016				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3017				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3018				NUM_BANKS(ADDR_SURF_16_BANK));
3019
3020		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3021				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3022				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3023				NUM_BANKS(ADDR_SURF_16_BANK));
3024
3025		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3026				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3027				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3028				NUM_BANKS(ADDR_SURF_16_BANK));
3029
3030		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3031				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3032				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3033				NUM_BANKS(ADDR_SURF_16_BANK));
3034
3035		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3036				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3037				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3038				NUM_BANKS(ADDR_SURF_16_BANK));
3039
3040		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3041				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3042				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3043				NUM_BANKS(ADDR_SURF_16_BANK));
3044
3045		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3046				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3047				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3048				NUM_BANKS(ADDR_SURF_16_BANK));
3049
3050		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3051				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3052				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3053				NUM_BANKS(ADDR_SURF_16_BANK));
3054
3055		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3056				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3057				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3058				NUM_BANKS(ADDR_SURF_8_BANK));
3059
3060		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3061				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3062				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3063				NUM_BANKS(ADDR_SURF_4_BANK));
3064
3065		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3066				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3067				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3068				NUM_BANKS(ADDR_SURF_4_BANK));
3069
3070		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3071			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3072
3073		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3074			if (reg_offset != 7)
3075				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3076
3077		break;
3078	case CHIP_STONEY:
3079		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3080				PIPE_CONFIG(ADDR_SURF_P2) |
3081				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3082				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3083		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3084				PIPE_CONFIG(ADDR_SURF_P2) |
3085				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3086				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3087		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3088				PIPE_CONFIG(ADDR_SURF_P2) |
3089				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3090				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3091		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3092				PIPE_CONFIG(ADDR_SURF_P2) |
3093				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3094				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3095		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3096				PIPE_CONFIG(ADDR_SURF_P2) |
3097				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3098				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3099		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3100				PIPE_CONFIG(ADDR_SURF_P2) |
3101				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3102				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3103		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3104				PIPE_CONFIG(ADDR_SURF_P2) |
3105				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3106				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3107		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3108				PIPE_CONFIG(ADDR_SURF_P2));
3109		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3110				PIPE_CONFIG(ADDR_SURF_P2) |
3111				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3112				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3113		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3114				 PIPE_CONFIG(ADDR_SURF_P2) |
3115				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3116				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3117		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3118				 PIPE_CONFIG(ADDR_SURF_P2) |
3119				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3120				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3121		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3122				 PIPE_CONFIG(ADDR_SURF_P2) |
3123				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3124				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3125		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3126				 PIPE_CONFIG(ADDR_SURF_P2) |
3127				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3128				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3129		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3130				 PIPE_CONFIG(ADDR_SURF_P2) |
3131				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3132				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3133		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3134				 PIPE_CONFIG(ADDR_SURF_P2) |
3135				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3136				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3137		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3138				 PIPE_CONFIG(ADDR_SURF_P2) |
3139				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3140				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3141		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3142				 PIPE_CONFIG(ADDR_SURF_P2) |
3143				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3144				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3145		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3146				 PIPE_CONFIG(ADDR_SURF_P2) |
3147				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3148				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3149		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3150				 PIPE_CONFIG(ADDR_SURF_P2) |
3151				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3152				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3153		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3154				 PIPE_CONFIG(ADDR_SURF_P2) |
3155				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3156				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3157		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3158				 PIPE_CONFIG(ADDR_SURF_P2) |
3159				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3160				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3161		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3162				 PIPE_CONFIG(ADDR_SURF_P2) |
3163				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3164				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3165		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3166				 PIPE_CONFIG(ADDR_SURF_P2) |
3167				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3168				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3169		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3170				 PIPE_CONFIG(ADDR_SURF_P2) |
3171				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3172				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3173		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3174				 PIPE_CONFIG(ADDR_SURF_P2) |
3175				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3176				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3177		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3178				 PIPE_CONFIG(ADDR_SURF_P2) |
3179				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3180				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3181
3182		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3183				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3184				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3185				NUM_BANKS(ADDR_SURF_8_BANK));
3186		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3187				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3188				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3189				NUM_BANKS(ADDR_SURF_8_BANK));
3190		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3191				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3192				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3193				NUM_BANKS(ADDR_SURF_8_BANK));
3194		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3195				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3196				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3197				NUM_BANKS(ADDR_SURF_8_BANK));
3198		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3199				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3200				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3201				NUM_BANKS(ADDR_SURF_8_BANK));
3202		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3203				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3204				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3205				NUM_BANKS(ADDR_SURF_8_BANK));
3206		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3207				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3208				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3209				NUM_BANKS(ADDR_SURF_8_BANK));
3210		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3211				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3212				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3213				NUM_BANKS(ADDR_SURF_16_BANK));
3214		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3215				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3216				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3217				NUM_BANKS(ADDR_SURF_16_BANK));
3218		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3219				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3220				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3221				 NUM_BANKS(ADDR_SURF_16_BANK));
3222		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3223				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3224				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3225				 NUM_BANKS(ADDR_SURF_16_BANK));
3226		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3227				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3228				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3229				 NUM_BANKS(ADDR_SURF_16_BANK));
3230		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3231				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3232				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3233				 NUM_BANKS(ADDR_SURF_16_BANK));
3234		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3235				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3236				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3237				 NUM_BANKS(ADDR_SURF_8_BANK));
3238
3239		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3240			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3241			    reg_offset != 23)
3242				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3243
3244		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3245			if (reg_offset != 7)
3246				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3247
3248		break;
3249	default:
3250		dev_warn(adev->dev,
3251			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
3252			 adev->asic_type);
3253		fallthrough;
3254
3255	case CHIP_CARRIZO:
3256		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3257				PIPE_CONFIG(ADDR_SURF_P2) |
3258				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3259				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3260		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3261				PIPE_CONFIG(ADDR_SURF_P2) |
3262				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3263				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3264		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3265				PIPE_CONFIG(ADDR_SURF_P2) |
3266				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3267				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3268		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3269				PIPE_CONFIG(ADDR_SURF_P2) |
3270				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3271				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3272		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3273				PIPE_CONFIG(ADDR_SURF_P2) |
3274				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3275				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3276		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3277				PIPE_CONFIG(ADDR_SURF_P2) |
3278				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3279				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3280		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3281				PIPE_CONFIG(ADDR_SURF_P2) |
3282				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3283				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3284		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3285				PIPE_CONFIG(ADDR_SURF_P2));
3286		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3287				PIPE_CONFIG(ADDR_SURF_P2) |
3288				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3289				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3290		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3291				 PIPE_CONFIG(ADDR_SURF_P2) |
3292				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3293				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3294		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3295				 PIPE_CONFIG(ADDR_SURF_P2) |
3296				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3297				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3298		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3299				 PIPE_CONFIG(ADDR_SURF_P2) |
3300				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3301				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3302		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3303				 PIPE_CONFIG(ADDR_SURF_P2) |
3304				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3305				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3306		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3307				 PIPE_CONFIG(ADDR_SURF_P2) |
3308				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3309				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3310		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3311				 PIPE_CONFIG(ADDR_SURF_P2) |
3312				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3313				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3314		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3315				 PIPE_CONFIG(ADDR_SURF_P2) |
3316				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3317				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3318		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3319				 PIPE_CONFIG(ADDR_SURF_P2) |
3320				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3321				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3322		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3323				 PIPE_CONFIG(ADDR_SURF_P2) |
3324				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3325				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3326		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3327				 PIPE_CONFIG(ADDR_SURF_P2) |
3328				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3329				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3330		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3331				 PIPE_CONFIG(ADDR_SURF_P2) |
3332				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3333				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3334		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3335				 PIPE_CONFIG(ADDR_SURF_P2) |
3336				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3337				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3338		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3339				 PIPE_CONFIG(ADDR_SURF_P2) |
3340				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3341				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3342		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3343				 PIPE_CONFIG(ADDR_SURF_P2) |
3344				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3345				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3346		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3347				 PIPE_CONFIG(ADDR_SURF_P2) |
3348				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3349				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3350		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3351				 PIPE_CONFIG(ADDR_SURF_P2) |
3352				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3353				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3354		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3355				 PIPE_CONFIG(ADDR_SURF_P2) |
3356				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3357				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3358
3359		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3360				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3361				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3362				NUM_BANKS(ADDR_SURF_8_BANK));
3363		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3364				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3365				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3366				NUM_BANKS(ADDR_SURF_8_BANK));
3367		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3368				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3369				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3370				NUM_BANKS(ADDR_SURF_8_BANK));
3371		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3372				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3373				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3374				NUM_BANKS(ADDR_SURF_8_BANK));
3375		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3376				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3377				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3378				NUM_BANKS(ADDR_SURF_8_BANK));
3379		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3380				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3381				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3382				NUM_BANKS(ADDR_SURF_8_BANK));
3383		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3384				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3385				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3386				NUM_BANKS(ADDR_SURF_8_BANK));
3387		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3388				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3389				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3390				NUM_BANKS(ADDR_SURF_16_BANK));
3391		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3392				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3393				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3394				NUM_BANKS(ADDR_SURF_16_BANK));
3395		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3396				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3397				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3398				 NUM_BANKS(ADDR_SURF_16_BANK));
3399		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3400				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3401				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3402				 NUM_BANKS(ADDR_SURF_16_BANK));
3403		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3404				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3405				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3406				 NUM_BANKS(ADDR_SURF_16_BANK));
3407		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3408				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3409				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3410				 NUM_BANKS(ADDR_SURF_16_BANK));
3411		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3412				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3413				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3414				 NUM_BANKS(ADDR_SURF_8_BANK));
3415
3416		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3417			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3418			    reg_offset != 23)
3419				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3420
3421		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3422			if (reg_offset != 7)
3423				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3424
3425		break;
3426	}
3427}
3428
3429static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3430				  u32 se_num, u32 sh_num, u32 instance)
3431{
3432	u32 data;
3433
3434	if (instance == 0xffffffff)
3435		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3436	else
3437		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3438
3439	if (se_num == 0xffffffff)
3440		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
3441	else
 
3442		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
3443
3444	if (sh_num == 0xffffffff)
3445		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
3446	else
3447		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
3448
 
3449	WREG32(mmGRBM_GFX_INDEX, data);
3450}
3451
3452static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
3453				  u32 me, u32 pipe, u32 q, u32 vm)
3454{
3455	vi_srbm_select(adev, me, pipe, q, vm);
3456}
3457
3458static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3459{
3460	u32 data, mask;
3461
3462	data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
3463		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3464
3465	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
3466
3467	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3468					 adev->gfx.config.max_sh_per_se);
3469
3470	return (~data) & mask;
3471}
3472
3473static void
3474gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3475{
3476	switch (adev->asic_type) {
3477	case CHIP_FIJI:
3478	case CHIP_VEGAM:
3479		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3480			  RB_XSEL2(1) | PKR_MAP(2) |
3481			  PKR_XSEL(1) | PKR_YSEL(1) |
3482			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3483		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3484			   SE_PAIR_YSEL(2);
3485		break;
3486	case CHIP_TONGA:
3487	case CHIP_POLARIS10:
3488		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3489			  SE_XSEL(1) | SE_YSEL(1);
3490		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3491			   SE_PAIR_YSEL(2);
3492		break;
3493	case CHIP_TOPAZ:
3494	case CHIP_CARRIZO:
3495		*rconf |= RB_MAP_PKR0(2);
3496		*rconf1 |= 0x0;
3497		break;
3498	case CHIP_POLARIS11:
3499	case CHIP_POLARIS12:
3500		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3501			  SE_XSEL(1) | SE_YSEL(1);
3502		*rconf1 |= 0x0;
3503		break;
3504	case CHIP_STONEY:
3505		*rconf |= 0x0;
3506		*rconf1 |= 0x0;
3507		break;
3508	default:
3509		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3510		break;
3511	}
3512}
3513
3514static void
3515gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3516					u32 raster_config, u32 raster_config_1,
3517					unsigned rb_mask, unsigned num_rb)
3518{
3519	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3520	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3521	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3522	unsigned rb_per_se = num_rb / num_se;
3523	unsigned se_mask[4];
3524	unsigned se;
3525
3526	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3527	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3528	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3529	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3530
3531	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3532	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3533	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3534
3535	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3536			     (!se_mask[2] && !se_mask[3]))) {
3537		raster_config_1 &= ~SE_PAIR_MAP_MASK;
3538
3539		if (!se_mask[0] && !se_mask[1]) {
3540			raster_config_1 |=
3541				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3542		} else {
3543			raster_config_1 |=
3544				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3545		}
3546	}
3547
3548	for (se = 0; se < num_se; se++) {
3549		unsigned raster_config_se = raster_config;
3550		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3551		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3552		int idx = (se / 2) * 2;
3553
3554		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3555			raster_config_se &= ~SE_MAP_MASK;
3556
3557			if (!se_mask[idx]) {
3558				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3559			} else {
3560				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3561			}
3562		}
3563
3564		pkr0_mask &= rb_mask;
3565		pkr1_mask &= rb_mask;
3566		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3567			raster_config_se &= ~PKR_MAP_MASK;
3568
3569			if (!pkr0_mask) {
3570				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3571			} else {
3572				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3573			}
3574		}
3575
3576		if (rb_per_se >= 2) {
3577			unsigned rb0_mask = 1 << (se * rb_per_se);
3578			unsigned rb1_mask = rb0_mask << 1;
3579
3580			rb0_mask &= rb_mask;
3581			rb1_mask &= rb_mask;
3582			if (!rb0_mask || !rb1_mask) {
3583				raster_config_se &= ~RB_MAP_PKR0_MASK;
3584
3585				if (!rb0_mask) {
3586					raster_config_se |=
3587						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3588				} else {
3589					raster_config_se |=
3590						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3591				}
3592			}
3593
3594			if (rb_per_se > 2) {
3595				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3596				rb1_mask = rb0_mask << 1;
3597				rb0_mask &= rb_mask;
3598				rb1_mask &= rb_mask;
3599				if (!rb0_mask || !rb1_mask) {
3600					raster_config_se &= ~RB_MAP_PKR1_MASK;
3601
3602					if (!rb0_mask) {
3603						raster_config_se |=
3604							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3605					} else {
3606						raster_config_se |=
3607							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3608					}
3609				}
3610			}
3611		}
3612
3613		/* GRBM_GFX_INDEX has a different offset on VI */
3614		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
3615		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3616		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3617	}
3618
3619	/* GRBM_GFX_INDEX has a different offset on VI */
3620	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3621}
3622
3623static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3624{
3625	int i, j;
3626	u32 data;
3627	u32 raster_config = 0, raster_config_1 = 0;
3628	u32 active_rbs = 0;
3629	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3630					adev->gfx.config.max_sh_per_se;
3631	unsigned num_rb_pipes;
3632
3633	mutex_lock(&adev->grbm_idx_mutex);
3634	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3635		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3636			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3637			data = gfx_v8_0_get_rb_active_bitmap(adev);
3638			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3639					       rb_bitmap_width_per_sh);
3640		}
3641	}
3642	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
3643
3644	adev->gfx.config.backend_enable_mask = active_rbs;
3645	adev->gfx.config.num_rbs = hweight32(active_rbs);
3646
3647	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3648			     adev->gfx.config.max_shader_engines, 16);
3649
3650	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3651
3652	if (!adev->gfx.config.backend_enable_mask ||
3653			adev->gfx.config.num_rbs >= num_rb_pipes) {
3654		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3655		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3656	} else {
3657		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3658							adev->gfx.config.backend_enable_mask,
3659							num_rb_pipes);
3660	}
3661
3662	/* cache the values for userspace */
3663	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3664		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3665			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3666			adev->gfx.config.rb_config[i][j].rb_backend_disable =
3667				RREG32(mmCC_RB_BACKEND_DISABLE);
3668			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
3669				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3670			adev->gfx.config.rb_config[i][j].raster_config =
3671				RREG32(mmPA_SC_RASTER_CONFIG);
3672			adev->gfx.config.rb_config[i][j].raster_config_1 =
3673				RREG32(mmPA_SC_RASTER_CONFIG_1);
3674		}
3675	}
3676	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3677	mutex_unlock(&adev->grbm_idx_mutex);
3678}
3679
3680/**
3681 * gfx_v8_0_init_compute_vmid - gart enable
3682 *
3683 * @adev: amdgpu_device pointer
3684 *
3685 * Initialize compute vmid sh_mem registers
3686 *
3687 */
3688#define DEFAULT_SH_MEM_BASES	(0x6000)
 
 
3689static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
3690{
3691	int i;
3692	uint32_t sh_mem_config;
3693	uint32_t sh_mem_bases;
3694
3695	/*
3696	 * Configure apertures:
3697	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
3698	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
3699	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
3700	 */
3701	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
3702
3703	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
3704			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
3705			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
3706			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
3707			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
3708			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
3709
3710	mutex_lock(&adev->srbm_mutex);
3711	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3712		vi_srbm_select(adev, 0, 0, 0, i);
3713		/* CP and shaders */
3714		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
3715		WREG32(mmSH_MEM_APE1_BASE, 1);
3716		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3717		WREG32(mmSH_MEM_BASES, sh_mem_bases);
3718	}
3719	vi_srbm_select(adev, 0, 0, 0, 0);
3720	mutex_unlock(&adev->srbm_mutex);
3721
3722	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
3723	   acccess. These should be enabled by FW for target VMIDs. */
3724	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3725		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
3726		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
3727		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
3728		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
3729	}
3730}
3731
3732static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
3733{
3734	int vmid;
3735
3736	/*
3737	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
3738	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
3739	 * the driver can enable them for graphics. VMID0 should maintain
3740	 * access so that HWS firmware can save/restore entries.
3741	 */
3742	for (vmid = 1; vmid < 16; vmid++) {
3743		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
3744		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
3745		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
3746		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
3747	}
3748}
3749
3750static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3751{
3752	switch (adev->asic_type) {
3753	default:
3754		adev->gfx.config.double_offchip_lds_buf = 1;
3755		break;
3756	case CHIP_CARRIZO:
3757	case CHIP_STONEY:
3758		adev->gfx.config.double_offchip_lds_buf = 0;
3759		break;
3760	}
3761}
3762
3763static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
3764{
3765	u32 tmp, sh_static_mem_cfg;
3766	int i;
3767
3768	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
3769	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3770	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3771	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
3772
3773	gfx_v8_0_tiling_mode_table_init(adev);
 
3774	gfx_v8_0_setup_rb(adev);
3775	gfx_v8_0_get_cu_info(adev);
3776	gfx_v8_0_config_init(adev);
3777
3778	/* XXX SH_MEM regs */
3779	/* where to put LDS, scratch, GPUVM in FSA64 space */
3780	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
3781				   SWIZZLE_ENABLE, 1);
3782	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3783				   ELEMENT_SIZE, 1);
3784	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3785				   INDEX_STRIDE, 3);
3786	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
3787
3788	mutex_lock(&adev->srbm_mutex);
3789	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
3790		vi_srbm_select(adev, 0, 0, 0, i);
3791		/* CP and shaders */
3792		if (i == 0) {
3793			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
3794			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3795			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3796					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3797			WREG32(mmSH_MEM_CONFIG, tmp);
3798			WREG32(mmSH_MEM_BASES, 0);
3799		} else {
3800			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
3801			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3802			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3803					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3804			WREG32(mmSH_MEM_CONFIG, tmp);
3805			tmp = adev->gmc.shared_aperture_start >> 48;
3806			WREG32(mmSH_MEM_BASES, tmp);
3807		}
3808
3809		WREG32(mmSH_MEM_APE1_BASE, 1);
3810		WREG32(mmSH_MEM_APE1_LIMIT, 0);
 
3811	}
3812	vi_srbm_select(adev, 0, 0, 0, 0);
3813	mutex_unlock(&adev->srbm_mutex);
3814
3815	gfx_v8_0_init_compute_vmid(adev);
3816	gfx_v8_0_init_gds_vmid(adev);
3817
3818	mutex_lock(&adev->grbm_idx_mutex);
3819	/*
3820	 * making sure that the following register writes will be broadcasted
3821	 * to all the shaders
3822	 */
3823	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3824
3825	WREG32(mmPA_SC_FIFO_SIZE,
3826		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
3827			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
3828		   (adev->gfx.config.sc_prim_fifo_size_backend <<
3829			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
3830		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
3831			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
3832		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
3833			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
3834
3835	tmp = RREG32(mmSPI_ARB_PRIORITY);
3836	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
3837	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
3838	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
3839	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
3840	WREG32(mmSPI_ARB_PRIORITY, tmp);
3841
3842	mutex_unlock(&adev->grbm_idx_mutex);
3843
3844}
3845
3846static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3847{
3848	u32 i, j, k;
3849	u32 mask;
3850
3851	mutex_lock(&adev->grbm_idx_mutex);
3852	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3853		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3854			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3855			for (k = 0; k < adev->usec_timeout; k++) {
3856				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3857					break;
3858				udelay(1);
3859			}
3860			if (k == adev->usec_timeout) {
3861				gfx_v8_0_select_se_sh(adev, 0xffffffff,
3862						      0xffffffff, 0xffffffff);
3863				mutex_unlock(&adev->grbm_idx_mutex);
3864				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
3865					 i, j);
3866				return;
3867			}
3868		}
3869	}
3870	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3871	mutex_unlock(&adev->grbm_idx_mutex);
3872
3873	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3874		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3875		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3876		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3877	for (k = 0; k < adev->usec_timeout; k++) {
3878		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3879			break;
3880		udelay(1);
3881	}
3882}
3883
3884static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3885					       bool enable)
3886{
3887	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3888
3889	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3890	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3891	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3892	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3893
3894	WREG32(mmCP_INT_CNTL_RING0, tmp);
3895}
3896
3897static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
3898{
3899	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
3900	/* csib */
3901	WREG32(mmRLC_CSIB_ADDR_HI,
3902			adev->gfx.rlc.clear_state_gpu_addr >> 32);
3903	WREG32(mmRLC_CSIB_ADDR_LO,
3904			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3905	WREG32(mmRLC_CSIB_LENGTH,
3906			adev->gfx.rlc.clear_state_size);
3907}
3908
3909static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
3910				int ind_offset,
3911				int list_size,
3912				int *unique_indices,
3913				int *indices_count,
3914				int max_indices,
3915				int *ind_start_offsets,
3916				int *offset_count,
3917				int max_offset)
3918{
3919	int indices;
3920	bool new_entry = true;
3921
3922	for (; ind_offset < list_size; ind_offset++) {
3923
3924		if (new_entry) {
3925			new_entry = false;
3926			ind_start_offsets[*offset_count] = ind_offset;
3927			*offset_count = *offset_count + 1;
3928			BUG_ON(*offset_count >= max_offset);
3929		}
3930
3931		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
3932			new_entry = true;
3933			continue;
3934		}
3935
3936		ind_offset += 2;
3937
3938		/* look for the matching indice */
3939		for (indices = 0;
3940			indices < *indices_count;
3941			indices++) {
3942			if (unique_indices[indices] ==
3943				register_list_format[ind_offset])
3944				break;
3945		}
3946
3947		if (indices >= *indices_count) {
3948			unique_indices[*indices_count] =
3949				register_list_format[ind_offset];
3950			indices = *indices_count;
3951			*indices_count = *indices_count + 1;
3952			BUG_ON(*indices_count >= max_indices);
3953		}
3954
3955		register_list_format[ind_offset] = indices;
3956	}
3957}
3958
3959static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3960{
3961	int i, temp, data;
3962	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
3963	int indices_count = 0;
3964	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3965	int offset_count = 0;
3966
3967	int list_size;
3968	unsigned int *register_list_format =
3969		kmemdup(adev->gfx.rlc.register_list_format,
3970			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
3971	if (!register_list_format)
3972		return -ENOMEM;
3973
3974	gfx_v8_0_parse_ind_reg_list(register_list_format,
3975				RLC_FormatDirectRegListLength,
3976				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3977				unique_indices,
3978				&indices_count,
3979				ARRAY_SIZE(unique_indices),
3980				indirect_start_offsets,
3981				&offset_count,
3982				ARRAY_SIZE(indirect_start_offsets));
3983
3984	/* save and restore list */
3985	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
3986
3987	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
3988	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
3989		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
3990
3991	/* indirect list */
3992	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
3993	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
3994		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
3995
3996	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
3997	list_size = list_size >> 1;
3998	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
3999	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
4000
4001	/* starting offsets starts */
4002	WREG32(mmRLC_GPM_SCRATCH_ADDR,
4003		adev->gfx.rlc.starting_offsets_start);
4004	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
4005		WREG32(mmRLC_GPM_SCRATCH_DATA,
4006				indirect_start_offsets[i]);
4007
4008	/* unique indices */
4009	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
4010	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
4011	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
4012		if (unique_indices[i] != 0) {
4013			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
4014			WREG32(data + i, unique_indices[i] >> 20);
4015		}
4016	}
4017	kfree(register_list_format);
4018
4019	return 0;
 
 
 
 
 
4020}
4021
4022static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
4023{
4024	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
4025}
4026
4027static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
4028{
4029	uint32_t data;
4030
4031	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
4032
4033	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
4034	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
4035	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
4036	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
4037	WREG32(mmRLC_PG_DELAY, data);
4038
4039	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
4040	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
4041
 
4042}
4043
4044static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
4045						bool enable)
4046{
4047	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
4048}
 
4049
4050static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
4051						  bool enable)
4052{
4053	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
4054}
4055
4056static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
4057{
4058	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
4059}
4060
4061static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4062{
4063	if ((adev->asic_type == CHIP_CARRIZO) ||
4064	    (adev->asic_type == CHIP_STONEY)) {
4065		gfx_v8_0_init_csb(adev);
4066		gfx_v8_0_init_save_restore_list(adev);
4067		gfx_v8_0_enable_save_restore_machine(adev);
4068		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4069		gfx_v8_0_init_power_gating(adev);
4070		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4071	} else if ((adev->asic_type == CHIP_POLARIS11) ||
4072		   (adev->asic_type == CHIP_POLARIS12) ||
4073		   (adev->asic_type == CHIP_VEGAM)) {
4074		gfx_v8_0_init_csb(adev);
4075		gfx_v8_0_init_save_restore_list(adev);
4076		gfx_v8_0_enable_save_restore_machine(adev);
4077		gfx_v8_0_init_power_gating(adev);
4078	}
4079
 
4080}
4081
4082static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
4083{
4084	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
4085
4086	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4087	gfx_v8_0_wait_for_rlc_serdes(adev);
4088}
4089
4090static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
4091{
4092	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4093	udelay(50);
4094
4095	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
4096	udelay(50);
4097}
4098
4099static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
4100{
4101	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
4102
4103	/* carrizo do enable cp interrupt after cp inited */
4104	if (!(adev->flags & AMD_IS_APU))
4105		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4106
4107	udelay(50);
4108}
4109
4110static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4111{
4112	if (amdgpu_sriov_vf(adev)) {
4113		gfx_v8_0_init_csb(adev);
4114		return 0;
4115	}
4116
4117	adev->gfx.rlc.funcs->stop(adev);
4118	adev->gfx.rlc.funcs->reset(adev);
4119	gfx_v8_0_init_pg(adev);
4120	adev->gfx.rlc.funcs->start(adev);
4121
4122	return 0;
4123}
4124
4125static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4126{
 
4127	u32 tmp = RREG32(mmCP_ME_CNTL);
4128
4129	if (enable) {
4130		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4131		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4132		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4133	} else {
4134		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4135		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4136		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
 
 
4137	}
4138	WREG32(mmCP_ME_CNTL, tmp);
4139	udelay(50);
4140}
4141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4142static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4143{
4144	u32 count = 0;
4145	const struct cs_section_def *sect = NULL;
4146	const struct cs_extent_def *ext = NULL;
4147
4148	/* begin clear state */
4149	count += 2;
4150	/* context control state */
4151	count += 3;
4152
4153	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4154		for (ext = sect->section; ext->extent != NULL; ++ext) {
4155			if (sect->id == SECT_CONTEXT)
4156				count += 2 + ext->reg_count;
4157			else
4158				return 0;
4159		}
4160	}
4161	/* pa_sc_raster_config/pa_sc_raster_config1 */
4162	count += 4;
4163	/* end clear state */
4164	count += 2;
4165	/* clear state */
4166	count += 2;
4167
4168	return count;
4169}
4170
4171static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
4172{
4173	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
4174	const struct cs_section_def *sect = NULL;
4175	const struct cs_extent_def *ext = NULL;
4176	int r, i;
4177
4178	/* init the CP */
4179	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
4180	WREG32(mmCP_ENDIAN_SWAP, 0);
4181	WREG32(mmCP_DEVICE_ID, 1);
4182
4183	gfx_v8_0_cp_gfx_enable(adev, true);
4184
4185	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
4186	if (r) {
4187		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
4188		return r;
4189	}
4190
4191	/* clear state buffer */
4192	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4193	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4194
4195	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4196	amdgpu_ring_write(ring, 0x80000000);
4197	amdgpu_ring_write(ring, 0x80000000);
4198
4199	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4200		for (ext = sect->section; ext->extent != NULL; ++ext) {
4201			if (sect->id == SECT_CONTEXT) {
4202				amdgpu_ring_write(ring,
4203				       PACKET3(PACKET3_SET_CONTEXT_REG,
4204					       ext->reg_count));
4205				amdgpu_ring_write(ring,
4206				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4207				for (i = 0; i < ext->reg_count; i++)
4208					amdgpu_ring_write(ring, ext->extent[i]);
4209			}
4210		}
4211	}
4212
4213	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4214	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4215	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4216	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4217
4218	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4219	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4220
4221	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4222	amdgpu_ring_write(ring, 0);
4223
4224	/* init the CE partitions */
4225	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4226	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4227	amdgpu_ring_write(ring, 0x8000);
4228	amdgpu_ring_write(ring, 0x8000);
4229
4230	amdgpu_ring_commit(ring);
4231
4232	return 0;
4233}
4234static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
4235{
4236	u32 tmp;
4237	/* no gfx doorbells on iceland */
4238	if (adev->asic_type == CHIP_TOPAZ)
4239		return;
4240
4241	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
4242
4243	if (ring->use_doorbell) {
4244		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4245				DOORBELL_OFFSET, ring->doorbell_index);
4246		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4247						DOORBELL_HIT, 0);
4248		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4249					    DOORBELL_EN, 1);
4250	} else {
4251		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
4252	}
4253
4254	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
4255
4256	if (adev->flags & AMD_IS_APU)
4257		return;
4258
4259	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
4260					DOORBELL_RANGE_LOWER,
4261					adev->doorbell_index.gfx_ring0);
4262	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
4263
4264	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
4265		CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
4266}
4267
4268static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4269{
4270	struct amdgpu_ring *ring;
4271	u32 tmp;
4272	u32 rb_bufsz;
4273	u64 rb_addr, rptr_addr, wptr_gpu_addr;
 
4274
4275	/* Set the write pointer delay */
4276	WREG32(mmCP_RB_WPTR_DELAY, 0);
4277
4278	/* set the RB to use vmid 0 */
4279	WREG32(mmCP_RB_VMID, 0);
4280
4281	/* Set ring buffer size */
4282	ring = &adev->gfx.gfx_ring[0];
4283	rb_bufsz = order_base_2(ring->ring_size / 8);
4284	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
4285	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
4286	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
4287	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
4288#ifdef __BIG_ENDIAN
4289	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
4290#endif
4291	WREG32(mmCP_RB0_CNTL, tmp);
4292
4293	/* Initialize the ring buffer's read and write pointers */
4294	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
4295	ring->wptr = 0;
4296	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4297
4298	/* set the wb address wether it's enabled or not */
4299	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4300	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
4301	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
4302
4303	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4304	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
4305	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
4306	mdelay(1);
4307	WREG32(mmCP_RB0_CNTL, tmp);
4308
4309	rb_addr = ring->gpu_addr >> 8;
4310	WREG32(mmCP_RB0_BASE, rb_addr);
4311	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
4312
4313	gfx_v8_0_set_cpg_door_bell(adev, ring);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4314	/* start the ring */
4315	amdgpu_ring_clear_ring(ring);
4316	gfx_v8_0_cp_gfx_start(adev);
4317	ring->sched.ready = true;
 
 
 
 
 
4318
4319	return 0;
4320}
4321
4322static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4323{
 
 
4324	if (enable) {
4325		WREG32(mmCP_MEC_CNTL, 0);
4326	} else {
4327		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4328		adev->gfx.kiq.ring.sched.ready = false;
 
4329	}
4330	udelay(50);
4331}
4332
4333/* KIQ functions */
4334static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
4335{
4336	uint32_t tmp;
4337	struct amdgpu_device *adev = ring->adev;
 
 
 
 
4338
4339	/* tell RLC which is KIQ queue */
4340	tmp = RREG32(mmRLC_CP_SCHEDULERS);
4341	tmp &= 0xffffff00;
4342	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4343	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4344	tmp |= 0x80;
4345	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4346}
4347
4348static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4349{
4350	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4351	uint64_t queue_mask = 0;
4352	int r, i;
4353
4354	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
4355		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
4356			continue;
4357
4358		/* This situation may be hit in the future if a new HW
4359		 * generation exposes more than 64 queues. If so, the
4360		 * definition of queue_mask needs updating */
4361		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
4362			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
4363			break;
4364		}
4365
4366		queue_mask |= (1ull << i);
4367	}
 
4368
4369	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
4370	if (r) {
4371		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4372		return r;
4373	}
4374	/* set resources */
4375	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
4376	amdgpu_ring_write(kiq_ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
4377	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
4378	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
4379	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
4380	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
4381	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
4382	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
4383	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4384		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4385		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
4386		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4387
4388		/* map queues */
4389		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
4390		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
4391		amdgpu_ring_write(kiq_ring,
4392				  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
4393		amdgpu_ring_write(kiq_ring,
4394				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
4395				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
4396				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
4397				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
4398		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
4399		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
4400		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
4401		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4402	}
4403
4404	amdgpu_ring_commit(kiq_ring);
4405
4406	return 0;
4407}
4408
4409static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4410{
4411	int i, r = 0;
4412
4413	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4414		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
4415		for (i = 0; i < adev->usec_timeout; i++) {
4416			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4417				break;
4418			udelay(1);
4419		}
4420		if (i == adev->usec_timeout)
4421			r = -ETIMEDOUT;
4422	}
4423	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
4424	WREG32(mmCP_HQD_PQ_RPTR, 0);
4425	WREG32(mmCP_HQD_PQ_WPTR, 0);
4426
4427	return r;
4428}
 
 
4429
4430static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4431{
4432	struct amdgpu_device *adev = ring->adev;
4433
4434	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4435		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
4436			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4437			mqd->cp_hqd_queue_priority =
4438				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4439		}
4440	}
4441}
4442
4443static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4444{
4445	struct amdgpu_device *adev = ring->adev;
4446	struct vi_mqd *mqd = ring->mqd_ptr;
4447	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4448	uint32_t tmp;
4449
4450	mqd->header = 0xC0310800;
4451	mqd->compute_pipelinestat_enable = 0x00000001;
4452	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4453	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4454	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4455	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4456	mqd->compute_misc_reserved = 0x00000003;
4457	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
4458						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4459	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
4460						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4461	eop_base_addr = ring->eop_gpu_addr >> 8;
4462	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4463	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4464
4465	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4466	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
4467	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4468			(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
4469
4470	mqd->cp_hqd_eop_control = tmp;
4471
4472	/* enable doorbell? */
4473	tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4474			    CP_HQD_PQ_DOORBELL_CONTROL,
4475			    DOORBELL_EN,
4476			    ring->use_doorbell ? 1 : 0);
4477
4478	mqd->cp_hqd_pq_doorbell_control = tmp;
4479
4480	/* set the pointer to the MQD */
4481	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
4482	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
4483
4484	/* set MQD vmid to 0 */
4485	tmp = RREG32(mmCP_MQD_CONTROL);
4486	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4487	mqd->cp_mqd_control = tmp;
4488
4489	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
4490	hqd_gpu_addr = ring->gpu_addr >> 8;
4491	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4492	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4493
4494	/* set up the HQD, this is similar to CP_RB0_CNTL */
4495	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
4496	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4497			    (order_base_2(ring->ring_size / 4) - 1));
4498	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4499			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
4500#ifdef __BIG_ENDIAN
4501	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
4502#endif
4503	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4504	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
4505	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4506	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4507	mqd->cp_hqd_pq_control = tmp;
4508
4509	/* set the wb address whether it's enabled or not */
4510	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
4511	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4512	mqd->cp_hqd_pq_rptr_report_addr_hi =
4513		upper_32_bits(wb_gpu_addr) & 0xffff;
4514
4515	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4516	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
4517	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4518	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4519
4520	tmp = 0;
4521	/* enable the doorbell if requested */
4522	if (ring->use_doorbell) {
4523		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
4524		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4525				DOORBELL_OFFSET, ring->doorbell_index);
4526
4527		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4528					 DOORBELL_EN, 1);
4529		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4530					 DOORBELL_SOURCE, 0);
4531		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4532					 DOORBELL_HIT, 0);
 
 
 
 
 
 
 
 
 
 
4533	}
 
 
4534
4535	mqd->cp_hqd_pq_doorbell_control = tmp;
 
 
4536
4537	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4538	ring->wptr = 0;
4539	mqd->cp_hqd_pq_wptr = ring->wptr;
4540	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
 
 
 
 
 
 
 
4541
4542	/* set the vmid for the queue */
4543	mqd->cp_hqd_vmid = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4544
4545	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
4546	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
4547	mqd->cp_hqd_persistent_state = tmp;
4548
4549	/* set MTYPE */
4550	tmp = RREG32(mmCP_HQD_IB_CONTROL);
4551	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4552	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
4553	mqd->cp_hqd_ib_control = tmp;
4554
4555	tmp = RREG32(mmCP_HQD_IQ_TIMER);
4556	tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
4557	mqd->cp_hqd_iq_timer = tmp;
4558
4559	tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
4560	tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
4561	mqd->cp_hqd_ctx_save_control = tmp;
4562
4563	/* defaults */
4564	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
4565	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
4566	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
4567	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
4568	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
4569	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
4570	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
4571	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
4572	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
4573	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
4574	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
4575	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
4576
4577	/* set static priority for a queue/ring */
4578	gfx_v8_0_mqd_set_priority(ring, mqd);
4579	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
4580
4581	/* map_queues packet doesn't need activate the queue,
4582	 * so only kiq need set this field.
4583	 */
4584	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
4585		mqd->cp_hqd_active = 1;
 
 
 
4586
4587	return 0;
4588}
 
 
 
 
 
 
 
 
 
 
 
 
4589
4590static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
4591			struct vi_mqd *mqd)
4592{
4593	uint32_t mqd_reg;
4594	uint32_t *mqd_data;
 
 
 
 
4595
4596	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
4597	mqd_data = &mqd->cp_mqd_base_addr_lo;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4598
4599	/* disable wptr polling */
4600	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
4601
4602	/* program all HQD registers */
4603	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
4604		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4605
4606	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
4607	 * This is safe since EOP RPTR==WPTR for any inactive HQD
4608	 * on ASICs that do not support context-save.
4609	 * EOP writes/reads can start anywhere in the ring.
4610	 */
4611	if (adev->asic_type != CHIP_TONGA) {
4612		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
4613		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
4614		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
4615	}
4616
4617	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
4618		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4619
4620	/* activate the HQD */
4621	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
4622		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4623
4624	return 0;
4625}
4626
4627static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
4628{
4629	struct amdgpu_device *adev = ring->adev;
4630	struct vi_mqd *mqd = ring->mqd_ptr;
4631	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
4632
4633	gfx_v8_0_kiq_setting(ring);
4634
4635	if (adev->in_gpu_reset) { /* for GPU_RESET case */
4636		/* reset MQD to a clean status */
4637		if (adev->gfx.mec.mqd_backup[mqd_idx])
4638			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4639
4640		/* reset ring buffer */
4641		ring->wptr = 0;
4642		amdgpu_ring_clear_ring(ring);
4643		mutex_lock(&adev->srbm_mutex);
4644		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4645		gfx_v8_0_mqd_commit(adev, mqd);
4646		vi_srbm_select(adev, 0, 0, 0, 0);
4647		mutex_unlock(&adev->srbm_mutex);
4648	} else {
4649		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4650		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4651		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4652		mutex_lock(&adev->srbm_mutex);
4653		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4654		gfx_v8_0_mqd_init(ring);
4655		gfx_v8_0_mqd_commit(adev, mqd);
4656		vi_srbm_select(adev, 0, 0, 0, 0);
4657		mutex_unlock(&adev->srbm_mutex);
 
4658
4659		if (adev->gfx.mec.mqd_backup[mqd_idx])
4660			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4661	}
4662
4663	return 0;
4664}
4665
4666static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4667{
4668	struct amdgpu_device *adev = ring->adev;
4669	struct vi_mqd *mqd = ring->mqd_ptr;
4670	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4671
4672	if (!adev->in_gpu_reset && !adev->in_suspend) {
4673		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4674		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4675		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4676		mutex_lock(&adev->srbm_mutex);
4677		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4678		gfx_v8_0_mqd_init(ring);
4679		vi_srbm_select(adev, 0, 0, 0, 0);
4680		mutex_unlock(&adev->srbm_mutex);
4681
4682		if (adev->gfx.mec.mqd_backup[mqd_idx])
4683			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4684	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
4685		/* reset MQD to a clean status */
4686		if (adev->gfx.mec.mqd_backup[mqd_idx])
4687			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4688		/* reset ring buffer */
4689		ring->wptr = 0;
4690		amdgpu_ring_clear_ring(ring);
4691	} else {
4692		amdgpu_ring_clear_ring(ring);
4693	}
4694	return 0;
4695}
4696
4697static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
4698{
4699	if (adev->asic_type > CHIP_TONGA) {
4700		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
4701		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
4702	}
4703	/* enable doorbells */
4704	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4705}
4706
4707static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4708{
4709	struct amdgpu_ring *ring;
4710	int r;
4711
4712	ring = &adev->gfx.kiq.ring;
4713
4714	r = amdgpu_bo_reserve(ring->mqd_obj, false);
4715	if (unlikely(r != 0))
4716		return r;
4717
4718	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4719	if (unlikely(r != 0))
4720		return r;
4721
4722	gfx_v8_0_kiq_init_queue(ring);
4723	amdgpu_bo_kunmap(ring->mqd_obj);
4724	ring->mqd_ptr = NULL;
4725	amdgpu_bo_unreserve(ring->mqd_obj);
4726	ring->sched.ready = true;
4727	return 0;
4728}
4729
4730static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4731{
4732	struct amdgpu_ring *ring = NULL;
4733	int r = 0, i;
4734
4735	gfx_v8_0_cp_compute_enable(adev, true);
4736
4737	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4738		ring = &adev->gfx.compute_ring[i];
4739
4740		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4741		if (unlikely(r != 0))
4742			goto done;
4743		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4744		if (!r) {
4745			r = gfx_v8_0_kcq_init_queue(ring);
4746			amdgpu_bo_kunmap(ring->mqd_obj);
4747			ring->mqd_ptr = NULL;
4748		}
4749		amdgpu_bo_unreserve(ring->mqd_obj);
4750		if (r)
4751			goto done;
4752	}
4753
4754	gfx_v8_0_set_mec_doorbell_range(adev);
4755
4756	r = gfx_v8_0_kiq_kcq_enable(adev);
4757	if (r)
4758		goto done;
4759
4760done:
4761	return r;
4762}
4763
4764static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4765{
4766	int r, i;
4767	struct amdgpu_ring *ring;
4768
4769	/* collect all the ring_tests here, gfx, kiq, compute */
4770	ring = &adev->gfx.gfx_ring[0];
4771	r = amdgpu_ring_test_helper(ring);
4772	if (r)
4773		return r;
4774
4775	ring = &adev->gfx.kiq.ring;
4776	r = amdgpu_ring_test_helper(ring);
4777	if (r)
4778		return r;
4779
4780	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4781		ring = &adev->gfx.compute_ring[i];
4782		amdgpu_ring_test_helper(ring);
4783	}
4784
4785	return 0;
4786}
4787
4788static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4789{
4790	int r;
4791
4792	if (!(adev->flags & AMD_IS_APU))
4793		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4794
4795	r = gfx_v8_0_kiq_resume(adev);
4796	if (r)
4797		return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4798
4799	r = gfx_v8_0_cp_gfx_resume(adev);
4800	if (r)
4801		return r;
4802
4803	r = gfx_v8_0_kcq_resume(adev);
4804	if (r)
4805		return r;
4806
4807	r = gfx_v8_0_cp_test_all_rings(adev);
4808	if (r)
4809		return r;
4810
4811	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4812
4813	return 0;
4814}
4815
4816static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
4817{
4818	gfx_v8_0_cp_gfx_enable(adev, enable);
4819	gfx_v8_0_cp_compute_enable(adev, enable);
4820}
4821
4822static int gfx_v8_0_hw_init(void *handle)
4823{
4824	int r;
4825	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4826
4827	gfx_v8_0_init_golden_registers(adev);
4828	gfx_v8_0_constants_init(adev);
4829
4830	r = adev->gfx.rlc.funcs->resume(adev);
 
 
4831	if (r)
4832		return r;
4833
4834	r = gfx_v8_0_cp_resume(adev);
 
 
4835
4836	return r;
4837}
4838
4839static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
4840{
4841	int r, i;
4842	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
4843
4844	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4845	if (r)
4846		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
 
 
4847
4848	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4849		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4850
4851		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
4852		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
4853						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
4854						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
4855						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
4856						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
4857		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
4858		amdgpu_ring_write(kiq_ring, 0);
4859		amdgpu_ring_write(kiq_ring, 0);
4860		amdgpu_ring_write(kiq_ring, 0);
4861	}
4862	r = amdgpu_ring_test_helper(kiq_ring);
4863	if (r)
4864		DRM_ERROR("KCQ disable failed\n");
4865
4866	return r;
4867}
4868
4869static bool gfx_v8_0_is_idle(void *handle)
4870{
4871	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4872
4873	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4874		|| RREG32(mmGRBM_STATUS2) != 0x8)
4875		return false;
4876	else
4877		return true;
4878}
4879
4880static bool gfx_v8_0_rlc_is_idle(void *handle)
4881{
4882	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4883
4884	if (RREG32(mmGRBM_STATUS2) != 0x8)
4885		return false;
4886	else
4887		return true;
4888}
4889
4890static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4891{
4892	unsigned int i;
 
4893	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4894
4895	for (i = 0; i < adev->usec_timeout; i++) {
4896		if (gfx_v8_0_rlc_is_idle(handle))
 
 
 
4897			return 0;
4898
4899		udelay(1);
4900	}
4901	return -ETIMEDOUT;
4902}
4903
4904static int gfx_v8_0_wait_for_idle(void *handle)
4905{
4906	unsigned int i;
4907	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4908
4909	for (i = 0; i < adev->usec_timeout; i++) {
4910		if (gfx_v8_0_is_idle(handle))
4911			return 0;
4912
4913		udelay(1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4914	}
4915	return -ETIMEDOUT;
4916}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4917
4918static int gfx_v8_0_hw_fini(void *handle)
4919{
4920	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4921
4922	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4923	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4924
4925	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4926
4927	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
4928
4929	/* disable KCQ to avoid CPC touch memory not valid anymore */
4930	gfx_v8_0_kcq_disable(adev);
4931
4932	if (amdgpu_sriov_vf(adev)) {
4933		pr_debug("For SRIOV client, shouldn't do anything.\n");
4934		return 0;
4935	}
4936	amdgpu_gfx_rlc_enter_safe_mode(adev);
4937	if (!gfx_v8_0_wait_for_idle(adev))
4938		gfx_v8_0_cp_enable(adev, false);
4939	else
4940		pr_err("cp is busy, skip halt cp\n");
4941	if (!gfx_v8_0_wait_for_rlc_idle(adev))
4942		adev->gfx.rlc.funcs->stop(adev);
4943	else
4944		pr_err("rlc is busy, skip halt rlc\n");
4945	amdgpu_gfx_rlc_exit_safe_mode(adev);
4946
4947	return 0;
4948}
4949
4950static int gfx_v8_0_suspend(void *handle)
4951{
4952	return gfx_v8_0_hw_fini(handle);
4953}
4954
4955static int gfx_v8_0_resume(void *handle)
4956{
4957	return gfx_v8_0_hw_init(handle);
4958}
4959
4960static bool gfx_v8_0_check_soft_reset(void *handle)
4961{
4962	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4963	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4964	u32 tmp;
 
4965
4966	/* GRBM_STATUS */
4967	tmp = RREG32(mmGRBM_STATUS);
4968	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4969		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4970		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4971		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4972		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4973		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
4974		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4975		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4976						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4977		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4978						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
 
 
 
 
 
4979		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4980						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4981	}
4982
4983	/* GRBM_STATUS2 */
4984	tmp = RREG32(mmGRBM_STATUS2);
4985	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4986		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4987						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4988
4989	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
4990	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
4991	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
4992		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4993						SOFT_RESET_CPF, 1);
4994		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4995						SOFT_RESET_CPC, 1);
4996		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4997						SOFT_RESET_CPG, 1);
4998		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
4999						SOFT_RESET_GRBM, 1);
5000	}
5001
5002	/* SRBM_STATUS */
5003	tmp = RREG32(mmSRBM_STATUS);
5004	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
5005		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
5006						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
5007	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
5008		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
5009						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
5010
5011	if (grbm_soft_reset || srbm_soft_reset) {
5012		adev->gfx.grbm_soft_reset = grbm_soft_reset;
5013		adev->gfx.srbm_soft_reset = srbm_soft_reset;
5014		return true;
5015	} else {
5016		adev->gfx.grbm_soft_reset = 0;
5017		adev->gfx.srbm_soft_reset = 0;
5018		return false;
5019	}
5020}
5021
5022static int gfx_v8_0_pre_soft_reset(void *handle)
5023{
5024	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5025	u32 grbm_soft_reset = 0;
5026
5027	if ((!adev->gfx.grbm_soft_reset) &&
5028	    (!adev->gfx.srbm_soft_reset))
5029		return 0;
5030
5031	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5032
5033	/* stop the rlc */
5034	adev->gfx.rlc.funcs->stop(adev);
5035
5036	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5037	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5038		/* Disable GFX parsing/prefetching */
5039		gfx_v8_0_cp_gfx_enable(adev, false);
5040
5041	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5042	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5043	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5044	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5045		int i;
5046
5047		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5048			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5049
5050			mutex_lock(&adev->srbm_mutex);
5051			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5052			gfx_v8_0_deactivate_hqd(adev, 2);
5053			vi_srbm_select(adev, 0, 0, 0, 0);
5054			mutex_unlock(&adev->srbm_mutex);
5055		}
5056		/* Disable MEC parsing/prefetching */
5057		gfx_v8_0_cp_compute_enable(adev, false);
5058	}
5059
5060       return 0;
5061}
 
 
 
 
 
5062
5063static int gfx_v8_0_soft_reset(void *handle)
5064{
5065	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5066	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
5067	u32 tmp;
5068
5069	if ((!adev->gfx.grbm_soft_reset) &&
5070	    (!adev->gfx.srbm_soft_reset))
5071		return 0;
 
 
 
5072
5073	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5074	srbm_soft_reset = adev->gfx.srbm_soft_reset;
5075
5076	if (grbm_soft_reset || srbm_soft_reset) {
5077		tmp = RREG32(mmGMCON_DEBUG);
5078		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
5079		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
5080		WREG32(mmGMCON_DEBUG, tmp);
5081		udelay(50);
5082	}
5083
5084	if (grbm_soft_reset) {
5085		tmp = RREG32(mmGRBM_SOFT_RESET);
5086		tmp |= grbm_soft_reset;
5087		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
5088		WREG32(mmGRBM_SOFT_RESET, tmp);
5089		tmp = RREG32(mmGRBM_SOFT_RESET);
5090
5091		udelay(50);
5092
5093		tmp &= ~grbm_soft_reset;
5094		WREG32(mmGRBM_SOFT_RESET, tmp);
5095		tmp = RREG32(mmGRBM_SOFT_RESET);
5096	}
5097
5098	if (srbm_soft_reset) {
5099		tmp = RREG32(mmSRBM_SOFT_RESET);
5100		tmp |= srbm_soft_reset;
5101		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
5102		WREG32(mmSRBM_SOFT_RESET, tmp);
5103		tmp = RREG32(mmSRBM_SOFT_RESET);
 
 
5104
 
5105		udelay(50);
5106
5107		tmp &= ~srbm_soft_reset;
5108		WREG32(mmSRBM_SOFT_RESET, tmp);
5109		tmp = RREG32(mmSRBM_SOFT_RESET);
5110	}
5111
5112	if (grbm_soft_reset || srbm_soft_reset) {
5113		tmp = RREG32(mmGMCON_DEBUG);
5114		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
5115		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
5116		WREG32(mmGMCON_DEBUG, tmp);
5117	}
5118
5119	/* Wait a little for things to settle down */
5120	udelay(50);
5121
5122	return 0;
5123}
5124
5125static int gfx_v8_0_post_soft_reset(void *handle)
5126{
5127	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5128	u32 grbm_soft_reset = 0;
5129
5130	if ((!adev->gfx.grbm_soft_reset) &&
5131	    (!adev->gfx.srbm_soft_reset))
5132		return 0;
5133
5134	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5135
5136	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5137	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5138	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5139	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5140		int i;
5141
5142		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5143			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5144
5145			mutex_lock(&adev->srbm_mutex);
5146			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5147			gfx_v8_0_deactivate_hqd(adev, 2);
5148			vi_srbm_select(adev, 0, 0, 0, 0);
5149			mutex_unlock(&adev->srbm_mutex);
5150		}
5151		gfx_v8_0_kiq_resume(adev);
5152		gfx_v8_0_kcq_resume(adev);
5153	}
5154
5155	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5156	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5157		gfx_v8_0_cp_gfx_resume(adev);
5158
5159	gfx_v8_0_cp_test_all_rings(adev);
5160
5161	adev->gfx.rlc.funcs->start(adev);
5162
5163	return 0;
5164}
5165
5166/**
5167 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5168 *
5169 * @adev: amdgpu_device pointer
5170 *
5171 * Fetches a GPU clock counter snapshot.
5172 * Returns the 64 bit clock counter snapshot.
5173 */
5174static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5175{
5176	uint64_t clock;
5177
5178	mutex_lock(&adev->gfx.gpu_clock_mutex);
5179	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
5180	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5181		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5182	mutex_unlock(&adev->gfx.gpu_clock_mutex);
5183	return clock;
5184}
5185
5186static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5187					  uint32_t vmid,
5188					  uint32_t gds_base, uint32_t gds_size,
5189					  uint32_t gws_base, uint32_t gws_size,
5190					  uint32_t oa_base, uint32_t oa_size)
5191{
 
 
 
 
 
 
 
 
 
5192	/* GDS Base */
5193	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5194	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5195				WRITE_DATA_DST_SEL(0)));
5196	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
5197	amdgpu_ring_write(ring, 0);
5198	amdgpu_ring_write(ring, gds_base);
5199
5200	/* GDS Size */
5201	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5202	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5203				WRITE_DATA_DST_SEL(0)));
5204	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
5205	amdgpu_ring_write(ring, 0);
5206	amdgpu_ring_write(ring, gds_size);
5207
5208	/* GWS */
5209	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5210	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5211				WRITE_DATA_DST_SEL(0)));
5212	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
5213	amdgpu_ring_write(ring, 0);
5214	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5215
5216	/* OA */
5217	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5218	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5219				WRITE_DATA_DST_SEL(0)));
5220	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
5221	amdgpu_ring_write(ring, 0);
5222	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
5223}
5224
5225static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5226{
5227	WREG32(mmSQ_IND_INDEX,
5228		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5229		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5230		(address << SQ_IND_INDEX__INDEX__SHIFT) |
5231		(SQ_IND_INDEX__FORCE_READ_MASK));
5232	return RREG32(mmSQ_IND_DATA);
5233}
5234
5235static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5236			   uint32_t wave, uint32_t thread,
5237			   uint32_t regno, uint32_t num, uint32_t *out)
5238{
5239	WREG32(mmSQ_IND_INDEX,
5240		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5241		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5242		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
5243		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5244		(SQ_IND_INDEX__FORCE_READ_MASK) |
5245		(SQ_IND_INDEX__AUTO_INCR_MASK));
5246	while (num--)
5247		*(out++) = RREG32(mmSQ_IND_DATA);
5248}
5249
5250static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
5251{
5252	/* type 0 wave data */
5253	dst[(*no_fields)++] = 0;
5254	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
5255	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
5256	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
5257	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
5258	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
5259	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
5260	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
5261	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
5262	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
5263	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
5264	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
5265	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
5266	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
5267	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
5268	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
5269	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
5270	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
5271	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
5272}
5273
5274static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
5275				     uint32_t wave, uint32_t start,
5276				     uint32_t size, uint32_t *dst)
5277{
5278	wave_read_regs(
5279		adev, simd, wave, 0,
5280		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
5281}
5282
5283
5284static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5285	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5286	.select_se_sh = &gfx_v8_0_select_se_sh,
5287	.read_wave_data = &gfx_v8_0_read_wave_data,
5288	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5289	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5290};
5291
5292static int gfx_v8_0_early_init(void *handle)
5293{
5294	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5295
5296	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5297	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
5298	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5299	gfx_v8_0_set_ring_funcs(adev);
5300	gfx_v8_0_set_irq_funcs(adev);
5301	gfx_v8_0_set_gds_init(adev);
5302	gfx_v8_0_set_rlc_funcs(adev);
5303
5304	return 0;
5305}
5306
5307static int gfx_v8_0_late_init(void *handle)
5308{
5309	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5310	int r;
5311
5312	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5313	if (r)
5314		return r;
5315
5316	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5317	if (r)
5318		return r;
5319
5320	/* requires IBs so do in late init after IB pool is initialized */
5321	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
5322	if (r)
5323		return r;
5324
5325	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
5326	if (r) {
5327		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
5328		return r;
5329	}
5330
5331	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
5332	if (r) {
5333		DRM_ERROR(
5334			"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
5335			r);
5336		return r;
5337	}
5338
5339	return 0;
5340}
5341
5342static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5343						       bool enable)
5344{
5345	if (((adev->asic_type == CHIP_POLARIS11) ||
5346	    (adev->asic_type == CHIP_POLARIS12) ||
5347	    (adev->asic_type == CHIP_VEGAM)) &&
5348	    adev->powerplay.pp_funcs->set_powergating_by_smu)
5349		/* Send msg to SMU via Powerplay */
5350		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5351
5352	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5353}
5354
5355static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
5356							bool enable)
5357{
5358	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
5359}
5360
5361static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
5362		bool enable)
5363{
5364	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
5365}
5366
5367static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
5368					  bool enable)
5369{
5370	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
5371}
5372
5373static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
5374						bool enable)
5375{
5376	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
5377
5378	/* Read any GFX register to wake up GFX. */
5379	if (!enable)
5380		RREG32(mmDB_RENDER_CONTROL);
5381}
5382
5383static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5384					  bool enable)
5385{
5386	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5387		cz_enable_gfx_cg_power_gating(adev, true);
5388		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5389			cz_enable_gfx_pipeline_power_gating(adev, true);
5390	} else {
5391		cz_enable_gfx_cg_power_gating(adev, false);
5392		cz_enable_gfx_pipeline_power_gating(adev, false);
5393	}
5394}
5395
5396static int gfx_v8_0_set_powergating_state(void *handle,
5397					  enum amd_powergating_state state)
5398{
5399	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5400	bool enable = (state == AMD_PG_STATE_GATE);
5401
5402	if (amdgpu_sriov_vf(adev))
5403		return 0;
5404
5405	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5406				AMD_PG_SUPPORT_RLC_SMU_HS |
5407				AMD_PG_SUPPORT_CP |
5408				AMD_PG_SUPPORT_GFX_DMG))
5409		amdgpu_gfx_rlc_enter_safe_mode(adev);
5410	switch (adev->asic_type) {
5411	case CHIP_CARRIZO:
5412	case CHIP_STONEY:
5413
5414		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5415			cz_enable_sck_slow_down_on_power_up(adev, true);
5416			cz_enable_sck_slow_down_on_power_down(adev, true);
5417		} else {
5418			cz_enable_sck_slow_down_on_power_up(adev, false);
5419			cz_enable_sck_slow_down_on_power_down(adev, false);
5420		}
5421		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5422			cz_enable_cp_power_gating(adev, true);
5423		else
5424			cz_enable_cp_power_gating(adev, false);
5425
5426		cz_update_gfx_cg_power_gating(adev, enable);
5427
5428		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5429			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5430		else
5431			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5432
5433		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5434			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5435		else
5436			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5437		break;
5438	case CHIP_POLARIS11:
5439	case CHIP_POLARIS12:
5440	case CHIP_VEGAM:
5441		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5442			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5443		else
5444			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5445
5446		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5447			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5448		else
5449			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5450
5451		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
5452			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
5453		else
5454			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
5455		break;
5456	default:
5457		break;
5458	}
5459	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5460				AMD_PG_SUPPORT_RLC_SMU_HS |
5461				AMD_PG_SUPPORT_CP |
5462				AMD_PG_SUPPORT_GFX_DMG))
5463		amdgpu_gfx_rlc_exit_safe_mode(adev);
5464	return 0;
5465}
5466
5467static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
5468{
5469	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5470	int data;
5471
5472	if (amdgpu_sriov_vf(adev))
5473		*flags = 0;
5474
5475	/* AMD_CG_SUPPORT_GFX_MGCG */
5476	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5477	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
5478		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5479
5480	/* AMD_CG_SUPPORT_GFX_CGLG */
5481	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5482	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5483		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5484
5485	/* AMD_CG_SUPPORT_GFX_CGLS */
5486	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5487		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5488
5489	/* AMD_CG_SUPPORT_GFX_CGTS */
5490	data = RREG32(mmCGTS_SM_CTRL_REG);
5491	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
5492		*flags |= AMD_CG_SUPPORT_GFX_CGTS;
5493
5494	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
5495	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
5496		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
5497
5498	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5499	data = RREG32(mmRLC_MEM_SLP_CNTL);
5500	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5501		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5502
5503	/* AMD_CG_SUPPORT_GFX_CP_LS */
5504	data = RREG32(mmCP_MEM_SLP_CNTL);
5505	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5506		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5507}
5508
5509static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
5510				     uint32_t reg_addr, uint32_t cmd)
5511{
5512	uint32_t data;
5513
5514	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5515
5516	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5517	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5518
5519	data = RREG32(mmRLC_SERDES_WR_CTRL);
5520	if (adev->asic_type == CHIP_STONEY)
5521		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5522			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5523			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5524			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5525			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5526			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5527			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5528			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5529			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5530	else
5531		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5532			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5533			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5534			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5535			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5536			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5537			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5538			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5539			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
5540			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
5541			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5542	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
5543		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
5544		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
5545		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
5546
5547	WREG32(mmRLC_SERDES_WR_CTRL, data);
5548}
5549
5550#define MSG_ENTER_RLC_SAFE_MODE     1
5551#define MSG_EXIT_RLC_SAFE_MODE      0
5552#define RLC_GPR_REG2__REQ_MASK 0x00000001
5553#define RLC_GPR_REG2__REQ__SHIFT 0
5554#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
5555#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5556
5557static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5558{
5559	uint32_t rlc_setting;
5560
5561	rlc_setting = RREG32(mmRLC_CNTL);
5562	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5563		return false;
5564
5565	return true;
5566}
5567
5568static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
5569{
5570	uint32_t data;
5571	unsigned i;
5572	data = RREG32(mmRLC_CNTL);
5573	data |= RLC_SAFE_MODE__CMD_MASK;
5574	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5575	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5576	WREG32(mmRLC_SAFE_MODE, data);
5577
5578	/* wait for RLC_SAFE_MODE */
5579	for (i = 0; i < adev->usec_timeout; i++) {
5580		if ((RREG32(mmRLC_GPM_STAT) &
5581		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5582		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
5583		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5584		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
5585			break;
5586		udelay(1);
5587	}
5588	for (i = 0; i < adev->usec_timeout; i++) {
5589		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5590			break;
5591		udelay(1);
5592	}
5593}
5594
5595static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
5596{
5597	uint32_t data;
5598	unsigned i;
5599
5600	data = RREG32(mmRLC_CNTL);
5601	data |= RLC_SAFE_MODE__CMD_MASK;
5602	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5603	WREG32(mmRLC_SAFE_MODE, data);
5604
5605	for (i = 0; i < adev->usec_timeout; i++) {
5606		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5607			break;
5608		udelay(1);
5609	}
5610}
5611
5612static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5613{
5614	u32 data;
5615
5616	if (amdgpu_sriov_is_pp_one_vf(adev))
5617		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
5618	else
5619		data = RREG32(mmRLC_SPM_VMID);
5620
5621	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
5622	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
5623
5624	if (amdgpu_sriov_is_pp_one_vf(adev))
5625		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
5626	else
5627		WREG32(mmRLC_SPM_VMID, data);
5628}
5629
5630static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
5631	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
5632	.set_safe_mode = gfx_v8_0_set_safe_mode,
5633	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
5634	.init = gfx_v8_0_rlc_init,
5635	.get_csb_size = gfx_v8_0_get_csb_size,
5636	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
5637	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
5638	.resume = gfx_v8_0_rlc_resume,
5639	.stop = gfx_v8_0_rlc_stop,
5640	.reset = gfx_v8_0_rlc_reset,
5641	.start = gfx_v8_0_rlc_start,
5642	.update_spm_vmid = gfx_v8_0_update_spm_vmid
5643};
5644
5645static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5646						      bool enable)
5647{
5648	uint32_t temp, data;
5649
5650	amdgpu_gfx_rlc_enter_safe_mode(adev);
5651
5652	/* It is disabled by HW by default */
5653	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
5654		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5655			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
5656				/* 1 - RLC memory Light sleep */
5657				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
 
5658
5659			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
5660				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
5661		}
 
 
5662
5663		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
5664		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5665		if (adev->flags & AMD_IS_APU)
5666			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5667				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5668				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
5669		else
5670			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5671				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5672				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5673				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5674
5675		if (temp != data)
5676			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5677
5678		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5679		gfx_v8_0_wait_for_rlc_serdes(adev);
5680
5681		/* 5 - clear mgcg override */
5682		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5683
5684		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
5685			/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
5686			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5687			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
5688			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
5689			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
5690			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
5691			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
5692			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
5693				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
5694			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
5695			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
5696			if (temp != data)
5697				WREG32(mmCGTS_SM_CTRL_REG, data);
5698		}
5699		udelay(50);
5700
5701		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5702		gfx_v8_0_wait_for_rlc_serdes(adev);
5703	} else {
5704		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
5705		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5706		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5707				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5708				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5709				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5710		if (temp != data)
5711			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5712
5713		/* 2 - disable MGLS in RLC */
5714		data = RREG32(mmRLC_MEM_SLP_CNTL);
5715		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5716			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5717			WREG32(mmRLC_MEM_SLP_CNTL, data);
5718		}
5719
5720		/* 3 - disable MGLS in CP */
5721		data = RREG32(mmCP_MEM_SLP_CNTL);
5722		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5723			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5724			WREG32(mmCP_MEM_SLP_CNTL, data);
5725		}
5726
5727		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
5728		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5729		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
5730				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
5731		if (temp != data)
5732			WREG32(mmCGTS_SM_CTRL_REG, data);
5733
5734		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5735		gfx_v8_0_wait_for_rlc_serdes(adev);
5736
5737		/* 6 - set mgcg override */
5738		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5739
5740		udelay(50);
5741
5742		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5743		gfx_v8_0_wait_for_rlc_serdes(adev);
5744	}
5745
5746	amdgpu_gfx_rlc_exit_safe_mode(adev);
5747}
5748
5749static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5750						      bool enable)
5751{
5752	uint32_t temp, temp1, data, data1;
5753
5754	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5755
5756	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 
 
 
5757
5758	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5759		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5760		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
5761		if (temp1 != data1)
5762			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5763
5764		/* : wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5765		gfx_v8_0_wait_for_rlc_serdes(adev);
5766
5767		/* 2 - clear cgcg override */
5768		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5769
5770		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5771		gfx_v8_0_wait_for_rlc_serdes(adev);
5772
5773		/* 3 - write cmd to set CGLS */
5774		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
5775
5776		/* 4 - enable cgcg */
5777		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5778
5779		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5780			/* enable cgls*/
5781			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5782
5783			temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5784			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
5785
5786			if (temp1 != data1)
5787				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5788		} else {
5789			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5790		}
5791
5792		if (temp != data)
5793			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5794
5795		/* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
5796		 * Cmp_busy/GFX_Idle interrupts
5797		 */
5798		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5799	} else {
5800		/* disable cntx_empty_int_enable & GFX Idle interrupt */
5801		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
5802
5803		/* TEST CGCG */
5804		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5805		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
5806				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
5807		if (temp1 != data1)
5808			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
5809
5810		/* read gfx register to wake up cgcg */
5811		RREG32(mmCB_CGTT_SCLK_CTRL);
5812		RREG32(mmCB_CGTT_SCLK_CTRL);
5813		RREG32(mmCB_CGTT_SCLK_CTRL);
5814		RREG32(mmCB_CGTT_SCLK_CTRL);
5815
5816		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5817		gfx_v8_0_wait_for_rlc_serdes(adev);
5818
5819		/* write cmd to Set CGCG Overrride */
5820		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5821
5822		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5823		gfx_v8_0_wait_for_rlc_serdes(adev);
5824
5825		/* write cmd to Clear CGLS */
5826		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
5827
5828		/* disable cgcg, cgls should be disabled too. */
5829		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
5830			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5831		if (temp != data)
5832			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5833		/* enable interrupts again for PG */
5834		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
5835	}
5836
5837	gfx_v8_0_wait_for_rlc_serdes(adev);
5838
5839	amdgpu_gfx_rlc_exit_safe_mode(adev);
5840}
5841static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5842					    bool enable)
5843{
5844	if (enable) {
5845		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
5846		 * ===  MGCG + MGLS + TS(CG/LS) ===
5847		 */
5848		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5849		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5850	} else {
5851		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
5852		 * ===  CGCG + CGLS ===
5853		 */
5854		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5855		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5856	}
5857	return 0;
5858}
5859
5860static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5861					  enum amd_clockgating_state state)
5862{
5863	uint32_t msg_id, pp_state = 0;
5864	uint32_t pp_support_state = 0;
5865
5866	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5867		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5868			pp_support_state = PP_STATE_SUPPORT_LS;
5869			pp_state = PP_STATE_LS;
5870		}
5871		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5872			pp_support_state |= PP_STATE_SUPPORT_CG;
5873			pp_state |= PP_STATE_CG;
5874		}
5875		if (state == AMD_CG_STATE_UNGATE)
5876			pp_state = 0;
5877
5878		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5879				PP_BLOCK_GFX_CG,
5880				pp_support_state,
5881				pp_state);
5882		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5883			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5884	}
5885
5886	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5887		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5888			pp_support_state = PP_STATE_SUPPORT_LS;
5889			pp_state = PP_STATE_LS;
5890		}
5891
5892		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5893			pp_support_state |= PP_STATE_SUPPORT_CG;
5894			pp_state |= PP_STATE_CG;
5895		}
5896
5897		if (state == AMD_CG_STATE_UNGATE)
5898			pp_state = 0;
5899
5900		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5901				PP_BLOCK_GFX_MG,
5902				pp_support_state,
5903				pp_state);
5904		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5905			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5906	}
5907
5908	return 0;
5909}
5910
5911static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
5912					  enum amd_clockgating_state state)
5913{
5914
5915	uint32_t msg_id, pp_state = 0;
5916	uint32_t pp_support_state = 0;
5917
5918	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5919		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5920			pp_support_state = PP_STATE_SUPPORT_LS;
5921			pp_state = PP_STATE_LS;
5922		}
5923		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5924			pp_support_state |= PP_STATE_SUPPORT_CG;
5925			pp_state |= PP_STATE_CG;
5926		}
5927		if (state == AMD_CG_STATE_UNGATE)
5928			pp_state = 0;
5929
5930		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5931				PP_BLOCK_GFX_CG,
5932				pp_support_state,
5933				pp_state);
5934		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5935			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5936	}
5937
5938	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
5939		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5940			pp_support_state = PP_STATE_SUPPORT_LS;
5941			pp_state = PP_STATE_LS;
5942		}
5943		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5944			pp_support_state |= PP_STATE_SUPPORT_CG;
5945			pp_state |= PP_STATE_CG;
5946		}
5947		if (state == AMD_CG_STATE_UNGATE)
5948			pp_state = 0;
5949
5950		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5951				PP_BLOCK_GFX_3D,
5952				pp_support_state,
5953				pp_state);
5954		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5955			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5956	}
5957
5958	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5959		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5960			pp_support_state = PP_STATE_SUPPORT_LS;
5961			pp_state = PP_STATE_LS;
5962		}
5963
5964		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5965			pp_support_state |= PP_STATE_SUPPORT_CG;
5966			pp_state |= PP_STATE_CG;
5967		}
5968
5969		if (state == AMD_CG_STATE_UNGATE)
5970			pp_state = 0;
5971
5972		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5973				PP_BLOCK_GFX_MG,
5974				pp_support_state,
5975				pp_state);
5976		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5977			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5978	}
5979
5980	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
5981		pp_support_state = PP_STATE_SUPPORT_LS;
5982
5983		if (state == AMD_CG_STATE_UNGATE)
5984			pp_state = 0;
5985		else
5986			pp_state = PP_STATE_LS;
5987
5988		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5989				PP_BLOCK_GFX_RLC,
5990				pp_support_state,
5991				pp_state);
5992		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
5993			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5994	}
5995
5996	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
5997		pp_support_state = PP_STATE_SUPPORT_LS;
5998
5999		if (state == AMD_CG_STATE_UNGATE)
6000			pp_state = 0;
6001		else
6002			pp_state = PP_STATE_LS;
6003		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6004			PP_BLOCK_GFX_CP,
6005			pp_support_state,
6006			pp_state);
6007		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
6008			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
6009	}
6010
6011	return 0;
6012}
6013
6014static int gfx_v8_0_set_clockgating_state(void *handle,
6015					  enum amd_clockgating_state state)
6016{
6017	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6018
6019	if (amdgpu_sriov_vf(adev))
6020		return 0;
6021
6022	switch (adev->asic_type) {
6023	case CHIP_FIJI:
6024	case CHIP_CARRIZO:
6025	case CHIP_STONEY:
6026		gfx_v8_0_update_gfx_clock_gating(adev,
6027						 state == AMD_CG_STATE_GATE);
6028		break;
6029	case CHIP_TONGA:
6030		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
6031		break;
6032	case CHIP_POLARIS10:
6033	case CHIP_POLARIS11:
6034	case CHIP_POLARIS12:
6035	case CHIP_VEGAM:
6036		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6037		break;
6038	default:
6039		break;
6040	}
6041	return 0;
6042}
6043
6044static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
6045{
6046	return ring->adev->wb.wb[ring->rptr_offs];
 
 
 
 
6047}
6048
6049static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
6050{
6051	struct amdgpu_device *adev = ring->adev;
 
6052
6053	if (ring->use_doorbell)
6054		/* XXX check if swapping is necessary on BE */
6055		return ring->adev->wb.wb[ring->wptr_offs];
6056	else
6057		return RREG32(mmCP_RB0_WPTR);
 
 
6058}
6059
6060static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
6061{
6062	struct amdgpu_device *adev = ring->adev;
6063
6064	if (ring->use_doorbell) {
6065		/* XXX check if swapping is necessary on BE */
6066		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6067		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6068	} else {
6069		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
6070		(void)RREG32(mmCP_RB0_WPTR);
6071	}
6072}
6073
6074static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
6075{
6076	u32 ref_and_mask, reg_mem_engine;
6077
6078	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
6079	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
6080		switch (ring->me) {
6081		case 1:
6082			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
6083			break;
6084		case 2:
6085			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
6086			break;
6087		default:
6088			return;
6089		}
6090		reg_mem_engine = 0;
6091	} else {
6092		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
6093		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
6094	}
6095
6096	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6097	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
6098				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
6099				 reg_mem_engine));
6100	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
6101	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
6102	amdgpu_ring_write(ring, ref_and_mask);
6103	amdgpu_ring_write(ring, ref_and_mask);
6104	amdgpu_ring_write(ring, 0x20); /* poll interval */
6105}
6106
6107static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
6108{
6109	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6110	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
6111		EVENT_INDEX(4));
6112
6113	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
6114	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
6115		EVENT_INDEX(0));
 
6116}
6117
6118static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
6119					struct amdgpu_job *job,
6120					struct amdgpu_ib *ib,
6121					uint32_t flags)
6122{
6123	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6124	u32 header, control = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6125
6126	if (ib->flags & AMDGPU_IB_FLAG_CE)
6127		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
6128	else
6129		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
6130
6131	control |= ib->length_dw | (vmid << 24);
6132
6133	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
6134		control |= INDIRECT_BUFFER_PRE_ENB(1);
6135
6136		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
6137			gfx_v8_0_ring_emit_de_meta(ring);
6138	}
6139
6140	amdgpu_ring_write(ring, header);
6141	amdgpu_ring_write(ring,
6142#ifdef __BIG_ENDIAN
6143			  (2 << 0) |
6144#endif
6145			  (ib->gpu_addr & 0xFFFFFFFC));
6146	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6147	amdgpu_ring_write(ring, control);
6148}
6149
6150static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
6151					  struct amdgpu_job *job,
6152					  struct amdgpu_ib *ib,
6153					  uint32_t flags)
6154{
6155	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
6156	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
6157
6158	/* Currently, there is a high possibility to get wave ID mismatch
6159	 * between ME and GDS, leading to a hw deadlock, because ME generates
6160	 * different wave IDs than the GDS expects. This situation happens
6161	 * randomly when at least 5 compute pipes use GDS ordered append.
6162	 * The wave IDs generated by ME are also wrong after suspend/resume.
6163	 * Those are probably bugs somewhere else in the kernel driver.
6164	 *
6165	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
6166	 * GDS to 0 for this ring (me/pipe).
6167	 */
6168	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
6169		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
6170		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
6171		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
6172	}
6173
6174	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
6175	amdgpu_ring_write(ring,
6176#ifdef __BIG_ENDIAN
6177				(2 << 0) |
6178#endif
6179				(ib->gpu_addr & 0xFFFFFFFC));
6180	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
6181	amdgpu_ring_write(ring, control);
6182}
6183
6184static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
6185					 u64 seq, unsigned flags)
6186{
6187	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6188	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6189
6190	/* Workaround for cache flush problems. First send a dummy EOP
6191	 * event down the pipe with seq one below.
6192	 */
6193	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6194	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6195				 EOP_TC_ACTION_EN |
6196				 EOP_TC_WB_ACTION_EN |
6197				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6198				 EVENT_INDEX(5)));
6199	amdgpu_ring_write(ring, addr & 0xfffffffc);
6200	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6201				DATA_SEL(1) | INT_SEL(0));
6202	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
6203	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
6204
6205	/* Then send the real EOP event down the pipe:
6206	 * EVENT_WRITE_EOP - flush caches, send int */
6207	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
6208	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6209				 EOP_TC_ACTION_EN |
6210				 EOP_TC_WB_ACTION_EN |
6211				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6212				 EVENT_INDEX(5)));
6213	amdgpu_ring_write(ring, addr & 0xfffffffc);
6214	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
6215			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6216	amdgpu_ring_write(ring, lower_32_bits(seq));
6217	amdgpu_ring_write(ring, upper_32_bits(seq));
6218
6219}
6220
6221static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
6222{
6223	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6224	uint32_t seq = ring->fence_drv.sync_seq;
6225	uint64_t addr = ring->fence_drv.gpu_addr;
6226
6227	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6228	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
6229				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
6230				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
6231	amdgpu_ring_write(ring, addr & 0xfffffffc);
6232	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
6233	amdgpu_ring_write(ring, seq);
6234	amdgpu_ring_write(ring, 0xffffffff);
6235	amdgpu_ring_write(ring, 4); /* poll interval */
 
 
 
 
 
 
 
 
6236}
6237
6238static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
6239					unsigned vmid, uint64_t pd_addr)
6240{
6241	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6242
6243	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6244
6245	/* wait for the invalidate to complete */
6246	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6247	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
6248				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
6249				 WAIT_REG_MEM_ENGINE(0))); /* me */
6250	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
6251	amdgpu_ring_write(ring, 0);
6252	amdgpu_ring_write(ring, 0); /* ref */
6253	amdgpu_ring_write(ring, 0); /* mask */
6254	amdgpu_ring_write(ring, 0x20); /* poll interval */
6255
6256	/* compute doesn't have PFP */
6257	if (usepfp) {
6258		/* sync PFP to ME, otherwise we might get invalid PFP reads */
6259		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
6260		amdgpu_ring_write(ring, 0x0);
 
 
 
 
6261	}
6262}
6263
6264static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 
 
 
 
 
6265{
6266	return ring->adev->wb.wb[ring->wptr_offs];
6267}
6268
6269static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6270{
6271	struct amdgpu_device *adev = ring->adev;
6272
6273	/* XXX check if swapping is necessary on BE */
6274	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
6275	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6276}
6277
6278static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
6279					     u64 addr, u64 seq,
6280					     unsigned flags)
6281{
6282	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
6283	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
6284
6285	/* RELEASE_MEM - flush caches, send int */
6286	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
6287	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
6288				 EOP_TC_ACTION_EN |
6289				 EOP_TC_WB_ACTION_EN |
6290				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
6291				 EVENT_INDEX(5)));
6292	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
6293	amdgpu_ring_write(ring, addr & 0xfffffffc);
6294	amdgpu_ring_write(ring, upper_32_bits(addr));
6295	amdgpu_ring_write(ring, lower_32_bits(seq));
6296	amdgpu_ring_write(ring, upper_32_bits(seq));
6297}
6298
6299static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
6300					 u64 seq, unsigned int flags)
6301{
6302	/* we only allocate 32bit for each seq wb address */
6303	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
6304
6305	/* write fence seq to the "addr" */
6306	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6307	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6308				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6309	amdgpu_ring_write(ring, lower_32_bits(addr));
6310	amdgpu_ring_write(ring, upper_32_bits(addr));
6311	amdgpu_ring_write(ring, lower_32_bits(seq));
6312
6313	if (flags & AMDGPU_FENCE_FLAG_INT) {
6314		/* set register to trigger INT */
6315		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6316		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6317					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6318		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
6319		amdgpu_ring_write(ring, 0);
6320		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6321	}
6322}
6323
6324static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
6325{
6326	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
6327	amdgpu_ring_write(ring, 0);
6328}
6329
6330static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
6331{
6332	uint32_t dw2 = 0;
6333
6334	if (amdgpu_sriov_vf(ring->adev))
6335		gfx_v8_0_ring_emit_ce_meta(ring);
6336
6337	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
6338	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
6339		gfx_v8_0_ring_emit_vgt_flush(ring);
6340		/* set load_global_config & load_global_uconfig */
6341		dw2 |= 0x8001;
6342		/* set load_cs_sh_regs */
6343		dw2 |= 0x01000000;
6344		/* set load_per_context_state & load_gfx_sh_regs for GFX */
6345		dw2 |= 0x10002;
6346
6347		/* set load_ce_ram if preamble presented */
6348		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
6349			dw2 |= 0x10000000;
6350	} else {
6351		/* still load_ce_ram if this is the first time preamble presented
6352		 * although there is no context switch happens.
6353		 */
6354		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
6355			dw2 |= 0x10000000;
6356	}
6357
6358	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6359	amdgpu_ring_write(ring, dw2);
6360	amdgpu_ring_write(ring, 0);
6361}
6362
6363static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
6364{
6365	unsigned ret;
6366
6367	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
6368	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
6369	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
6370	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
6371	ret = ring->wptr & ring->buf_mask;
6372	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
6373	return ret;
6374}
6375
6376static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
6377{
6378	unsigned cur;
6379
6380	BUG_ON(offset > ring->buf_mask);
6381	BUG_ON(ring->ring[offset] != 0x55aa55aa);
6382
6383	cur = (ring->wptr & ring->buf_mask) - 1;
6384	if (likely(cur > offset))
6385		ring->ring[offset] = cur - offset;
6386	else
6387		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
6388}
6389
6390static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6391				    uint32_t reg_val_offs)
6392{
6393	struct amdgpu_device *adev = ring->adev;
6394
6395	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6396	amdgpu_ring_write(ring, 0 |	/* src: register*/
6397				(5 << 8) |	/* dst: memory */
6398				(1 << 20));	/* write confirm */
6399	amdgpu_ring_write(ring, reg);
6400	amdgpu_ring_write(ring, 0);
6401	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6402				reg_val_offs * 4));
6403	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6404				reg_val_offs * 4));
6405}
6406
6407static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6408				  uint32_t val)
6409{
6410	uint32_t cmd;
6411
6412	switch (ring->funcs->type) {
6413	case AMDGPU_RING_TYPE_GFX:
6414		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6415		break;
6416	case AMDGPU_RING_TYPE_KIQ:
6417		cmd = 1 << 16; /* no inc addr */
 
 
 
 
6418		break;
6419	default:
6420		cmd = WR_CONFIRM;
6421		break;
6422	}
6423
6424	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6425	amdgpu_ring_write(ring, cmd);
6426	amdgpu_ring_write(ring, reg);
6427	amdgpu_ring_write(ring, 0);
6428	amdgpu_ring_write(ring, val);
6429}
6430
6431static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
6432{
6433	struct amdgpu_device *adev = ring->adev;
6434	uint32_t value = 0;
6435
6436	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6437	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6438	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6439	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6440	WREG32(mmSQ_CMD, value);
6441}
6442
6443static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6444						 enum amdgpu_interrupt_state state)
6445{
6446	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
6447		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6448}
6449
6450static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6451						     int me, int pipe,
6452						     enum amdgpu_interrupt_state state)
6453{
6454	u32 mec_int_cntl, mec_int_cntl_reg;
6455
6456	/*
6457	 * amdgpu controls only the first MEC. That's why this function only
6458	 * handles the setting of interrupts for this specific MEC. All other
6459	 * pipes' interrupts are set by amdkfd.
6460	 */
6461
6462	if (me == 1) {
6463		switch (pipe) {
6464		case 0:
6465			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
6466			break;
6467		case 1:
6468			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
6469			break;
6470		case 2:
6471			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
6472			break;
6473		case 3:
6474			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
6475			break;
6476		default:
6477			DRM_DEBUG("invalid pipe %d\n", pipe);
6478			return;
6479		}
6480	} else {
6481		DRM_DEBUG("invalid me %d\n", me);
6482		return;
6483	}
6484
6485	switch (state) {
6486	case AMDGPU_IRQ_STATE_DISABLE:
6487		mec_int_cntl = RREG32(mec_int_cntl_reg);
6488		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
 
6489		WREG32(mec_int_cntl_reg, mec_int_cntl);
6490		break;
6491	case AMDGPU_IRQ_STATE_ENABLE:
6492		mec_int_cntl = RREG32(mec_int_cntl_reg);
6493		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
 
6494		WREG32(mec_int_cntl_reg, mec_int_cntl);
6495		break;
6496	default:
6497		break;
6498	}
6499}
6500
6501static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6502					     struct amdgpu_irq_src *source,
6503					     unsigned type,
6504					     enum amdgpu_interrupt_state state)
6505{
6506	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
6507		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6508
6509	return 0;
6510}
6511
6512static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6513					      struct amdgpu_irq_src *source,
6514					      unsigned type,
6515					      enum amdgpu_interrupt_state state)
6516{
6517	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
6518		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6519
6520	return 0;
6521}
6522
6523static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6524					    struct amdgpu_irq_src *src,
6525					    unsigned type,
6526					    enum amdgpu_interrupt_state state)
6527{
6528	switch (type) {
6529	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6530		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
6531		break;
6532	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6533		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6534		break;
6535	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6536		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6537		break;
6538	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6539		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6540		break;
6541	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6542		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6543		break;
6544	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6545		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6546		break;
6547	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6548		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6549		break;
6550	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6551		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6552		break;
6553	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6554		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6555		break;
6556	default:
6557		break;
6558	}
6559	return 0;
6560}
6561
6562static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
6563					 struct amdgpu_irq_src *source,
6564					 unsigned int type,
6565					 enum amdgpu_interrupt_state state)
6566{
6567	int enable_flag;
6568
6569	switch (state) {
6570	case AMDGPU_IRQ_STATE_DISABLE:
6571		enable_flag = 0;
6572		break;
6573
6574	case AMDGPU_IRQ_STATE_ENABLE:
6575		enable_flag = 1;
6576		break;
6577
6578	default:
6579		return -EINVAL;
6580	}
6581
6582	WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6583	WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6584	WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6585	WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6586	WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6587	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6588		     enable_flag);
6589	WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6590		     enable_flag);
6591	WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6592		     enable_flag);
6593	WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6594		     enable_flag);
6595	WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6596		     enable_flag);
6597	WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6598		     enable_flag);
6599	WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6600		     enable_flag);
6601	WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6602		     enable_flag);
6603
6604	return 0;
6605}
6606
6607static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
6608				     struct amdgpu_irq_src *source,
6609				     unsigned int type,
6610				     enum amdgpu_interrupt_state state)
6611{
6612	int enable_flag;
6613
6614	switch (state) {
6615	case AMDGPU_IRQ_STATE_DISABLE:
6616		enable_flag = 1;
6617		break;
6618
6619	case AMDGPU_IRQ_STATE_ENABLE:
6620		enable_flag = 0;
6621		break;
6622
6623	default:
6624		return -EINVAL;
6625	}
6626
6627	WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
6628		     enable_flag);
6629
6630	return 0;
6631}
6632
6633static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
6634			    struct amdgpu_irq_src *source,
6635			    struct amdgpu_iv_entry *entry)
6636{
6637	int i;
6638	u8 me_id, pipe_id, queue_id;
6639	struct amdgpu_ring *ring;
6640
6641	DRM_DEBUG("IH: CP EOP\n");
6642	me_id = (entry->ring_id & 0x0c) >> 2;
6643	pipe_id = (entry->ring_id & 0x03) >> 0;
6644	queue_id = (entry->ring_id & 0x70) >> 4;
6645
6646	switch (me_id) {
6647	case 0:
6648		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6649		break;
6650	case 1:
6651	case 2:
6652		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6653			ring = &adev->gfx.compute_ring[i];
6654			/* Per-queue interrupt is supported for MEC starting from VI.
6655			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
6656			  */
6657			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6658				amdgpu_fence_process(ring);
6659		}
6660		break;
6661	}
6662	return 0;
6663}
6664
6665static void gfx_v8_0_fault(struct amdgpu_device *adev,
6666			   struct amdgpu_iv_entry *entry)
6667{
6668	u8 me_id, pipe_id, queue_id;
6669	struct amdgpu_ring *ring;
6670	int i;
6671
6672	me_id = (entry->ring_id & 0x0c) >> 2;
6673	pipe_id = (entry->ring_id & 0x03) >> 0;
6674	queue_id = (entry->ring_id & 0x70) >> 4;
6675
6676	switch (me_id) {
6677	case 0:
6678		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6679		break;
6680	case 1:
6681	case 2:
6682		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6683			ring = &adev->gfx.compute_ring[i];
6684			if (ring->me == me_id && ring->pipe == pipe_id &&
6685			    ring->queue == queue_id)
6686				drm_sched_fault(&ring->sched);
6687		}
6688		break;
6689	}
6690}
6691
6692static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
6693				 struct amdgpu_irq_src *source,
6694				 struct amdgpu_iv_entry *entry)
6695{
6696	DRM_ERROR("Illegal register access in command stream\n");
6697	gfx_v8_0_fault(adev, entry);
6698	return 0;
6699}
6700
6701static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
6702				  struct amdgpu_irq_src *source,
6703				  struct amdgpu_iv_entry *entry)
6704{
6705	DRM_ERROR("Illegal instruction in command stream\n");
6706	gfx_v8_0_fault(adev, entry);
6707	return 0;
6708}
6709
6710static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
6711				     struct amdgpu_irq_src *source,
6712				     struct amdgpu_iv_entry *entry)
6713{
6714	DRM_ERROR("CP EDC/ECC error detected.");
6715	return 0;
6716}
6717
6718static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
6719{
6720	u32 enc, se_id, sh_id, cu_id;
6721	char type[20];
6722	int sq_edc_source = -1;
6723
6724	enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6725	se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6726
6727	switch (enc) {
6728		case 0:
6729			DRM_INFO("SQ general purpose intr detected:"
6730					"se_id %d, immed_overflow %d, host_reg_overflow %d,"
6731					"host_cmd_overflow %d, cmd_timestamp %d,"
6732					"reg_timestamp %d, thread_trace_buff_full %d,"
6733					"wlt %d, thread_trace %d.\n",
6734					se_id,
6735					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6736					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6737					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6738					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6739					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6740					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6741					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6742					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6743					);
6744			break;
6745		case 1:
6746		case 2:
6747
6748			cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6749			sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6750
6751			/*
6752			 * This function can be called either directly from ISR
6753			 * or from BH in which case we can access SQ_EDC_INFO
6754			 * instance
6755			 */
6756			if (in_task()) {
6757				mutex_lock(&adev->grbm_idx_mutex);
6758				gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
6759
6760				sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
6761
6762				gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6763				mutex_unlock(&adev->grbm_idx_mutex);
6764			}
6765
6766			if (enc == 1)
6767				sprintf(type, "instruction intr");
6768			else
6769				sprintf(type, "EDC/ECC error");
6770
6771			DRM_INFO(
6772				"SQ %s detected: "
6773					"se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
6774					"trap %s, sq_ed_info.source %s.\n",
6775					type, se_id, sh_id, cu_id,
6776					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
6777					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
6778					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
6779					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
6780					(sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
6781				);
6782			break;
6783		default:
6784			DRM_ERROR("SQ invalid encoding type\n.");
6785	}
6786}
6787
6788static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
6789{
6790
6791	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
6792	struct sq_work *sq_work = container_of(work, struct sq_work, work);
6793
6794	gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
6795}
6796
6797static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6798			   struct amdgpu_irq_src *source,
6799			   struct amdgpu_iv_entry *entry)
6800{
6801	unsigned ih_data = entry->src_data[0];
6802
6803	/*
6804	 * Try to submit work so SQ_EDC_INFO can be accessed from
6805	 * BH. If previous work submission hasn't finished yet
6806	 * just print whatever info is possible directly from the ISR.
6807	 */
6808	if (work_pending(&adev->gfx.sq_work.work)) {
6809		gfx_v8_0_parse_sq_irq(adev, ih_data);
6810	} else {
6811		adev->gfx.sq_work.ih_data = ih_data;
6812		schedule_work(&adev->gfx.sq_work.work);
6813	}
6814
6815	return 0;
6816}
6817
6818static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
6819{
6820	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
6821	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6822			  PACKET3_TC_ACTION_ENA |
6823			  PACKET3_SH_KCACHE_ACTION_ENA |
6824			  PACKET3_SH_ICACHE_ACTION_ENA |
6825			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
6826	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6827	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
6828	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
6829}
6830
6831static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
6832{
6833	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6834	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
6835			  PACKET3_TC_ACTION_ENA |
6836			  PACKET3_SH_KCACHE_ACTION_ENA |
6837			  PACKET3_SH_ICACHE_ACTION_ENA |
6838			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
6839	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
6840	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
6841	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
6842	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
6843	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
6844}
6845
6846static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
6847	.name = "gfx_v8_0",
6848	.early_init = gfx_v8_0_early_init,
6849	.late_init = gfx_v8_0_late_init,
6850	.sw_init = gfx_v8_0_sw_init,
6851	.sw_fini = gfx_v8_0_sw_fini,
6852	.hw_init = gfx_v8_0_hw_init,
6853	.hw_fini = gfx_v8_0_hw_fini,
6854	.suspend = gfx_v8_0_suspend,
6855	.resume = gfx_v8_0_resume,
6856	.is_idle = gfx_v8_0_is_idle,
6857	.wait_for_idle = gfx_v8_0_wait_for_idle,
6858	.check_soft_reset = gfx_v8_0_check_soft_reset,
6859	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
6860	.soft_reset = gfx_v8_0_soft_reset,
6861	.post_soft_reset = gfx_v8_0_post_soft_reset,
6862	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
6863	.set_powergating_state = gfx_v8_0_set_powergating_state,
6864	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
6865};
6866
6867static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
6868	.type = AMDGPU_RING_TYPE_GFX,
6869	.align_mask = 0xff,
6870	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6871	.support_64bit_ptrs = false,
6872	.get_rptr = gfx_v8_0_ring_get_rptr,
6873	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
6874	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
6875	.emit_frame_size = /* maximum 215dw if count 16 IBs in */
6876		5 +  /* COND_EXEC */
6877		7 +  /* PIPELINE_SYNC */
6878		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
6879		12 +  /* FENCE for VM_FLUSH */
6880		20 + /* GDS switch */
6881		4 + /* double SWITCH_BUFFER,
6882		       the first COND_EXEC jump to the place just
6883			   prior to this double SWITCH_BUFFER  */
6884		5 + /* COND_EXEC */
6885		7 +	 /*	HDP_flush */
6886		4 +	 /*	VGT_flush */
6887		14 + /*	CE_META */
6888		31 + /*	DE_META */
6889		3 + /* CNTX_CTRL */
6890		5 + /* HDP_INVL */
6891		12 + 12 + /* FENCE x2 */
6892		2 + /* SWITCH_BUFFER */
6893		5, /* SURFACE_SYNC */
6894	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_gfx */
6895	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
6896	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
6897	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6898	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6899	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6900	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
 
6901	.test_ring = gfx_v8_0_ring_test_ring,
6902	.test_ib = gfx_v8_0_ring_test_ib,
6903	.insert_nop = amdgpu_ring_insert_nop,
6904	.pad_ib = amdgpu_ring_generic_pad_ib,
6905	.emit_switch_buffer = gfx_v8_ring_emit_sb,
6906	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
6907	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
6908	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
6909	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6910	.soft_recovery = gfx_v8_0_ring_soft_recovery,
6911	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
6912};
6913
6914static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6915	.type = AMDGPU_RING_TYPE_COMPUTE,
6916	.align_mask = 0xff,
6917	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6918	.support_64bit_ptrs = false,
6919	.get_rptr = gfx_v8_0_ring_get_rptr,
6920	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
6921	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
6922	.emit_frame_size =
6923		20 + /* gfx_v8_0_ring_emit_gds_switch */
6924		7 + /* gfx_v8_0_ring_emit_hdp_flush */
6925		5 + /* hdp_invalidate */
6926		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6927		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
6928		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
6929		7, /* gfx_v8_0_emit_mem_sync_compute */
6930	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
6931	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
6932	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
6933	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
6934	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
6935	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
6936	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
 
6937	.test_ring = gfx_v8_0_ring_test_ring,
6938	.test_ib = gfx_v8_0_ring_test_ib,
6939	.insert_nop = amdgpu_ring_insert_nop,
6940	.pad_ib = amdgpu_ring_generic_pad_ib,
6941	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6942	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
6943};
6944
6945static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
6946	.type = AMDGPU_RING_TYPE_KIQ,
6947	.align_mask = 0xff,
6948	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6949	.support_64bit_ptrs = false,
6950	.get_rptr = gfx_v8_0_ring_get_rptr,
6951	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
6952	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
6953	.emit_frame_size =
6954		20 + /* gfx_v8_0_ring_emit_gds_switch */
6955		7 + /* gfx_v8_0_ring_emit_hdp_flush */
6956		5 + /* hdp_invalidate */
6957		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
6958		17 + /* gfx_v8_0_ring_emit_vm_flush */
6959		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6960	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
6961	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
6962	.test_ring = gfx_v8_0_ring_test_ring,
6963	.insert_nop = amdgpu_ring_insert_nop,
6964	.pad_ib = amdgpu_ring_generic_pad_ib,
6965	.emit_rreg = gfx_v8_0_ring_emit_rreg,
6966	.emit_wreg = gfx_v8_0_ring_emit_wreg,
6967};
6968
6969static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
6970{
6971	int i;
6972
6973	adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
6974
6975	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6976		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
6977
6978	for (i = 0; i < adev->gfx.num_compute_rings; i++)
6979		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
6980}
6981
6982static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
6983	.set = gfx_v8_0_set_eop_interrupt_state,
6984	.process = gfx_v8_0_eop_irq,
6985};
6986
6987static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
6988	.set = gfx_v8_0_set_priv_reg_fault_state,
6989	.process = gfx_v8_0_priv_reg_irq,
6990};
6991
6992static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
6993	.set = gfx_v8_0_set_priv_inst_fault_state,
6994	.process = gfx_v8_0_priv_inst_irq,
6995};
6996
6997static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
6998	.set = gfx_v8_0_set_cp_ecc_int_state,
6999	.process = gfx_v8_0_cp_ecc_error_irq,
7000};
7001
7002static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
7003	.set = gfx_v8_0_set_sq_int_state,
7004	.process = gfx_v8_0_sq_irq,
7005};
7006
7007static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
7008{
7009	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7010	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
7011
7012	adev->gfx.priv_reg_irq.num_types = 1;
7013	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
7014
7015	adev->gfx.priv_inst_irq.num_types = 1;
7016	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
7017
7018	adev->gfx.cp_ecc_error_irq.num_types = 1;
7019	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
7020
7021	adev->gfx.sq_irq.num_types = 1;
7022	adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
7023}
7024
7025static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
7026{
7027	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
7028}
7029
7030static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
7031{
7032	/* init asci gds info */
7033	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
7034	adev->gds.gws_size = 64;
7035	adev->gds.oa_size = 16;
7036	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
7037}
 
 
7038
7039static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7040						 u32 bitmap)
7041{
7042	u32 data;
7043
7044	if (!bitmap)
7045		return;
 
 
 
7046
7047	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7048	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7049
7050	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
 
 
7051}
7052
7053static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7054{
7055	u32 data, mask;
7056
7057	data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
7058		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
 
 
 
7059
7060	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7061
7062	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
7063}
7064
7065static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
 
7066{
7067	int i, j, k, counter, active_cu_number = 0;
7068	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7069	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
7070	unsigned disable_masks[4 * 2];
7071	u32 ao_cu_num;
7072
7073	memset(cu_info, 0, sizeof(*cu_info));
7074
7075	if (adev->flags & AMD_IS_APU)
7076		ao_cu_num = 2;
7077	else
7078		ao_cu_num = adev->gfx.config.max_cu_per_sh;
7079
7080	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
7081
7082	mutex_lock(&adev->grbm_idx_mutex);
7083	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7084		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7085			mask = 1;
7086			ao_bitmap = 0;
7087			counter = 0;
7088			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
7089			if (i < 4 && j < 2)
7090				gfx_v8_0_set_user_cu_inactive_bitmap(
7091					adev, disable_masks[i * 2 + j]);
7092			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
7093			cu_info->bitmap[i][j] = bitmap;
7094
7095			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
7096				if (bitmap & mask) {
7097					if (counter < ao_cu_num)
7098						ao_bitmap |= mask;
7099					counter ++;
7100				}
7101				mask <<= 1;
7102			}
7103			active_cu_number += counter;
7104			if (i < 2 && j < 2)
7105				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7106			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
7107		}
7108	}
7109	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7110	mutex_unlock(&adev->grbm_idx_mutex);
7111
7112	cu_info->number = active_cu_number;
7113	cu_info->ao_cu_mask = ao_cu_mask;
7114	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7115	cu_info->max_waves_per_simd = 10;
7116	cu_info->max_scratch_slots_per_cu = 32;
7117	cu_info->wave_front_size = 64;
7118	cu_info->lds_size = 64;
7119}
7120
7121const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
7122{
7123	.type = AMD_IP_BLOCK_TYPE_GFX,
7124	.major = 8,
7125	.minor = 0,
7126	.rev = 0,
7127	.funcs = &gfx_v8_0_ip_funcs,
7128};
7129
7130const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
7131{
7132	.type = AMD_IP_BLOCK_TYPE_GFX,
7133	.major = 8,
7134	.minor = 1,
7135	.rev = 0,
7136	.funcs = &gfx_v8_0_ip_funcs,
7137};
7138
7139static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
7140{
7141	uint64_t ce_payload_addr;
7142	int cnt_ce;
7143	union {
7144		struct vi_ce_ib_state regular;
7145		struct vi_ce_ib_state_chained_ib chained;
7146	} ce_payload = {};
7147
7148	if (ring->adev->virt.chained_ib_support) {
7149		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7150			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
7151		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
7152	} else {
7153		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
7154			offsetof(struct vi_gfx_meta_data, ce_payload);
7155		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
7156	}
7157
7158	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
7159	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
7160				WRITE_DATA_DST_SEL(8) |
7161				WR_CONFIRM) |
7162				WRITE_DATA_CACHE_POLICY(0));
7163	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
7164	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
7165	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
7166}
7167
7168static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
7169{
7170	uint64_t de_payload_addr, gds_addr, csa_addr;
7171	int cnt_de;
7172	union {
7173		struct vi_de_ib_state regular;
7174		struct vi_de_ib_state_chained_ib chained;
7175	} de_payload = {};
7176
7177	csa_addr = amdgpu_csa_vaddr(ring->adev);
7178	gds_addr = csa_addr + 4096;
7179	if (ring->adev->virt.chained_ib_support) {
7180		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
7181		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
7182		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
7183		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
7184	} else {
7185		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
7186		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
7187		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
7188		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
7189	}
7190
7191	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
7192	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
7193				WRITE_DATA_DST_SEL(8) |
7194				WR_CONFIRM) |
7195				WRITE_DATA_CACHE_POLICY(0));
7196	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
7197	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
7198	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
7199}