/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ring.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#include "clearstate_vi.h"

#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "smu/smu_7_1_3_d.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define GFX8_NUM_GFX_RINGS     1
#define GFX8_MEC_HPD_SIZE 4096

#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003

#define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)					((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)					((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x)				((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x)					((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)					((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)					((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)				((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)					((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

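/*
 * Commentary (added for clarity): these helpers shift a field value into
 * its position within the GB_TILE_MODEn / GB_MACROTILE_MODEn registers, so
 * a complete tiling-mode word is built by OR-ing them together, along the
 * lines of:
 *
 *   ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) |
 *   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)
 *
 * (values here are illustrative; the real per-ASIC tiling tables live
 * later in this file).
 */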
#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L

/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD    1
#define CLE_BPM_SERDES_CMD    0

/* BPM register addresses */
enum {
	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX
};

#define RLC_FormatDirectRegListLength        14

MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");

MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");

MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");

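/*
 * Commentary (added for clarity): one row per VMID, giving that VMID's GDS
 * BASE and SIZE registers plus the matching GWS and OA registers, so
 * per-VMID GDS state can be programmed by simple table lookup.
 */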
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};

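/*
 * Commentary (added for clarity): the "golden" tables below are consumed
 * three entries at a time as { register offset, AND mask, OR value }
 * triples by amdgpu_device_program_register_sequence(); an AND mask of
 * 0xffffffff means the OR value is written to the register verbatim.
 */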
static const u32 golden_settings_tonga_a11[] =
{
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 tonga_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_vegam_a11[] =
{
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 vegam_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris11_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x07180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris10_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 fiji_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_iceland_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};

static const u32 iceland_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};

static const u32 cz_golden_settings_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};

static const u32 cz_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};

static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};


static const char * const sq_edc_source_names[] = {
	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);

#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK                    0x0000007fL
#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT                  0x00000000L

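/*
 * Commentary (added for clarity): applies the per-ASIC golden settings
 * defined above.  On Polaris 10 this additionally programs the ACLK
 * divider through the SMC and, for a handful of boards identified by PCI
 * subsystem IDs, issues board-specific fixups over the atombios I2C
 * channel.
 */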
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
							iceland_golden_common_all,
							ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
							fiji_golden_common_all,
							ARRAY_SIZE(fiji_golden_common_all));
		break;

	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
							tonga_golden_common_all,
							ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_vegam_a11,
							ARRAY_SIZE(golden_settings_vegam_a11));
		amdgpu_device_program_register_sequence(adev,
							vegam_golden_common_all,
							ARRAY_SIZE(vegam_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris11_golden_common_all,
							ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris10_golden_common_all,
							ARRAY_SIZE(polaris10_golden_common_all));
		data = RREG32_SMC(ixCG_ACLK_CNTL);
		data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK;
		data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT;
		WREG32_SMC(ixCG_ACLK_CNTL, data);
		if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_common_all,
							ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_common_all,
							ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}

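/*
 * Commentary (added for clarity): the ring test seeds SCRATCH_REG0 with
 * 0xCAFEDEAD, submits a SET_UCONFIG_REG packet that writes 0xDEADBEEF to
 * it, then polls the register for up to adev->usec_timeout microseconds;
 * a timeout means the CP never consumed the packet.
 */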
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSCRATCH_REG0);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

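/*
 * Commentary (added for clarity): the IB test is the same idea routed
 * through an indirect buffer: a writeback slot is seeded with 0xCAFEDEAD,
 * a WRITE_DATA packet in the IB overwrites it with 0xDEADBEEF, and the
 * scheduler fence is waited on before the slot is checked.
 */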
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}


static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ))
		amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

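/*
 * Commentary (added for clarity): for the Polaris parts each CP block
 * first requests the newer "<chip>_<block>_2.bin" image and falls back to
 * the legacy "<chip>_<block>.bin" name on -ENODEV; all other ASICs request
 * the legacy name directly.
 */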
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_VEGAM:
		chip_name = "vegam";
		break;
	default:
		BUG();
	}

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	/*
	 * Support for MCBP/Virtualization in combination with chained IBs was
	 * formally released in firmware feature version 46.
	 */
	if (adev->gfx.ce_feature_version >= 46 &&
	    adev->gfx.pfp_feature_version >= 46) {
		adev->virt.chained_ib_support = true;
		DRM_INFO("Chained IB support enabled!\n");
	} else
		adev->virt.chained_ib_support = false;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);

	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
					adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);

	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
		if (err == -ENODEV) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	}
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ)) {
		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
			if (err == -ENODEV) {
				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
				err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
			}
		} else {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
		}
		if (!err) {
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
			adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
			adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
		} else {
			err = 0;
			adev->gfx.mec2_fw = NULL;
		}
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
	info->fw = adev->gfx.pfp_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
	info->fw = adev->gfx.me_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
	info->fw = adev->gfx.ce_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
	info->fw = adev->gfx.rlc_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
	info->fw = adev->gfx.mec_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	/* we also need to account for the CP jump table (JT) */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);

	if (amdgpu_sriov_vf(adev)) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
	}

	if (adev->gfx.mec2_fw) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		info->fw = adev->gfx.mec2_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx8: Failed to load firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.ce_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
		amdgpu_ucode_release(&adev->gfx.mec2_fw);
	}
	return err;
}

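/*
 * Commentary (added for clarity): the clear-state buffer written below is
 * PREAMBLE_BEGIN_CLEAR_STATE, a CONTEXT_CONTROL packet, one
 * SET_CONTEXT_REG run per extent of the SECT_CONTEXT section, the raster
 * config pair, PREAMBLE_END_CLEAR_STATE, and a final CLEAR_STATE packet.
 */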
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_CARRIZO)
		return 5;
	else
		return 4;
}

static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = vi_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}

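/*
 * Commentary (added for clarity): reserves one GFX8_MEC_HPD_SIZE (4 KiB)
 * HPD/EOP slot per acquired compute ring, placed in VRAM or GTT, and
 * zeroes the whole buffer before it is handed to the MEC.
 */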
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);

	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

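/*
 * Commentary (added for clarity): the two arrays below are raw GCN3
 * machine code used by the Carrizo EDC GPR workaround further down; each
 * dword is one encoded instruction, and both kernels end in s_barrier
 * (0xbf8a0000) followed by s_endpgm (0xbf810000).
 */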
static const u32 vgpr_init_compute_shader[] =
{
	0x7e000209, 0x7e020208,
	0x7e040207, 0x7e060206,
	0x7e080205, 0x7e0a0204,
	0x7e0c0203, 0x7e0e0202,
	0x7e100201, 0x7e120200,
	0x7e140209, 0x7e160208,
	0x7e180207, 0x7e1a0206,
	0x7e1c0205, 0x7e1e0204,
	0x7e200203, 0x7e220202,
	0x7e240201, 0x7e260200,
	0x7e280209, 0x7e2a0208,
	0x7e2c0207, 0x7e2e0206,
	0x7e300205, 0x7e320204,
	0x7e340203, 0x7e360202,
	0x7e380201, 0x7e3a0200,
	0x7e3c0209, 0x7e3e0208,
	0x7e400207, 0x7e420206,
	0x7e440205, 0x7e460204,
	0x7e480203, 0x7e4a0202,
	0x7e4c0201, 0x7e4e0200,
	0x7e500209, 0x7e520208,
	0x7e540207, 0x7e560206,
	0x7e580205, 0x7e5a0204,
	0x7e5c0203, 0x7e5e0202,
	0x7e600201, 0x7e620200,
	0x7e640209, 0x7e660208,
	0x7e680207, 0x7e6a0206,
	0x7e6c0205, 0x7e6e0204,
	0x7e700203, 0x7e720202,
	0x7e740201, 0x7e760200,
	0x7e780209, 0x7e7a0208,
	0x7e7c0207, 0x7e7e0206,
	0xbf8a0000, 0xbf810000,
};

static const u32 sgpr_init_compute_shader[] =
{
	0xbe8a0100, 0xbe8c0102,
	0xbe8e0104, 0xbe900106,
	0xbe920108, 0xbe940100,
	0xbe960102, 0xbe980104,
	0xbe9a0106, 0xbe9c0108,
	0xbe9e0100, 0xbea00102,
	0xbea20104, 0xbea40106,
	0xbea60108, 0xbea80100,
	0xbeaa0102, 0xbeac0104,
	0xbeae0106, 0xbeb00108,
	0xbeb20100, 0xbeb40102,
	0xbeb60104, 0xbeb80106,
	0xbeba0108, 0xbebc0100,
	0xbebe0102, 0xbec00104,
	0xbec20106, 0xbec40108,
	0xbec60100, 0xbec80102,
	0xbee60004, 0xbee70005,
	0xbeea0006, 0xbeeb0007,
	0xbee80008, 0xbee90009,
	0xbefc0000, 0xbf8a0000,
	0xbf810000, 0x00000000,
};

static const u32 vgpr_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*4,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr1_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr2_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sec_ded_counter_registers[] =
{
	mmCPC_EDC_ATC_CNT,
	mmCPC_EDC_SCRATCH_CNT,
	mmCPC_EDC_UCODE_CNT,
	mmCPF_EDC_ATC_CNT,
	mmCPF_EDC_ROQ_CNT,
	mmCPF_EDC_TAG_CNT,
	mmCPG_EDC_ATC_CNT,
	mmCPG_EDC_DMA_CNT,
	mmCPG_EDC_TAG_CNT,
	mmDC_EDC_CSINVOC_CNT,
	mmDC_EDC_RESTORE_CNT,
	mmDC_EDC_STATE_CNT,
	mmGDS_EDC_CNT,
	mmGDS_EDC_GRBM_CNT,
	mmGDS_EDC_OA_DED,
	mmSPI_EDC_CNT,
	mmSQC_ATC_EDC_GATCL1_CNT,
	mmSQC_EDC_CNT,
	mmSQ_EDC_DED_CNT,
	mmSQ_EDC_INFO,
	mmSQ_EDC_SEC_CNT,
	mmTCC_EDC_CNT,
	mmTCP_ATC_EDC_GATCL1_CNT,
	mmTCP_EDC_CNT,
	mmTD_EDC_CNT
};

1488static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1489{
1490	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1491	struct amdgpu_ib ib;
1492	struct dma_fence *f = NULL;
1493	int r, i;
1494	u32 tmp;
1495	unsigned total_size, vgpr_offset, sgpr_offset;
1496	u64 gpu_addr;
1497
1498	/* only supported on CZ */
1499	if (adev->asic_type != CHIP_CARRIZO)
1500		return 0;
1501
1502	/* bail if the compute ring is not ready */
1503	if (!ring->sched.ready)
1504		return 0;
1505
1506	tmp = RREG32(mmGB_EDC_MODE);
1507	WREG32(mmGB_EDC_MODE, 0);
1508
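	/* IB sizing: each register pair below costs a 3-dword SET_SH_REG
	 * write (header, offset, value); add 4 dwords for the PGM_LO/HI
	 * packet, 5 for DISPATCH_DIRECT and 2 for the CS partial flush
	 * event, then convert dwords to bytes.
	 */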
1509	total_size =
1510		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1511	total_size +=
1512		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1513	total_size +=
1514		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1515	total_size = ALIGN(total_size, 256);
1516	vgpr_offset = total_size;
1517	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1518	sgpr_offset = total_size;
1519	total_size += sizeof(sgpr_init_compute_shader);
1520
1521	/* allocate an indirect buffer to put the commands in */
1522	memset(&ib, 0, sizeof(ib));
1523	r = amdgpu_ib_get(adev, NULL, total_size,
1524					AMDGPU_IB_POOL_DIRECT, &ib);
1525	if (r) {
1526		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1527		return r;
1528	}
1529
1530	/* load the compute shaders */
1531	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1532		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1533
1534	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1535		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1536
1537	/* init the ib length to 0 */
1538	ib.length_dw = 0;
1539
1540	/* VGPR */
1541	/* write the register state for the compute dispatch */
1542	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1543		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1544		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1545		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1546	}
1547	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
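	/* the CP takes the shader start address in 256-byte units, hence >> 8 */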
1548	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1549	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1550	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1551	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1552	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1553
1554	/* write dispatch packet */
1555	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1556	ib.ptr[ib.length_dw++] = 8; /* x */
1557	ib.ptr[ib.length_dw++] = 1; /* y */
1558	ib.ptr[ib.length_dw++] = 1; /* z */
1559	ib.ptr[ib.length_dw++] =
1560		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1561
1562	/* write CS partial flush packet */
1563	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1564	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1565
1566	/* SGPR1 */
1567	/* write the register state for the compute dispatch */
1568	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1569		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1570		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1571		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1572	}
1573	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1574	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1575	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1576	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1577	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1578	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1579
1580	/* write dispatch packet */
1581	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1582	ib.ptr[ib.length_dw++] = 8; /* x */
1583	ib.ptr[ib.length_dw++] = 1; /* y */
1584	ib.ptr[ib.length_dw++] = 1; /* z */
1585	ib.ptr[ib.length_dw++] =
1586		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1587
1588	/* write CS partial flush packet */
1589	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1590	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1591
1592	/* SGPR2 */
1593	/* write the register state for the compute dispatch */
1594	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1595		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1596		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1597		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1598	}
1599	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1600	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1601	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1602	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1603	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1604	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1605
1606	/* write dispatch packet */
1607	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1608	ib.ptr[ib.length_dw++] = 8; /* x */
1609	ib.ptr[ib.length_dw++] = 1; /* y */
1610	ib.ptr[ib.length_dw++] = 1; /* z */
1611	ib.ptr[ib.length_dw++] =
1612		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1613
1614	/* write CS partial flush packet */
1615	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1616	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1617
1618	/* schedule the ib on the ring */
1619	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1620	if (r) {
1621		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1622		goto fail;
1623	}
1624
1625	/* wait for the GPU to finish processing the IB */
1626	r = dma_fence_wait(f, false);
1627	if (r) {
1628		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1629		goto fail;
1630	}
1631
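	/* with the GPRs scrubbed, re-enable EDC DED handling and FED propagation */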
1632	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1633	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1634	WREG32(mmGB_EDC_MODE, tmp);
1635
1636	tmp = RREG32(mmCC_GC_EDC_CONFIG);
1637	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1638	WREG32(mmCC_GC_EDC_CONFIG, tmp);
1639
1641	/* read back registers to clear the counters */
1642	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1643		RREG32(sec_ded_counter_registers[i]);
1644
1645fail:
1646	amdgpu_ib_free(adev, &ib, NULL);
1647	dma_fence_put(f);
1648
1649	return r;
1650}
1651
1652static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1653{
1654	u32 gb_addr_config;
1655	u32 mc_arb_ramcfg;
1656	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
1657	u32 tmp;
1658	int ret;
1659
1660	switch (adev->asic_type) {
1661	case CHIP_TOPAZ:
1662		adev->gfx.config.max_shader_engines = 1;
1663		adev->gfx.config.max_tile_pipes = 2;
1664		adev->gfx.config.max_cu_per_sh = 6;
1665		adev->gfx.config.max_sh_per_se = 1;
1666		adev->gfx.config.max_backends_per_se = 2;
1667		adev->gfx.config.max_texture_channel_caches = 2;
1668		adev->gfx.config.max_gprs = 256;
1669		adev->gfx.config.max_gs_threads = 32;
1670		adev->gfx.config.max_hw_contexts = 8;
1671
1672		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1673		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1674		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1675		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1676		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1677		break;
1678	case CHIP_FIJI:
1679		adev->gfx.config.max_shader_engines = 4;
1680		adev->gfx.config.max_tile_pipes = 16;
1681		adev->gfx.config.max_cu_per_sh = 16;
1682		adev->gfx.config.max_sh_per_se = 1;
1683		adev->gfx.config.max_backends_per_se = 4;
1684		adev->gfx.config.max_texture_channel_caches = 16;
1685		adev->gfx.config.max_gprs = 256;
1686		adev->gfx.config.max_gs_threads = 32;
1687		adev->gfx.config.max_hw_contexts = 8;
1688
1689		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1690		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1691		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1692		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1693		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1694		break;
1695	case CHIP_POLARIS11:
1696	case CHIP_POLARIS12:
1697		ret = amdgpu_atombios_get_gfx_info(adev);
1698		if (ret)
1699			return ret;
1700		adev->gfx.config.max_gprs = 256;
1701		adev->gfx.config.max_gs_threads = 32;
1702		adev->gfx.config.max_hw_contexts = 8;
1703
1704		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1705		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1706		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1707		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1708		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
1709		break;
1710	case CHIP_POLARIS10:
1711	case CHIP_VEGAM:
1712		ret = amdgpu_atombios_get_gfx_info(adev);
1713		if (ret)
1714			return ret;
1715		adev->gfx.config.max_gprs = 256;
1716		adev->gfx.config.max_gs_threads = 32;
1717		adev->gfx.config.max_hw_contexts = 8;
1718
1719		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1720		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1721		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1722		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1723		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1724		break;
1725	case CHIP_TONGA:
1726		adev->gfx.config.max_shader_engines = 4;
1727		adev->gfx.config.max_tile_pipes = 8;
1728		adev->gfx.config.max_cu_per_sh = 8;
1729		adev->gfx.config.max_sh_per_se = 1;
1730		adev->gfx.config.max_backends_per_se = 2;
1731		adev->gfx.config.max_texture_channel_caches = 8;
1732		adev->gfx.config.max_gprs = 256;
1733		adev->gfx.config.max_gs_threads = 32;
1734		adev->gfx.config.max_hw_contexts = 8;
1735
1736		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1737		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1738		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1739		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1740		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1741		break;
1742	case CHIP_CARRIZO:
1743		adev->gfx.config.max_shader_engines = 1;
1744		adev->gfx.config.max_tile_pipes = 2;
1745		adev->gfx.config.max_sh_per_se = 1;
1746		adev->gfx.config.max_backends_per_se = 2;
1747		adev->gfx.config.max_cu_per_sh = 8;
1748		adev->gfx.config.max_texture_channel_caches = 2;
1749		adev->gfx.config.max_gprs = 256;
1750		adev->gfx.config.max_gs_threads = 32;
1751		adev->gfx.config.max_hw_contexts = 8;
1752
1753		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1754		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1755		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1756		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1757		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1758		break;
1759	case CHIP_STONEY:
1760		adev->gfx.config.max_shader_engines = 1;
1761		adev->gfx.config.max_tile_pipes = 2;
1762		adev->gfx.config.max_sh_per_se = 1;
1763		adev->gfx.config.max_backends_per_se = 1;
1764		adev->gfx.config.max_cu_per_sh = 3;
1765		adev->gfx.config.max_texture_channel_caches = 2;
1766		adev->gfx.config.max_gprs = 256;
1767		adev->gfx.config.max_gs_threads = 16;
1768		adev->gfx.config.max_hw_contexts = 8;
1769
1770		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1771		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1772		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1773		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1774		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1775		break;
1776	default:
1777		adev->gfx.config.max_shader_engines = 2;
1778		adev->gfx.config.max_tile_pipes = 4;
1779		adev->gfx.config.max_cu_per_sh = 2;
1780		adev->gfx.config.max_sh_per_se = 1;
1781		adev->gfx.config.max_backends_per_se = 2;
1782		adev->gfx.config.max_texture_channel_caches = 4;
1783		adev->gfx.config.max_gprs = 256;
1784		adev->gfx.config.max_gs_threads = 32;
1785		adev->gfx.config.max_hw_contexts = 8;
1786
1787		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1788		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1789		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1790		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1791		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1792		break;
1793	}
1794
1795	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1796	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1797
1798	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
1799				MC_ARB_RAMCFG, NOOFBANK);
1800	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
1801				MC_ARB_RAMCFG, NOOFRANKS);
1802
1803	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1804	adev->gfx.config.mem_max_burst_length_bytes = 256;
1805	if (adev->flags & AMD_IS_APU) {
1806		/* Get memory bank mapping mode. */
1807		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1808		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1809		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1810
1811		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1812		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1813		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1814
1815		/* Validate settings in case only one DIMM is installed. */
1816		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1817			dimm00_addr_map = 0;
1818		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1819			dimm01_addr_map = 0;
1820		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1821			dimm10_addr_map = 0;
1822		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1823			dimm11_addr_map = 0;
1824
1825		/* If the DIMM address map is 8GB, the row size should be 2KB; otherwise 1KB. */
1826		/* If ROW size(DIMM1) != ROW size(DIMM0), use the larger row size. */
1827		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1828			adev->gfx.config.mem_row_size_in_kb = 2;
1829		else
1830			adev->gfx.config.mem_row_size_in_kb = 1;
1831	} else {
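		/* row size in KB = 4 bytes * 2^(8 + NOOFCOLS) columns / 1024, capped at 4 below */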
1832		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1833		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1834		if (adev->gfx.config.mem_row_size_in_kb > 4)
1835			adev->gfx.config.mem_row_size_in_kb = 4;
1836	}
1837
1838	adev->gfx.config.shader_engine_tile_size = 32;
1839	adev->gfx.config.num_gpus = 1;
1840	adev->gfx.config.multi_gpu_tile_size = 64;
1841
1842	/* fix up row size */
1843	switch (adev->gfx.config.mem_row_size_in_kb) {
1844	case 1:
1845	default:
1846		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1847		break;
1848	case 2:
1849		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1850		break;
1851	case 4:
1852		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1853		break;
1854	}
1855	adev->gfx.config.gb_addr_config = gb_addr_config;
1856
1857	return 0;
1858}
1859
1860static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1861					int mec, int pipe, int queue)
1862{
1863	int r;
1864	unsigned irq_type;
1865	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1866	unsigned int hw_prio;
1867
1870	/* mec0 is me1 */
1871	ring->me = mec + 1;
1872	ring->pipe = pipe;
1873	ring->queue = queue;
1874
1875	ring->ring_obj = NULL;
1876	ring->use_doorbell = true;
1877	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
1878	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1879				+ (ring_id * GFX8_MEC_HPD_SIZE);
1880	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1881
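	/* EOP interrupt sources are contiguous, one per pipe, starting at MEC1 PIPE0 */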
1882	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1883		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1884		+ ring->pipe;
1885
1886	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1887			AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
1888	/* type-2 packets are deprecated on MEC, use type-3 instead */
1889	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1890			     hw_prio, NULL);
1891	if (r)
1892		return r;
1893
1895	return 0;
1896}
1897
1898static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
1899
1900static int gfx_v8_0_sw_init(void *handle)
1901{
1902	int i, j, k, r, ring_id;
1903	int xcc_id = 0;
1904	struct amdgpu_ring *ring;
1905	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1906
1907	switch (adev->asic_type) {
1908	case CHIP_TONGA:
1909	case CHIP_CARRIZO:
1910	case CHIP_FIJI:
1911	case CHIP_POLARIS10:
1912	case CHIP_POLARIS11:
1913	case CHIP_POLARIS12:
1914	case CHIP_VEGAM:
1915		adev->gfx.mec.num_mec = 2;
1916		break;
1917	case CHIP_TOPAZ:
1918	case CHIP_STONEY:
1919	default:
1920		adev->gfx.mec.num_mec = 1;
1921		break;
1922	}
1923
1924	adev->gfx.mec.num_pipe_per_mec = 4;
1925	adev->gfx.mec.num_queue_per_pipe = 8;
1926
1927	/* EOP Event */
1928	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
1929	if (r)
1930		return r;
1931
1932	/* Privileged reg */
1933	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
1934			      &adev->gfx.priv_reg_irq);
1935	if (r)
1936		return r;
1937
1938	/* Privileged inst */
1939	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
1940			      &adev->gfx.priv_inst_irq);
1941	if (r)
1942		return r;
1943
1944	/* Add CP EDC/ECC irq */
1945	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
1946			      &adev->gfx.cp_ecc_error_irq);
1947	if (r)
1948		return r;
1949
1950	/* SQ interrupts. */
1951	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
1952			      &adev->gfx.sq_irq);
1953	if (r) {
1954		DRM_ERROR("amdgpu_irq_add_id() for SQ failed: %d\n", r);
1955		return r;
1956	}
1957
1958	INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
1959
1960	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1961
1962	r = gfx_v8_0_init_microcode(adev);
1963	if (r) {
1964		DRM_ERROR("Failed to load gfx firmware!\n");
1965		return r;
1966	}
1967
1968	r = adev->gfx.rlc.funcs->init(adev);
1969	if (r) {
1970		DRM_ERROR("Failed to init rlc BOs!\n");
1971		return r;
1972	}
1973
1974	r = gfx_v8_0_mec_init(adev);
1975	if (r) {
1976		DRM_ERROR("Failed to init MEC BOs!\n");
1977		return r;
1978	}
1979
1980	/* set up the gfx ring */
1981	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1982		ring = &adev->gfx.gfx_ring[i];
1983		ring->ring_obj = NULL;
1984		sprintf(ring->name, "gfx");
1985		/* no gfx doorbells on iceland */
1986		if (adev->asic_type != CHIP_TOPAZ) {
1987			ring->use_doorbell = true;
1988			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
1989		}
1990
1991		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
1992				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
1993				     AMDGPU_RING_PRIO_DEFAULT, NULL);
1994		if (r)
1995			return r;
1996	}
1997
1998
1999	/* set up the compute queues - allocate horizontally across pipes */
2000	ring_id = 0;
2001	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2002		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2003			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2004				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
2005								     k, j))
2006					continue;
2007
2008				r = gfx_v8_0_compute_ring_init(adev,
2009								ring_id,
2010								i, k, j);
2011				if (r)
2012					return r;
2013
2014				ring_id++;
2015			}
2016		}
2017	}
2018
2019	r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
2020	if (r) {
2021		DRM_ERROR("Failed to init KIQ BOs!\n");
2022		return r;
2023	}
2024
2025	r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
2026	if (r)
2027		return r;
2028
2029	/* create MQD for all compute queues as well as KIQ for SRIOV case */
2030	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
2031	if (r)
2032		return r;
2033
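	/* constant engine (CE) RAM size: 0x8000 bytes = 32 KB */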
2034	adev->gfx.ce_ram_size = 0x8000;
2035
2036	r = gfx_v8_0_gpu_early_init(adev);
2037	if (r)
2038		return r;
2039
2040	return 0;
2041}
2042
2043static int gfx_v8_0_sw_fini(void *handle)
2044{
2045	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2046	int i;
2047
2048	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2049		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2050	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2051		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2052
2053	amdgpu_gfx_mqd_sw_fini(adev, 0);
2054	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2055	amdgpu_gfx_kiq_fini(adev, 0);
2056
2057	gfx_v8_0_mec_fini(adev);
2058	amdgpu_gfx_rlc_fini(adev);
2059	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2060				&adev->gfx.rlc.clear_state_gpu_addr,
2061				(void **)&adev->gfx.rlc.cs_ptr);
2062	if ((adev->asic_type == CHIP_CARRIZO) ||
2063	    (adev->asic_type == CHIP_STONEY)) {
2064		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2065				&adev->gfx.rlc.cp_table_gpu_addr,
2066				(void **)&adev->gfx.rlc.cp_table_ptr);
2067	}
2068	gfx_v8_0_free_microcode(adev);
2069
2070	return 0;
2071}
2072
2073static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2074{
2075	uint32_t *modearray, *mod2array;
2076	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2077	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2078	u32 reg_offset;
2079
2080	modearray = adev->gfx.config.tile_mode_array;
2081	mod2array = adev->gfx.config.macrotile_mode_array;
2082
2083	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2084		modearray[reg_offset] = 0;
2085
2086	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2087		mod2array[reg_offset] = 0;
2088
2089	switch (adev->asic_type) {
2090	case CHIP_TOPAZ:
2091		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2092				PIPE_CONFIG(ADDR_SURF_P2) |
2093				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2094				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2095		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2096				PIPE_CONFIG(ADDR_SURF_P2) |
2097				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2098				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2099		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2100				PIPE_CONFIG(ADDR_SURF_P2) |
2101				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2102				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2103		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2104				PIPE_CONFIG(ADDR_SURF_P2) |
2105				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2106				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2107		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2108				PIPE_CONFIG(ADDR_SURF_P2) |
2109				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2110				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2111		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2112				PIPE_CONFIG(ADDR_SURF_P2) |
2113				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2114				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2115		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2116				PIPE_CONFIG(ADDR_SURF_P2) |
2117				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2118				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2119		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2120				PIPE_CONFIG(ADDR_SURF_P2));
2121		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2122				PIPE_CONFIG(ADDR_SURF_P2) |
2123				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2124				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2125		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2126				 PIPE_CONFIG(ADDR_SURF_P2) |
2127				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2128				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2129		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2130				 PIPE_CONFIG(ADDR_SURF_P2) |
2131				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2132				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2133		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2134				 PIPE_CONFIG(ADDR_SURF_P2) |
2135				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2136				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2137		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2138				 PIPE_CONFIG(ADDR_SURF_P2) |
2139				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2140				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2141		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2142				 PIPE_CONFIG(ADDR_SURF_P2) |
2143				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2144				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2145		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2146				 PIPE_CONFIG(ADDR_SURF_P2) |
2147				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2148				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2149		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2150				 PIPE_CONFIG(ADDR_SURF_P2) |
2151				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2152				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2153		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2154				 PIPE_CONFIG(ADDR_SURF_P2) |
2155				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2156				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2157		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2158				 PIPE_CONFIG(ADDR_SURF_P2) |
2159				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2160				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2161		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2162				 PIPE_CONFIG(ADDR_SURF_P2) |
2163				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2164				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2165		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2166				 PIPE_CONFIG(ADDR_SURF_P2) |
2167				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2168				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2169		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2170				 PIPE_CONFIG(ADDR_SURF_P2) |
2171				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2172				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2173		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2174				 PIPE_CONFIG(ADDR_SURF_P2) |
2175				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2176				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2177		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2178				 PIPE_CONFIG(ADDR_SURF_P2) |
2179				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2180				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2181		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2182				 PIPE_CONFIG(ADDR_SURF_P2) |
2183				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2184				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2185		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2186				 PIPE_CONFIG(ADDR_SURF_P2) |
2187				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2188				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2189		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2190				 PIPE_CONFIG(ADDR_SURF_P2) |
2191				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2192				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2193
2194		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2195				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2196				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2197				NUM_BANKS(ADDR_SURF_8_BANK));
2198		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2199				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2200				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2201				NUM_BANKS(ADDR_SURF_8_BANK));
2202		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2203				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2204				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2205				NUM_BANKS(ADDR_SURF_8_BANK));
2206		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2207				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2208				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2209				NUM_BANKS(ADDR_SURF_8_BANK));
2210		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2211				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2212				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2213				NUM_BANKS(ADDR_SURF_8_BANK));
2214		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2215				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2216				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2217				NUM_BANKS(ADDR_SURF_8_BANK));
2218		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2219				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2220				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2221				NUM_BANKS(ADDR_SURF_8_BANK));
2222		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2223				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2224				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2225				NUM_BANKS(ADDR_SURF_16_BANK));
2226		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2227				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2228				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2229				NUM_BANKS(ADDR_SURF_16_BANK));
2230		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2231				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2232				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2233				 NUM_BANKS(ADDR_SURF_16_BANK));
2234		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2235				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2236				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2237				 NUM_BANKS(ADDR_SURF_16_BANK));
2238		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2239				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2240				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2241				 NUM_BANKS(ADDR_SURF_16_BANK));
2242		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2243				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2244				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2245				 NUM_BANKS(ADDR_SURF_16_BANK));
2246		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2247				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2248				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2249				 NUM_BANKS(ADDR_SURF_8_BANK));
2250
2251		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2252			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2253			    reg_offset != 23)
2254				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2255
2256		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2257			if (reg_offset != 7)
2258				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2259
2260		break;
2261	case CHIP_FIJI:
2262	case CHIP_VEGAM:
2263		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2264				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2265				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2266				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2267		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2268				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2269				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2270				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2271		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2272				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2273				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2274				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2275		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2276				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2277				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2278				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2279		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2280				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2281				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2282				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2283		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2284				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2285				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2286				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2287		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2288				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2289				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2290				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2291		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2292				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2293				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2294				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2295		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2296				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2297		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2298				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2299				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2300				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2301		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2302				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2303				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2304				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2305		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2306				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2307				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2308				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2309		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2310				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2311				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2312				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2313		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2314				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2315				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2316				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2317		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2318				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2319				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2320				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2321		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2322				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2323				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2324				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2325		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2326				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2327				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2328				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2329		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2330				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2331				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2332				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2333		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2334				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2335				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2336				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2337		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2338				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2339				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2340				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2341		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2342				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2343				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2344				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2345		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2346				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2347				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2348				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2349		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2350				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2351				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2352				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2353		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2354				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2355				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2356				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2357		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2358				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2359				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2360				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2361		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2362				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2363				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2364				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2365		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2366				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2367				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2368				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2369		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2370				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2371				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2372				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2373		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2374				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2375				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2376				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2377		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2378				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2379				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2380				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2381		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2382				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2383				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2384				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2385
2386		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2387				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2388				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2389				NUM_BANKS(ADDR_SURF_8_BANK));
2390		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2391				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2392				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2393				NUM_BANKS(ADDR_SURF_8_BANK));
2394		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2395				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2396				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2397				NUM_BANKS(ADDR_SURF_8_BANK));
2398		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2399				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2400				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2401				NUM_BANKS(ADDR_SURF_8_BANK));
2402		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2403				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2404				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2405				NUM_BANKS(ADDR_SURF_8_BANK));
2406		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2407				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2408				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2409				NUM_BANKS(ADDR_SURF_8_BANK));
2410		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2411				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2412				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2413				NUM_BANKS(ADDR_SURF_8_BANK));
2414		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2415				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2416				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2417				NUM_BANKS(ADDR_SURF_8_BANK));
2418		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2419				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2420				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2421				NUM_BANKS(ADDR_SURF_8_BANK));
2422		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2423				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2424				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2425				 NUM_BANKS(ADDR_SURF_8_BANK));
2426		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2427				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2428				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2429				 NUM_BANKS(ADDR_SURF_8_BANK));
2430		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2431				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2432				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2433				 NUM_BANKS(ADDR_SURF_8_BANK));
2434		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2435				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2436				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2437				 NUM_BANKS(ADDR_SURF_8_BANK));
2438		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2439				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2440				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2441				 NUM_BANKS(ADDR_SURF_4_BANK));
2442
2443		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2444			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2445
2446		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2447			if (reg_offset != 7)
2448				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2449
2450		break;
2451	case CHIP_TONGA:
2452		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2453				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2454				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2455				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2456		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2457				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2458				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2459				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2460		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2461				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2462				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2463				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2464		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2465				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2466				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2467				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2468		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2469				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2470				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2471				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2472		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2473				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2474				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2475				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2476		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2477				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2478				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2479				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2480		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2481				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2482				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2483				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2484		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2485				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2486		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2487				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2488				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2489				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2490		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2491				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2492				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2493				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2494		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2495				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2496				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2497				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2498		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2499				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2500				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2501				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2502		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2503				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2504				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2505				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2506		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2507				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2508				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2509				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2510		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2511				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2512				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2513				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2514		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2515				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2516				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2517				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2518		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2519				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2520				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2521				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2522		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2523				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2524				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2525				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2526		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2527				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2528				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2529				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2530		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2531				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2532				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2533				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2534		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2535				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2536				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2537				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2538		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2539				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2540				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2541				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2542		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2543				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2544				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2545				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2546		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2547				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2548				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2549				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2550		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2551				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2552				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2553				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2554		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2555				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2556				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2557				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2558		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2559				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2560				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2561				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2562		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2563				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2564				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2565				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2566		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2567				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2568				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2569				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2570		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2571				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2572				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2573				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2574
2575		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2576				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2577				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2578				NUM_BANKS(ADDR_SURF_16_BANK));
2579		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2580				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2581				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2582				NUM_BANKS(ADDR_SURF_16_BANK));
2583		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2584				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2585				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2586				NUM_BANKS(ADDR_SURF_16_BANK));
2587		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2588				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2589				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2590				NUM_BANKS(ADDR_SURF_16_BANK));
2591		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2592				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2593				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2594				NUM_BANKS(ADDR_SURF_16_BANK));
2595		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2596				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2597				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2598				NUM_BANKS(ADDR_SURF_16_BANK));
2599		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2600				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2601				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2602				NUM_BANKS(ADDR_SURF_16_BANK));
2603		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2604				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2605				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2606				NUM_BANKS(ADDR_SURF_16_BANK));
2607		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2608				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2609				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2610				NUM_BANKS(ADDR_SURF_16_BANK));
2611		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2612				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2613				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2614				 NUM_BANKS(ADDR_SURF_16_BANK));
2615		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2616				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2617				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2618				 NUM_BANKS(ADDR_SURF_16_BANK));
2619		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2620				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2621				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2622				 NUM_BANKS(ADDR_SURF_8_BANK));
2623		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2624				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2625				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2626				 NUM_BANKS(ADDR_SURF_4_BANK));
2627		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2628				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2629				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2630				 NUM_BANKS(ADDR_SURF_4_BANK));
2631
2632		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2633			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2634
2635		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2636			if (reg_offset != 7)
2637				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2638
2639		break;
2640	case CHIP_POLARIS11:
2641	case CHIP_POLARIS12:
2642		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2643				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2644				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2645				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2646		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2647				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2648				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2649				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2650		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2651				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2652				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2653				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2654		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2655				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2656				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2657				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2658		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2659				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2660				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2661				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2662		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2663				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2664				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2665				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2666		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2667				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2668				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2669				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2670		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2671				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2672				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2673				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2674		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2675				PIPE_CONFIG(ADDR_SURF_P4_16x16));
2676		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2677				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2678				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2679				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2680		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2681				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2682				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2683				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2684		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2685				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2686				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2687				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2688		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2689				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2690				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2691				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2692		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2693				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2694				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2695				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2696		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2697				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2698				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2699				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2700		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2701				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2702				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2703				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2704		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2705				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2706				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2707				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2708		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2709				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2710				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2711				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2712		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2713				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2714				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2715				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2716		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2717				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2718				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2719				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2720		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2721				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2722				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2723				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2724		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2725				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2726				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2727				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2728		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2729				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2730				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2731				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2732		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2733				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2734				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2735				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2736		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2737				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2738				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2739				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2740		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2741				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2742				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2743				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2744		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2745				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2746				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2747				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2748		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2749				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2750				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2751				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2752		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2753				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2754				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2755				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2756		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2757				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2758				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2759				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2760		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2761				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2762				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2763				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2764
2765		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2766				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2767				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2768				NUM_BANKS(ADDR_SURF_16_BANK));
2769
2770		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2771				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2772				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2773				NUM_BANKS(ADDR_SURF_16_BANK));
2774
2775		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2776				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2777				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2778				NUM_BANKS(ADDR_SURF_16_BANK));
2779
2780		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2781				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2782				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2783				NUM_BANKS(ADDR_SURF_16_BANK));
2784
2785		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2786				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2787				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2788				NUM_BANKS(ADDR_SURF_16_BANK));
2789
2790		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2791				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2792				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2793				NUM_BANKS(ADDR_SURF_16_BANK));
2794
2795		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2796				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2797				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2798				NUM_BANKS(ADDR_SURF_16_BANK));
2799
2800		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2801				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2802				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2803				NUM_BANKS(ADDR_SURF_16_BANK));
2804
2805		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2806				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2807				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2808				NUM_BANKS(ADDR_SURF_16_BANK));
2809
2810		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2811				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2812				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2813				NUM_BANKS(ADDR_SURF_16_BANK));
2814
2815		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2816				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2817				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2818				NUM_BANKS(ADDR_SURF_16_BANK));
2819
2820		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2821				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2822				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2823				NUM_BANKS(ADDR_SURF_16_BANK));
2824
2825		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2826				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2827				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2828				NUM_BANKS(ADDR_SURF_8_BANK));
2829
2830		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2831				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2832				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2833				NUM_BANKS(ADDR_SURF_4_BANK));
2834
2835		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2836			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2837
2838		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2839			if (reg_offset != 7)
2840				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2841
2842		break;
2843	case CHIP_POLARIS10:
2844		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2845				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2846				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2847				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2848		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2849				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2850				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2851				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2852		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2853				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2854				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2855				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2856		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2857				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2858				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2859				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2860		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2861				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2862				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2863				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2864		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2865				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2866				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2867				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2868		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2869				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2870				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2871				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2872		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2873				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2874				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2875				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2876		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2877				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2878		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2879				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2880				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2881				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2882		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2883				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2884				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2885				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2886		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2887				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2888				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2889				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2890		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2891				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2892				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2893				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2894		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2895				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2896				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2897				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2898		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2899				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2900				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2901				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2902		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2903				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2904				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2905				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2906		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2907				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2908				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2909				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2910		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2911				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2912				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2913				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2914		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2915				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2916				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2917				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2918		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2919				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2920				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2921				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2922		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2923				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2924				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2925				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2926		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2927				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2928				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2929				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2930		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2931				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2932				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2933				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2934		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2935				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2936				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2937				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2938		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2939				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2940				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2941				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2942		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2943				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2944				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2945				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2946		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2947				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2948				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2949				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2950		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2951				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2952				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2953				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2954		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2955				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2956				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2957				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2958		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2959				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2960				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2961				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2962		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2963				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2964				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2965				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2966
2967		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2968				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2969				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2970				NUM_BANKS(ADDR_SURF_16_BANK));
2971
2972		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2973				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2974				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2975				NUM_BANKS(ADDR_SURF_16_BANK));
2976
2977		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2978				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2979				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2980				NUM_BANKS(ADDR_SURF_16_BANK));
2981
2982		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2983				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2984				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2985				NUM_BANKS(ADDR_SURF_16_BANK));
2986
2987		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2988				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2989				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2990				NUM_BANKS(ADDR_SURF_16_BANK));
2991
2992		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2993				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2994				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2995				NUM_BANKS(ADDR_SURF_16_BANK));
2996
2997		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2998				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2999				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3000				NUM_BANKS(ADDR_SURF_16_BANK));
3001
3002		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3003				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3004				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3005				NUM_BANKS(ADDR_SURF_16_BANK));
3006
3007		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3008				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3009				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3010				NUM_BANKS(ADDR_SURF_16_BANK));
3011
3012		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3013				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3014				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3015				NUM_BANKS(ADDR_SURF_16_BANK));
3016
3017		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3018				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3019				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3020				NUM_BANKS(ADDR_SURF_16_BANK));
3021
3022		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3023				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3024				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3025				NUM_BANKS(ADDR_SURF_8_BANK));
3026
3027		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3028				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3029				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3030				NUM_BANKS(ADDR_SURF_4_BANK));
3031
3032		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3033				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3034				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3035				NUM_BANKS(ADDR_SURF_4_BANK));
3036
3037		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3038			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3039
3040		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3041			if (reg_offset != 7)
3042				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3043
3044		break;
3045	case CHIP_STONEY:
3046		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3047				PIPE_CONFIG(ADDR_SURF_P2) |
3048				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3049				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3050		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3051				PIPE_CONFIG(ADDR_SURF_P2) |
3052				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3053				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3054		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3055				PIPE_CONFIG(ADDR_SURF_P2) |
3056				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3057				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3058		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3059				PIPE_CONFIG(ADDR_SURF_P2) |
3060				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3061				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3062		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3063				PIPE_CONFIG(ADDR_SURF_P2) |
3064				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3065				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3066		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3067				PIPE_CONFIG(ADDR_SURF_P2) |
3068				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3069				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3070		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3071				PIPE_CONFIG(ADDR_SURF_P2) |
3072				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3073				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3074		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3075				PIPE_CONFIG(ADDR_SURF_P2));
3076		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3077				PIPE_CONFIG(ADDR_SURF_P2) |
3078				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3079				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3080		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3081				 PIPE_CONFIG(ADDR_SURF_P2) |
3082				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3083				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3084		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3085				 PIPE_CONFIG(ADDR_SURF_P2) |
3086				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3087				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3088		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3089				 PIPE_CONFIG(ADDR_SURF_P2) |
3090				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3091				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3092		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3093				 PIPE_CONFIG(ADDR_SURF_P2) |
3094				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3095				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3096		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3097				 PIPE_CONFIG(ADDR_SURF_P2) |
3098				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3099				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3100		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3101				 PIPE_CONFIG(ADDR_SURF_P2) |
3102				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3103				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3104		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3105				 PIPE_CONFIG(ADDR_SURF_P2) |
3106				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3107				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3108		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3109				 PIPE_CONFIG(ADDR_SURF_P2) |
3110				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3111				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3112		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3113				 PIPE_CONFIG(ADDR_SURF_P2) |
3114				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3115				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3116		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3117				 PIPE_CONFIG(ADDR_SURF_P2) |
3118				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3119				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3120		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3121				 PIPE_CONFIG(ADDR_SURF_P2) |
3122				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3123				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3124		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3125				 PIPE_CONFIG(ADDR_SURF_P2) |
3126				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3127				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3128		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3129				 PIPE_CONFIG(ADDR_SURF_P2) |
3130				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3131				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3132		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3133				 PIPE_CONFIG(ADDR_SURF_P2) |
3134				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3135				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3136		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3137				 PIPE_CONFIG(ADDR_SURF_P2) |
3138				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3139				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3140		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3141				 PIPE_CONFIG(ADDR_SURF_P2) |
3142				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3143				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3144		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3145				 PIPE_CONFIG(ADDR_SURF_P2) |
3146				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3147				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3148
3149		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3150				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3151				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3152				NUM_BANKS(ADDR_SURF_8_BANK));
3153		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3154				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3155				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3156				NUM_BANKS(ADDR_SURF_8_BANK));
3157		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3158				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3159				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3160				NUM_BANKS(ADDR_SURF_8_BANK));
3161		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3162				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3163				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3164				NUM_BANKS(ADDR_SURF_8_BANK));
3165		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3166				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3167				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3168				NUM_BANKS(ADDR_SURF_8_BANK));
3169		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3170				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3171				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3172				NUM_BANKS(ADDR_SURF_8_BANK));
3173		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3174				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3175				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3176				NUM_BANKS(ADDR_SURF_8_BANK));
3177		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3178				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3179				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3180				NUM_BANKS(ADDR_SURF_16_BANK));
3181		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3182				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3183				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3184				NUM_BANKS(ADDR_SURF_16_BANK));
3185		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3186				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3187				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3188				 NUM_BANKS(ADDR_SURF_16_BANK));
3189		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3190				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3191				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3192				 NUM_BANKS(ADDR_SURF_16_BANK));
3193		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3194				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3195				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3196				 NUM_BANKS(ADDR_SURF_16_BANK));
3197		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3198				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3199				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3200				 NUM_BANKS(ADDR_SURF_16_BANK));
3201		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3202				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3203				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3204				 NUM_BANKS(ADDR_SURF_8_BANK));
3205
3206		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3207			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3208			    reg_offset != 23)
3209				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3210
3211		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3212			if (reg_offset != 7)
3213				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3214
3215		break;
3216	default:
3217		dev_warn(adev->dev,
3218			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init(), falling through to CHIP_CARRIZO\n",
3219			 adev->asic_type);
3220		fallthrough;
3221
3222	case CHIP_CARRIZO:
3223		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3224				PIPE_CONFIG(ADDR_SURF_P2) |
3225				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3226				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3227		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3228				PIPE_CONFIG(ADDR_SURF_P2) |
3229				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3230				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3231		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3232				PIPE_CONFIG(ADDR_SURF_P2) |
3233				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3234				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3235		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3236				PIPE_CONFIG(ADDR_SURF_P2) |
3237				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3238				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3239		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3240				PIPE_CONFIG(ADDR_SURF_P2) |
3241				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3242				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3243		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3244				PIPE_CONFIG(ADDR_SURF_P2) |
3245				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3246				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3247		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3248				PIPE_CONFIG(ADDR_SURF_P2) |
3249				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3250				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3251		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3252				PIPE_CONFIG(ADDR_SURF_P2));
3253		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3254				PIPE_CONFIG(ADDR_SURF_P2) |
3255				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3256				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3257		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3258				 PIPE_CONFIG(ADDR_SURF_P2) |
3259				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3260				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3261		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3262				 PIPE_CONFIG(ADDR_SURF_P2) |
3263				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3264				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3265		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3266				 PIPE_CONFIG(ADDR_SURF_P2) |
3267				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3268				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3269		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3270				 PIPE_CONFIG(ADDR_SURF_P2) |
3271				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3272				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3273		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3274				 PIPE_CONFIG(ADDR_SURF_P2) |
3275				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3276				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3277		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3278				 PIPE_CONFIG(ADDR_SURF_P2) |
3279				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3280				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3281		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3282				 PIPE_CONFIG(ADDR_SURF_P2) |
3283				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3284				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3285		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3286				 PIPE_CONFIG(ADDR_SURF_P2) |
3287				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3288				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3289		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3290				 PIPE_CONFIG(ADDR_SURF_P2) |
3291				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3292				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3293		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3294				 PIPE_CONFIG(ADDR_SURF_P2) |
3295				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3296				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3297		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3298				 PIPE_CONFIG(ADDR_SURF_P2) |
3299				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3300				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3301		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3302				 PIPE_CONFIG(ADDR_SURF_P2) |
3303				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3304				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3305		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3306				 PIPE_CONFIG(ADDR_SURF_P2) |
3307				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3308				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3309		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3310				 PIPE_CONFIG(ADDR_SURF_P2) |
3311				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3312				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3313		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3314				 PIPE_CONFIG(ADDR_SURF_P2) |
3315				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3316				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3317		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3318				 PIPE_CONFIG(ADDR_SURF_P2) |
3319				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3320				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3321		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3322				 PIPE_CONFIG(ADDR_SURF_P2) |
3323				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3324				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3325
3326		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3327				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3328				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3329				NUM_BANKS(ADDR_SURF_8_BANK));
3330		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3331				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3332				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3333				NUM_BANKS(ADDR_SURF_8_BANK));
3334		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3335				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3336				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3337				NUM_BANKS(ADDR_SURF_8_BANK));
3338		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3339				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3340				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3341				NUM_BANKS(ADDR_SURF_8_BANK));
3342		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3343				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3344				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3345				NUM_BANKS(ADDR_SURF_8_BANK));
3346		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3347				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3348				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3349				NUM_BANKS(ADDR_SURF_8_BANK));
3350		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3351				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3352				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3353				NUM_BANKS(ADDR_SURF_8_BANK));
3354		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3355				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3356				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3357				NUM_BANKS(ADDR_SURF_16_BANK));
3358		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3359				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3360				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3361				NUM_BANKS(ADDR_SURF_16_BANK));
3362		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3363				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3364				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3365				 NUM_BANKS(ADDR_SURF_16_BANK));
3366		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3367				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3368				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3369				 NUM_BANKS(ADDR_SURF_16_BANK));
3370		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3371				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3372				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3373				 NUM_BANKS(ADDR_SURF_16_BANK));
3374		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3375				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3376				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3377				 NUM_BANKS(ADDR_SURF_16_BANK));
3378		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3379				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3380				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3381				 NUM_BANKS(ADDR_SURF_8_BANK));
3382
3383		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3384			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3385			    reg_offset != 23)
3386				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3387
3388		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3389			if (reg_offset != 7)
3390				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3391
3392		break;
3393	}
3394}
3395
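/*
 * GRBM_GFX_INDEX steers all subsequent banked register accesses at a
 * specific shader engine (SE), shader array (SH) and instance; passing
 * 0xffffffff for a field sets the corresponding *_BROADCAST_WRITES bit
 * instead.  A typical caller sequence (sketch), which must hold
 * adev->grbm_idx_mutex around it, looks like:
 *
 *	gfx_v8_0_select_se_sh(adev, se, sh, 0xffffffff, 0);
 *	data = RREG32(mmCC_RB_BACKEND_DISABLE);
 *	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 *
 * (compare gfx_v8_0_setup_rb() below; xcc_id is unused on VI and exists
 * only to match the shared gfx function signature)
 */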
3396static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3397				  u32 se_num, u32 sh_num, u32 instance,
3398				  int xcc_id)
3399{
3400	u32 data;
3401
3402	if (instance == 0xffffffff)
3403		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3404	else
3405		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3406
3407	if (se_num == 0xffffffff)
3408		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
3409	else
3410		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
3411
3412	if (sh_num == 0xffffffff)
3413		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
3414	else
3415		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
3416
3417	WREG32(mmGRBM_GFX_INDEX, data);
3418}
3419
3420static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
3421				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
3422{
3423	vi_srbm_select(adev, me, pipe, q, vm);
3424}
3425
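/*
 * Return a bitmap of active render backends (RBs) for the currently
 * selected SE/SH: harvested RBs are flagged in CC_RB_BACKEND_DISABLE
 * (fused) and GC_USER_RB_BACKEND_DISABLE (user/driver), so the union of
 * the two disable fields is inverted and masked down to the
 * max_backends_per_se / max_sh_per_se bits a shader array actually has.
 */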
3426static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3427{
3428	u32 data, mask;
3429
3430	data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
3431		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3432
3433	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
3434
3435	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3436					 adev->gfx.config.max_sh_per_se);
3437
3438	return (~data) & mask;
3439}
3440
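/*
 * Provide the golden PA_SC_RASTER_CONFIG/PA_SC_RASTER_CONFIG_1 values
 * for a fully populated chip; gfx_v8_0_setup_rb() switches to
 * gfx_v8_0_write_harvested_raster_configs() when some RBs are fused off.
 */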
3441static void
3442gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3443{
3444	switch (adev->asic_type) {
3445	case CHIP_FIJI:
3446	case CHIP_VEGAM:
3447		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3448			  RB_XSEL2(1) | PKR_MAP(2) |
3449			  PKR_XSEL(1) | PKR_YSEL(1) |
3450			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3451		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3452			   SE_PAIR_YSEL(2);
3453		break;
3454	case CHIP_TONGA:
3455	case CHIP_POLARIS10:
3456		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3457			  SE_XSEL(1) | SE_YSEL(1);
3458		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3459			   SE_PAIR_YSEL(2);
3460		break;
3461	case CHIP_TOPAZ:
3462	case CHIP_CARRIZO:
3463		*rconf |= RB_MAP_PKR0(2);
3464		*rconf1 |= 0x0;
3465		break;
3466	case CHIP_POLARIS11:
3467	case CHIP_POLARIS12:
3468		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3469			  SE_XSEL(1) | SE_YSEL(1);
3470		*rconf1 |= 0x0;
3471		break;
3472	case CHIP_STONEY:
3473		*rconf |= 0x0;
3474		*rconf1 |= 0x0;
3475		break;
3476	default:
3477		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3478		break;
3479	}
3480}
3481
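/*
 * Rewrite the raster config per shader engine so the SE/packer/RB
 * mappings only reference backends that survived harvesting.  se_mask[n]
 * is the slice of rb_mask owned by SE n: e.g. with num_rb = 8 and
 * num_se = 2, rb_per_se = 4, so se_mask[0] and se_mask[1] cover bits
 * 0-3 and 4-7 of rb_mask respectively.
 */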
3482static void
3483gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3484					u32 raster_config, u32 raster_config_1,
3485					unsigned rb_mask, unsigned num_rb)
3486{
3487	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3488	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3489	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3490	unsigned rb_per_se = num_rb / num_se;
3491	unsigned se_mask[4];
3492	unsigned se;
3493
3494	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3495	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3496	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3497	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3498
3499	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3500	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3501	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3502
3503	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3504			     (!se_mask[2] && !se_mask[3]))) {
3505		raster_config_1 &= ~SE_PAIR_MAP_MASK;
3506
3507		if (!se_mask[0] && !se_mask[1]) {
3508			raster_config_1 |=
3509				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3510		} else {
3511			raster_config_1 |=
3512				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3513		}
3514	}
3515
3516	for (se = 0; se < num_se; se++) {
3517		unsigned raster_config_se = raster_config;
3518		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3519		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3520		int idx = (se / 2) * 2;
3521
3522		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3523			raster_config_se &= ~SE_MAP_MASK;
3524
3525			if (!se_mask[idx]) {
3526				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3527			} else {
3528				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3529			}
3530		}
3531
3532		pkr0_mask &= rb_mask;
3533		pkr1_mask &= rb_mask;
3534		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3535			raster_config_se &= ~PKR_MAP_MASK;
3536
3537			if (!pkr0_mask) {
3538				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3539			} else {
3540				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3541			}
3542		}
3543
3544		if (rb_per_se >= 2) {
3545			unsigned rb0_mask = 1 << (se * rb_per_se);
3546			unsigned rb1_mask = rb0_mask << 1;
3547
3548			rb0_mask &= rb_mask;
3549			rb1_mask &= rb_mask;
3550			if (!rb0_mask || !rb1_mask) {
3551				raster_config_se &= ~RB_MAP_PKR0_MASK;
3552
3553				if (!rb0_mask) {
3554					raster_config_se |=
3555						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3556				} else {
3557					raster_config_se |=
3558						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3559				}
3560			}
3561
3562			if (rb_per_se > 2) {
3563				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3564				rb1_mask = rb0_mask << 1;
3565				rb0_mask &= rb_mask;
3566				rb1_mask &= rb_mask;
3567				if (!rb0_mask || !rb1_mask) {
3568					raster_config_se &= ~RB_MAP_PKR1_MASK;
3569
3570					if (!rb0_mask) {
3571						raster_config_se |=
3572							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3573					} else {
3574						raster_config_se |=
3575							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3576					}
3577				}
3578			}
3579		}
3580
3581		/* GRBM_GFX_INDEX has a different offset on VI */
3582		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
3583		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3584		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3585	}
3586
3587	/* GRBM_GFX_INDEX has a different offset on VI */
3588	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3589}
3590
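/*
 * Discover the active RBs on every SE/SH, then program the raster
 * configuration: the golden values are written unchanged when nothing
 * is harvested (or nothing is enabled at all), otherwise the per-SE
 * harvested variant is used.  The per-SE/SH register values are cached
 * in adev->gfx.config.rb_config[][] for userspace queries.
 */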
3591static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3592{
3593	int i, j;
3594	u32 data;
3595	u32 raster_config = 0, raster_config_1 = 0;
3596	u32 active_rbs = 0;
3597	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3598					adev->gfx.config.max_sh_per_se;
3599	unsigned num_rb_pipes;
3600
3601	mutex_lock(&adev->grbm_idx_mutex);
3602	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3603		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3604			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3605			data = gfx_v8_0_get_rb_active_bitmap(adev);
3606			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3607					       rb_bitmap_width_per_sh);
3608		}
3609	}
3610	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3611
3612	adev->gfx.config.backend_enable_mask = active_rbs;
3613	adev->gfx.config.num_rbs = hweight32(active_rbs);
3614
3615	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3616			     adev->gfx.config.max_shader_engines, 16);
3617
3618	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3619
3620	if (!adev->gfx.config.backend_enable_mask ||
3621			adev->gfx.config.num_rbs >= num_rb_pipes) {
3622		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3623		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3624	} else {
3625		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3626							adev->gfx.config.backend_enable_mask,
3627							num_rb_pipes);
3628	}
3629
3630	/* cache the values for userspace */
3631	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3632		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3633			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3634			adev->gfx.config.rb_config[i][j].rb_backend_disable =
3635				RREG32(mmCC_RB_BACKEND_DISABLE);
3636			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
3637				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3638			adev->gfx.config.rb_config[i][j].raster_config =
3639				RREG32(mmPA_SC_RASTER_CONFIG);
3640			adev->gfx.config.rb_config[i][j].raster_config_1 =
3641				RREG32(mmPA_SC_RASTER_CONFIG_1);
3642		}
3643	}
3644	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3645	mutex_unlock(&adev->grbm_idx_mutex);
3646}
3647
3648#define DEFAULT_SH_MEM_BASES	(0x6000)
3649/**
3650 * gfx_v8_0_init_compute_vmid - init compute vmid sh_mem registers
3651 *
3652 * @adev: amdgpu_device pointer
3653 *
3654 * Initialize the sh_mem apertures for the compute (KFD) vmids and
3655 * clear their GDS, GWS and OA allocations.
3656 */
3657static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
3658{
3659	int i;
3660	uint32_t sh_mem_config;
3661	uint32_t sh_mem_bases;
3662
3663	/*
3664	 * Configure apertures:
3665	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
3666	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
3667	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
3668	 */
3669	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
3670
3671	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
3672			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
3673			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
3674			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
3675			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
3676			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
3677
3678	mutex_lock(&adev->srbm_mutex);
3679	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3680		vi_srbm_select(adev, 0, 0, 0, i);
3681		/* CP and shaders */
3682		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
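		/*
		 * APE1 base (1) above limit (0) leaves that aperture empty,
		 * i.e. effectively disabled (assuming the usual base/limit
		 * semantics of SH_MEM_APE1_*).
		 */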
3683		WREG32(mmSH_MEM_APE1_BASE, 1);
3684		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3685		WREG32(mmSH_MEM_BASES, sh_mem_bases);
3686	}
3687	vi_srbm_select(adev, 0, 0, 0, 0);
3688	mutex_unlock(&adev->srbm_mutex);
3689
3690	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
3691	 * access. These should be enabled by FW for target VMIDs. */
3692	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
3693		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
3694		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
3695		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
3696		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
3697	}
3698}
3699
3700static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
3701{
3702	int vmid;
3703
3704	/*
3705	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
3706	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
3707	 * the driver can enable them for graphics. VMID0 should maintain
3708	 * access so that HWS firmware can save/restore entries.
3709	 */
3710	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
3711		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
3712		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
3713		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
3714		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
3715	}
3716}
3717
3718static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3719{
3720	switch (adev->asic_type) {
3721	default:
3722		adev->gfx.config.double_offchip_lds_buf = 1;
3723		break;
3724	case CHIP_CARRIZO:
3725	case CHIP_STONEY:
3726		adev->gfx.config.double_offchip_lds_buf = 0;
3727		break;
3728	}
3729}
3730
3731static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
3732{
3733	u32 tmp, sh_static_mem_cfg;
3734	int i;
3735
3736	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
3737	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3738	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3739	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
3740
3741	gfx_v8_0_tiling_mode_table_init(adev);
3742	gfx_v8_0_setup_rb(adev);
3743	gfx_v8_0_get_cu_info(adev);
3744	gfx_v8_0_config_init(adev);
3745
3746	/* XXX SH_MEM regs */
3747	/* where to put LDS, scratch, GPUVM in FSA64 space */
3748	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
3749				   SWIZZLE_ENABLE, 1);
3750	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3751				   ELEMENT_SIZE, 1);
3752	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
3753				   INDEX_STRIDE, 3);
3754	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
3755
3756	mutex_lock(&adev->srbm_mutex);
3757	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
3758		vi_srbm_select(adev, 0, 0, 0, i);
3759		/* CP and shaders */
3760		if (i == 0) {
3761			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
3762			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3763			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3764					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3765			WREG32(mmSH_MEM_CONFIG, tmp);
3766			WREG32(mmSH_MEM_BASES, 0);
3767		} else {
3768			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
3769			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
3770			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
3771					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
3772			WREG32(mmSH_MEM_CONFIG, tmp);
3773			tmp = adev->gmc.shared_aperture_start >> 48;
3774			WREG32(mmSH_MEM_BASES, tmp);
3775		}
3776
3777		WREG32(mmSH_MEM_APE1_BASE, 1);
3778		WREG32(mmSH_MEM_APE1_LIMIT, 0);
3779	}
3780	vi_srbm_select(adev, 0, 0, 0, 0);
3781	mutex_unlock(&adev->srbm_mutex);
3782
3783	gfx_v8_0_init_compute_vmid(adev);
3784	gfx_v8_0_init_gds_vmid(adev);
3785
3786	mutex_lock(&adev->grbm_idx_mutex);
3787	/*
3788	 * making sure that the following register writes will be broadcasted
3789	 * to all the shaders
3790	 */
3791	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3792
3793	WREG32(mmPA_SC_FIFO_SIZE,
3794		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
3795			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
3796		   (adev->gfx.config.sc_prim_fifo_size_backend <<
3797			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
3798		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
3799			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
3800		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
3801			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
3802
3803	tmp = RREG32(mmSPI_ARB_PRIORITY);
3804	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
3805	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
3806	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
3807	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
3808	WREG32(mmSPI_ARB_PRIORITY, tmp);
3809
3810	mutex_unlock(&adev->grbm_idx_mutex);
3811
3812}
3813
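/*
 * Poll (up to adev->usec_timeout microseconds per unit) until the RLC
 * serdes masters report idle: first the CU master for every SE/SH
 * combination, then the non-CU masters (SE/GC/TC0/TC1).
 */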
3814static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3815{
3816	u32 i, j, k;
3817	u32 mask;
3818
3819	mutex_lock(&adev->grbm_idx_mutex);
3820	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3821		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3822			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3823			for (k = 0; k < adev->usec_timeout; k++) {
3824				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3825					break;
3826				udelay(1);
3827			}
3828			if (k == adev->usec_timeout) {
3829				gfx_v8_0_select_se_sh(adev, 0xffffffff,
3830						      0xffffffff, 0xffffffff, 0);
3831				mutex_unlock(&adev->grbm_idx_mutex);
3832				DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
3833					 i, j);
3834				return;
3835			}
3836		}
3837	}
3838	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3839	mutex_unlock(&adev->grbm_idx_mutex);
3840
3841	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3842		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3843		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3844		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3845	for (k = 0; k < adev->usec_timeout; k++) {
3846		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3847			break;
3848		udelay(1);
3849	}
3850}
3851
3852static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3853					       bool enable)
3854{
3855	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3856
3857	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3858	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3859	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3860	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3861
3862	WREG32(mmCP_INT_CNTL_RING0, tmp);
3863}
3864
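/* Point the RLC at the clear-state indirect buffer (CSIB) and its size. */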
3865static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
3866{
3867	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
3868	/* csib */
3869	WREG32(mmRLC_CSIB_ADDR_HI,
3870			adev->gfx.rlc.clear_state_gpu_addr >> 32);
3871	WREG32(mmRLC_CSIB_ADDR_LO,
3872			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3873	WREG32(mmRLC_CSIB_LENGTH,
3874			adev->gfx.rlc.clear_state_size);
3875}
3876
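/*
 * Walk the RLC register_list_format blob: 0xFFFFFFFF terminates an
 * entry (each entry's start offset is recorded in ind_start_offsets),
 * and the third word of every record is an indirect register index that
 * is de-duplicated into unique_indices[] and replaced, in place, by its
 * slot number.
 */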
3877static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
3878				int ind_offset,
3879				int list_size,
3880				int *unique_indices,
3881				int *indices_count,
3882				int max_indices,
3883				int *ind_start_offsets,
3884				int *offset_count,
3885				int max_offset)
3886{
3887	int indices;
3888	bool new_entry = true;
3889
3890	for (; ind_offset < list_size; ind_offset++) {
3891
3892		if (new_entry) {
3893			new_entry = false;
3894			ind_start_offsets[*offset_count] = ind_offset;
3895			*offset_count = *offset_count + 1;
3896			BUG_ON(*offset_count >= max_offset);
3897		}
3898
3899		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
3900			new_entry = true;
3901			continue;
3902		}
3903
3904		ind_offset += 2;
3905
3906		/* look for a matching index */
3907		for (indices = 0;
3908			indices < *indices_count;
3909			indices++) {
3910			if (unique_indices[indices] ==
3911				register_list_format[ind_offset])
3912				break;
3913		}
3914
3915		if (indices >= *indices_count) {
3916			unique_indices[*indices_count] =
3917				register_list_format[ind_offset];
3918			indices = *indices_count;
3919			*indices_count = *indices_count + 1;
3920			BUG_ON(*indices_count >= max_indices);
3921		}
3922
3923		register_list_format[ind_offset] = indices;
3924	}
3925}
3926
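/*
 * Upload the save/restore lists parsed above: the direct register list
 * goes into SRM ARAM, the (rewritten) indirect list plus its size and
 * starting offsets into GPM scratch, and each unique indirect index
 * into an RLC_SRM_INDEX_CNTL_ADDR/DATA register pair.
 */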
3927static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3928{
3929	int i, temp, data;
3930	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
3931	int indices_count = 0;
3932	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3933	int offset_count = 0;
3934
3935	int list_size;
3936	unsigned int *register_list_format =
3937		kmemdup(adev->gfx.rlc.register_list_format,
3938			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
3939	if (!register_list_format)
3940		return -ENOMEM;
3941
3942	gfx_v8_0_parse_ind_reg_list(register_list_format,
3943				RLC_FormatDirectRegListLength,
3944				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3945				unique_indices,
3946				&indices_count,
3947				ARRAY_SIZE(unique_indices),
3948				indirect_start_offsets,
3949				&offset_count,
3950				ARRAY_SIZE(indirect_start_offsets));
3951
3952	/* save and restore list */
3953	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
3954
3955	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
3956	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
3957		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
3958
3959	/* indirect list */
3960	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
3961	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
3962		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
3963
3964	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
3965	list_size = list_size >> 1;
3966	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
3967	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
3968
3969	/* starting offsets */
3970	WREG32(mmRLC_GPM_SCRATCH_ADDR,
3971		adev->gfx.rlc.starting_offsets_start);
3972	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
3973		WREG32(mmRLC_GPM_SCRATCH_DATA,
3974				indirect_start_offsets[i]);
3975
3976	/* unique indices */
3977	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
3978	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
3979	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
3980		if (unique_indices[i] != 0) {
3981			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
3982			WREG32(data + i, unique_indices[i] >> 20);
3983		}
3984	}
3985	kfree(register_list_format);
3986
3987	return 0;
3988}
3989
3990static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
3991{
3992	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
3993}
3994
3995static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
3996{
3997	uint32_t data;
3998
3999	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
4000
4001	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
4002	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
4003	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
4004	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
4005	WREG32(mmRLC_PG_DELAY, data);
4006
4007	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
4008	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
4009
4010}
4011
4012static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
4013						bool enable)
4014{
4015	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
4016}
4017
4018static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
4019						  bool enable)
4020{
4021	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
4022}
4023
4024static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
4025{
4026	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
4027}
4028
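/*
 * Power gating setup is only done on ASICs that support it:
 * Carrizo/Stoney additionally get the CP jump table and the always-on
 * CU mask programmed, while Polaris11/12 and VegaM skip those two
 * steps.
 */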
4029static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4030{
4031	if ((adev->asic_type == CHIP_CARRIZO) ||
4032	    (adev->asic_type == CHIP_STONEY)) {
4033		gfx_v8_0_init_csb(adev);
4034		gfx_v8_0_init_save_restore_list(adev);
4035		gfx_v8_0_enable_save_restore_machine(adev);
4036		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4037		gfx_v8_0_init_power_gating(adev);
4038		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4039	} else if ((adev->asic_type == CHIP_POLARIS11) ||
4040		   (adev->asic_type == CHIP_POLARIS12) ||
4041		   (adev->asic_type == CHIP_VEGAM)) {
4042		gfx_v8_0_init_csb(adev);
4043		gfx_v8_0_init_save_restore_list(adev);
4044		gfx_v8_0_enable_save_restore_machine(adev);
4045		gfx_v8_0_init_power_gating(adev);
4046	}
4047
4048}
4049
4050static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
4051{
4052	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
4053
4054	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4055	gfx_v8_0_wait_for_rlc_serdes(adev);
4056}
4057
4058static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
4059{
4060	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4061	udelay(50);
4062
4063	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
4064	udelay(50);
4065}
4066
4067static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
4068{
4069	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
4070
4071	/* on APUs (Carrizo), the CP interrupt is enabled only after the CP is initialized */
4072	if (!(adev->flags & AMD_IS_APU))
4073		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4074
4075	udelay(50);
4076}
4077
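/*
 * Restart the RLC: stop, reset, redo the power-gating setup, start.
 * Under SR-IOV only the CSB is refreshed; the rest of the sequence is
 * presumably handled by the host.
 */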
4078static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4079{
4080	if (amdgpu_sriov_vf(adev)) {
4081		gfx_v8_0_init_csb(adev);
4082		return 0;
4083	}
4084
4085	adev->gfx.rlc.funcs->stop(adev);
4086	adev->gfx.rlc.funcs->reset(adev);
4087	gfx_v8_0_init_pg(adev);
4088	adev->gfx.rlc.funcs->start(adev);
4089
4090	return 0;
4091}
4092
4093static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4094{
4095	u32 tmp = RREG32(mmCP_ME_CNTL);
4096
4097	if (enable) {
4098		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4099		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4100		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4101	} else {
4102		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4103		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4104		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
4105	}
4106	WREG32(mmCP_ME_CNTL, tmp);
4107	udelay(50);
4108}
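
/*
 * REG_SET_FIELD(), used in the halt/unhalt sequence above and throughout
 * this file, is a read-modify-write on a single named bitfield: clear the
 * field's bits, then OR in the new value at the field's shift. A minimal
 * stand-alone sketch of the operation (mask and shift taken as parameters
 * here; the real macro derives them from the register headers):
 */
#if 0	/* illustrative sketch only, not compiled into the driver */
static inline u32 set_field(u32 reg, u32 mask, u32 shift, u32 val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}
#endif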
4109
4110static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4111{
4112	u32 count = 0;
4113	const struct cs_section_def *sect = NULL;
4114	const struct cs_extent_def *ext = NULL;
4115
4116	/* begin clear state */
4117	count += 2;
4118	/* context control state */
4119	count += 3;
4120
4121	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4122		for (ext = sect->section; ext->extent != NULL; ++ext) {
4123			if (sect->id == SECT_CONTEXT)
4124				count += 2 + ext->reg_count;
4125			else
4126				return 0;
4127		}
4128	}
4129	/* pa_sc_raster_config/pa_sc_raster_config1 */
4130	count += 4;
4131	/* end clear state */
4132	count += 2;
4133	/* clear state */
4134	count += 2;
4135
4136	return count;
4137}
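
/*
 * Dword accounting for the size computed above, assuming vi_cs_data holds
 * only SECT_CONTEXT sections whose extents carry reg_count[i] registers:
 *
 *	size = 2			begin-clear-state preamble
 *	     + 3			CONTEXT_CONTROL packet
 *	     + sum(2 + reg_count[i])	one SET_CONTEXT_REG per extent
 *	     + 4			pa_sc_raster_config/_config1 pair
 *	     + 2			end-clear-state preamble
 *	     + 2			CLEAR_STATE packet
 *
 * gfx_v8_0_cp_gfx_start() below allocates this plus four more dwords for
 * the SET_BASE packet that initializes the CE partitions.
 */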
4138
4139static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
4140{
4141	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
4142	const struct cs_section_def *sect = NULL;
4143	const struct cs_extent_def *ext = NULL;
4144	int r, i;
4145
4146	/* init the CP */
4147	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
4148	WREG32(mmCP_ENDIAN_SWAP, 0);
4149	WREG32(mmCP_DEVICE_ID, 1);
4150
4151	gfx_v8_0_cp_gfx_enable(adev, true);
4152
4153	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
4154	if (r) {
4155		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
4156		return r;
4157	}
4158
4159	/* clear state buffer */
4160	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4161	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4162
4163	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4164	amdgpu_ring_write(ring, 0x80000000);
4165	amdgpu_ring_write(ring, 0x80000000);
4166
4167	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4168		for (ext = sect->section; ext->extent != NULL; ++ext) {
4169			if (sect->id == SECT_CONTEXT) {
4170				amdgpu_ring_write(ring,
4171				       PACKET3(PACKET3_SET_CONTEXT_REG,
4172					       ext->reg_count));
4173				amdgpu_ring_write(ring,
4174				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4175				for (i = 0; i < ext->reg_count; i++)
4176					amdgpu_ring_write(ring, ext->extent[i]);
4177			}
4178		}
4179	}
4180
4181	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4182	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4183	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4184	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
4185
4186	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4187	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
4188
4189	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
4190	amdgpu_ring_write(ring, 0);
4191
4192	/* init the CE partitions */
4193	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4194	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4195	amdgpu_ring_write(ring, 0x8000);
4196	amdgpu_ring_write(ring, 0x8000);
4197
4198	amdgpu_ring_commit(ring);
4199
4200	return 0;
4201}
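
/*
 * Every ring write above is part of a PM4 type-3 packet: a 32-bit header
 * encoding packet type, opcode and payload length, followed by the payload
 * dwords. A sketch of the header layout assumed by the PACKET3() macro,
 * where count is the number of payload dwords minus one:
 */
#if 0	/* illustrative sketch only, not compiled into the driver */
static inline u32 pm4_type3_header(u32 opcode, u32 count)
{
	return (3u << 30) | ((opcode & 0xff) << 8) | (count & 0x3fff);
}
#endif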
4202static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
4203{
4204	u32 tmp;
4205	/* no gfx doorbells on iceland */
4206	if (adev->asic_type == CHIP_TOPAZ)
4207		return;
4208
4209	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
4210
4211	if (ring->use_doorbell) {
4212		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4213				DOORBELL_OFFSET, ring->doorbell_index);
4214		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4215						DOORBELL_HIT, 0);
4216		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4217					    DOORBELL_EN, 1);
4218	} else {
4219		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
4220	}
4221
4222	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
4223
4224	if (adev->flags & AMD_IS_APU)
4225		return;
4226
4227	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
4228					DOORBELL_RANGE_LOWER,
4229					adev->doorbell_index.gfx_ring0);
4230	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
4231
4232	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
4233		CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
4234}
4235
4236static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4237{
4238	struct amdgpu_ring *ring;
4239	u32 tmp;
4240	u32 rb_bufsz;
4241	u64 rb_addr, rptr_addr, wptr_gpu_addr;
4242
4243	/* Set the write pointer delay */
4244	WREG32(mmCP_RB_WPTR_DELAY, 0);
4245
4246	/* set the RB to use vmid 0 */
4247	WREG32(mmCP_RB_VMID, 0);
4248
4249	/* Set ring buffer size */
4250	ring = &adev->gfx.gfx_ring[0];
4251	rb_bufsz = order_base_2(ring->ring_size / 8);
4252	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
4253	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
4254	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
4255	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
4256#ifdef __BIG_ENDIAN
4257	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
4258#endif
4259	WREG32(mmCP_RB0_CNTL, tmp);
4260
4261	/* Initialize the ring buffer's read and write pointers */
4262	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
4263	ring->wptr = 0;
4264	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4265
4266	/* set the wb address whether it's enabled or not */
4267	rptr_addr = ring->rptr_gpu_addr;
4268	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
4269	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
4270
4271	wptr_gpu_addr = ring->wptr_gpu_addr;
4272	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
4273	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
4274	mdelay(1);
4275	WREG32(mmCP_RB0_CNTL, tmp);
4276
4277	rb_addr = ring->gpu_addr >> 8;
4278	WREG32(mmCP_RB0_BASE, rb_addr);
4279	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
4280
4281	gfx_v8_0_set_cpg_door_bell(adev, ring);
4282	/* start the ring */
4283	amdgpu_ring_clear_ring(ring);
4284	gfx_v8_0_cp_gfx_start(adev);
4285
4286	return 0;
4287}
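
/*
 * Worked example of the RB_BUFSZ encoding above: the field holds log2 of
 * the ring size in 8-byte units, and order_base_2() rounds up for sizes
 * that are not a power of two. For a 64 KiB ring:
 *
 *	rb_bufsz = order_base_2(65536 / 8) = order_base_2(8192) = 13
 */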
4288
4289static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4290{
4291	if (enable) {
4292		WREG32(mmCP_MEC_CNTL, 0);
4293	} else {
4294		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4295		adev->gfx.kiq[0].ring.sched.ready = false;
4296	}
4297	udelay(50);
4298}
4299
4300/* KIQ functions */
4301static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
4302{
4303	uint32_t tmp;
4304	struct amdgpu_device *adev = ring->adev;
4305
4306	/* tell RLC which is KIQ queue */
4307	tmp = RREG32(mmRLC_CP_SCHEDULERS);
4308	tmp &= 0xffffff00;
4309	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4310	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4311	tmp |= 0x80;
4312	WREG32(mmRLC_CP_SCHEDULERS, tmp);
4313}
4314
4315static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4316{
4317	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
4318	uint64_t queue_mask = 0;
4319	int r, i;
4320
4321	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
4322		if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
4323			continue;
4324
4325		/* This situation may be hit in the future if a new HW
4326		 * generation exposes more than 64 queues. If so, the
4327		 * definition of queue_mask needs updating */
4328		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
4329			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
4330			break;
4331		}
4332
4333		queue_mask |= (1ull << i);
4334	}
4335
4336	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
4337	if (r) {
4338		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4339		return r;
4340	}
4341	/* set resources */
4342	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
4343	amdgpu_ring_write(kiq_ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
4344	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
4345	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
4346	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
4347	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
4348	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
4349	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
4350	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4351		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4352		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
4353		uint64_t wptr_addr = ring->wptr_gpu_addr;
4354
4355		/* map queues */
4356		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
4357		/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
4358		amdgpu_ring_write(kiq_ring,
4359				  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
4360		amdgpu_ring_write(kiq_ring,
4361				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
4362				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
4363				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
4364				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
4365		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
4366		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
4367		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
4368		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4369	}
4370
4371	amdgpu_ring_commit(kiq_ring);
4372
4373	return 0;
4374}
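
/*
 * queue_mask above carries one bit per usable MEC queue slot and is handed
 * to the KIQ in the SET_RESOURCES packet; each compute ring is then
 * attached with its own MAP_QUEUES packet pointing at the ring's MQD and
 * wptr poll address. A stand-alone sketch of the mask construction, using
 * the bitmap helpers from <linux/bitops.h>:
 */
#if 0	/* illustrative sketch only, not compiled into the driver */
static u64 build_queue_mask(const unsigned long *bitmap, unsigned int nbits)
{
	u64 mask = 0;
	unsigned int i;

	for (i = 0; i < nbits && i < 64; i++)
		if (test_bit(i, bitmap))
			mask |= 1ull << i;
	return mask;
}
#endif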
4375
4376static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
4377{
4378	int i, r = 0;
4379
4380	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4381		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
4382		for (i = 0; i < adev->usec_timeout; i++) {
4383			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4384				break;
4385			udelay(1);
4386		}
4387		if (i == adev->usec_timeout)
4388			r = -ETIMEDOUT;
4389	}
4390	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
4391	WREG32(mmCP_HQD_PQ_RPTR, 0);
4392	WREG32(mmCP_HQD_PQ_WPTR, 0);
4393
4394	return r;
4395}
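
/*
 * The dequeue request above follows the driver's usual register-poll
 * pattern: issue the request, then poll the status bit once per
 * microsecond for up to adev->usec_timeout iterations before reporting
 * -ETIMEDOUT. A generic sketch of that shape, which also appears in
 * gfx_v8_0_wait_for_idle() and the safe-mode handshake further down:
 */
#if 0	/* illustrative sketch only, not compiled into the driver */
static int poll_bit_clear(u32 (*read_reg)(void), u32 mask,
			  unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (!(read_reg() & mask))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
#endif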
4396
4397static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4398{
4399	struct amdgpu_device *adev = ring->adev;
4400
4401	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4402		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
4403			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4404			mqd->cp_hqd_queue_priority =
4405				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4406		}
4407	}
4408}
4409
4410static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4411{
4412	struct amdgpu_device *adev = ring->adev;
4413	struct vi_mqd *mqd = ring->mqd_ptr;
4414	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4415	uint32_t tmp;
4416
4417	mqd->header = 0xC0310800;
4418	mqd->compute_pipelinestat_enable = 0x00000001;
4419	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4420	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4421	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4422	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4423	mqd->compute_misc_reserved = 0x00000003;
4424	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
4425						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4426	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
4427						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
4428	eop_base_addr = ring->eop_gpu_addr >> 8;
4429	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4430	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4431
4432	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4433	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
4434	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4435			(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
4436
4437	mqd->cp_hqd_eop_control = tmp;
4438
4439	/* enable doorbell? */
4440	tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4441			    CP_HQD_PQ_DOORBELL_CONTROL,
4442			    DOORBELL_EN,
4443			    ring->use_doorbell ? 1 : 0);
4444
4445	mqd->cp_hqd_pq_doorbell_control = tmp;
4446
4447	/* set the pointer to the MQD */
4448	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
4449	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
4450
4451	/* set MQD vmid to 0 */
4452	tmp = RREG32(mmCP_MQD_CONTROL);
4453	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4454	mqd->cp_mqd_control = tmp;
4455
4456	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4457	hqd_gpu_addr = ring->gpu_addr >> 8;
4458	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4459	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4460
4461	/* set up the HQD, this is similar to CP_RB0_CNTL */
4462	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
4463	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4464			    (order_base_2(ring->ring_size / 4) - 1));
4465	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4466			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4467#ifdef __BIG_ENDIAN
4468	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
4469#endif
4470	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4471	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
4472	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4473	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4474	mqd->cp_hqd_pq_control = tmp;
4475
4476	/* set the wb address whether it's enabled or not */
4477	wb_gpu_addr = ring->rptr_gpu_addr;
4478	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4479	mqd->cp_hqd_pq_rptr_report_addr_hi =
4480		upper_32_bits(wb_gpu_addr) & 0xffff;
4481
4482	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4483	wb_gpu_addr = ring->wptr_gpu_addr;
4484	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4485	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4486
4487	tmp = 0;
4488	/* enable the doorbell if requested */
4489	if (ring->use_doorbell) {
4490		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
4491		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4492				DOORBELL_OFFSET, ring->doorbell_index);
4493
4494		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4495					 DOORBELL_EN, 1);
4496		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4497					 DOORBELL_SOURCE, 0);
4498		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4499					 DOORBELL_HIT, 0);
4500	}
4501
4502	mqd->cp_hqd_pq_doorbell_control = tmp;
4503
4504	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4505	ring->wptr = 0;
4506	mqd->cp_hqd_pq_wptr = ring->wptr;
4507	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
4508
4509	/* set the vmid for the queue */
4510	mqd->cp_hqd_vmid = 0;
4511
4512	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
4513	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
4514	mqd->cp_hqd_persistent_state = tmp;
4515
4516	/* set MTYPE */
4517	tmp = RREG32(mmCP_HQD_IB_CONTROL);
4518	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4519	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
4520	mqd->cp_hqd_ib_control = tmp;
4521
4522	tmp = RREG32(mmCP_HQD_IQ_TIMER);
4523	tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
4524	mqd->cp_hqd_iq_timer = tmp;
4525
4526	tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
4527	tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
4528	mqd->cp_hqd_ctx_save_control = tmp;
4529
4530	/* defaults */
4531	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
4532	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
4533	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
4534	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
4535	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
4536	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
4537	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
4538	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
4539	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
4540	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
4541	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
4542	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
4543
4544	/* set static priority for a queue/ring */
4545	gfx_v8_0_mqd_set_priority(ring, mqd);
4546	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
4547
4548	/* the map_queues packet doesn't need to activate the queue,
4549	 * so only the KIQ needs to set this field.
4550	 */
4551	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
4552		mqd->cp_hqd_active = 1;
4553
4554	return 0;
4555}
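
/*
 * Worked example of the EOP_SIZE encoding used above: the register stores
 * log2(size in dwords) - 1, i.e. size = 2^(EOP_SIZE+1) dwords. With
 * GFX8_MEC_HPD_SIZE = 4096 bytes = 1024 dwords:
 *
 *	EOP_SIZE = order_base_2(1024) - 1 = 10 - 1 = 9
 *	check:   2^(9+1) = 1024 dwords = 4096 bytes
 *
 * CP_HQD_PQ_CONTROL.QUEUE_SIZE applies the same log2-minus-one encoding to
 * the queue's ring buffer.
 */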
4556
4557static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
4558			struct vi_mqd *mqd)
4559{
4560	uint32_t mqd_reg;
4561	uint32_t *mqd_data;
4562
4563	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
4564	mqd_data = &mqd->cp_mqd_base_addr_lo;
4565
4566	/* disable wptr polling */
4567	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
4568
4569	/* program all HQD registers */
4570	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
4571		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4572
4573	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
4574	 * This is safe since EOP RPTR==WPTR for any inactive HQD
4575	 * on ASICs that do not support context-save.
4576	 * EOP writes/reads can start anywhere in the ring.
4577	 */
4578	if (adev->asic_type != CHIP_TONGA) {
4579		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
4580		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
4581		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
4582	}
4583
4584	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
4585		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4586
4587	/* activate the HQD */
4588	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
4589		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
4590
4591	return 0;
4592}
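
/*
 * The WREG32() loops above rely on struct vi_mqd mirroring the HQD
 * register file dword-for-dword from cp_mqd_base_addr_lo onwards, so the
 * value for register mmX is simply mqd_data[mmX - mmCP_MQD_BASE_ADDR].
 * Note the ordering: the final loop programs mmCP_HQD_ACTIVE last, so the
 * queue only goes live once every other HQD register holds its MQD value.
 */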
4593
4594static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
4595{
4596	struct amdgpu_device *adev = ring->adev;
4597	struct vi_mqd *mqd = ring->mqd_ptr;
4598
4599	gfx_v8_0_kiq_setting(ring);
4600
4601	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4602		/* reset MQD to a clean status */
4603		if (adev->gfx.kiq[0].mqd_backup)
4604			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));
4605
4606		/* reset ring buffer */
4607		ring->wptr = 0;
4608		amdgpu_ring_clear_ring(ring);
4609		mutex_lock(&adev->srbm_mutex);
4610		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4611		gfx_v8_0_mqd_commit(adev, mqd);
4612		vi_srbm_select(adev, 0, 0, 0, 0);
4613		mutex_unlock(&adev->srbm_mutex);
4614	} else {
4615		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4616		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4617		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4618		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4619			amdgpu_ring_clear_ring(ring);
4620		mutex_lock(&adev->srbm_mutex);
4621		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4622		gfx_v8_0_mqd_init(ring);
4623		gfx_v8_0_mqd_commit(adev, mqd);
4624		vi_srbm_select(adev, 0, 0, 0, 0);
4625		mutex_unlock(&adev->srbm_mutex);
4626
4627		if (adev->gfx.kiq[0].mqd_backup)
4628			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
4629	}
4630
4631	return 0;
4632}
4633
4634static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4635{
4636	struct amdgpu_device *adev = ring->adev;
4637	struct vi_mqd *mqd = ring->mqd_ptr;
4638	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4639
4640	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4641		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4642		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4643		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4644		mutex_lock(&adev->srbm_mutex);
4645		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4646		gfx_v8_0_mqd_init(ring);
4647		vi_srbm_select(adev, 0, 0, 0, 0);
4648		mutex_unlock(&adev->srbm_mutex);
4649
4650		if (adev->gfx.mec.mqd_backup[mqd_idx])
4651			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4652	} else {
4653		/* restore MQD to a clean status */
4654		if (adev->gfx.mec.mqd_backup[mqd_idx])
4655			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4656		/* reset ring buffer */
4657		ring->wptr = 0;
4658		amdgpu_ring_clear_ring(ring);
4659	}
4660	return 0;
4661}
4662
4663static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
4664{
4665	if (adev->asic_type > CHIP_TONGA) {
4666		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
4667		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
4668	}
4669	/* enable doorbells */
4670	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4671}
4672
4673static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4674{
4675	struct amdgpu_ring *ring;
4676	int r;
4677
4678	ring = &adev->gfx.kiq[0].ring;
4679
4680	r = amdgpu_bo_reserve(ring->mqd_obj, false);
4681	if (unlikely(r != 0))
4682		return r;
4683
4684	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4685	if (unlikely(r != 0)) {
4686		amdgpu_bo_unreserve(ring->mqd_obj);
4687		return r;
4688	}
4689
4690	gfx_v8_0_kiq_init_queue(ring);
4691	amdgpu_bo_kunmap(ring->mqd_obj);
4692	ring->mqd_ptr = NULL;
4693	amdgpu_bo_unreserve(ring->mqd_obj);
4694	return 0;
4695}
4696
4697static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4698{
4699	struct amdgpu_ring *ring = NULL;
4700	int r = 0, i;
4701
4702	gfx_v8_0_cp_compute_enable(adev, true);
4703
4704	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4705		ring = &adev->gfx.compute_ring[i];
4706
4707		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4708		if (unlikely(r != 0))
4709			goto done;
4710		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4711		if (!r) {
4712			r = gfx_v8_0_kcq_init_queue(ring);
4713			amdgpu_bo_kunmap(ring->mqd_obj);
4714			ring->mqd_ptr = NULL;
4715		}
4716		amdgpu_bo_unreserve(ring->mqd_obj);
4717		if (r)
4718			goto done;
4719	}
4720
4721	gfx_v8_0_set_mec_doorbell_range(adev);
4722
4723	r = gfx_v8_0_kiq_kcq_enable(adev);
4724	if (r)
4725		goto done;
4726
4727done:
4728	return r;
4729}
4730
4731static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4732{
4733	int r, i;
4734	struct amdgpu_ring *ring;
4735
4736	/* collect all the ring_tests here, gfx, kiq, compute */
4737	ring = &adev->gfx.gfx_ring[0];
4738	r = amdgpu_ring_test_helper(ring);
4739	if (r)
4740		return r;
4741
4742	ring = &adev->gfx.kiq[0].ring;
4743	r = amdgpu_ring_test_helper(ring);
4744	if (r)
4745		return r;
4746
4747	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4748		ring = &adev->gfx.compute_ring[i];
4749		amdgpu_ring_test_helper(ring);
4750	}
4751
4752	return 0;
4753}
4754
4755static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4756{
4757	int r;
4758
4759	if (!(adev->flags & AMD_IS_APU))
4760		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4761
4762	r = gfx_v8_0_kiq_resume(adev);
4763	if (r)
4764		return r;
4765
4766	r = gfx_v8_0_cp_gfx_resume(adev);
4767	if (r)
4768		return r;
4769
4770	r = gfx_v8_0_kcq_resume(adev);
4771	if (r)
4772		return r;
4773
4774	r = gfx_v8_0_cp_test_all_rings(adev);
4775	if (r)
4776		return r;
4777
4778	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4779
4780	return 0;
4781}
4782
4783static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
4784{
4785	gfx_v8_0_cp_gfx_enable(adev, enable);
4786	gfx_v8_0_cp_compute_enable(adev, enable);
4787}
4788
4789static int gfx_v8_0_hw_init(void *handle)
4790{
4791	int r;
4792	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4793
4794	gfx_v8_0_init_golden_registers(adev);
4795	gfx_v8_0_constants_init(adev);
4796
4797	r = adev->gfx.rlc.funcs->resume(adev);
4798	if (r)
4799		return r;
4800
4801	r = gfx_v8_0_cp_resume(adev);
4802
4803	return r;
4804}
4805
4806static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
4807{
4808	int r, i;
4809	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
4810
4811	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4812	if (r)
4813		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4814
4815	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4816		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4817
4818		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
4819		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
4820						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
4821						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
4822						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
4823						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
4824		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
4825		amdgpu_ring_write(kiq_ring, 0);
4826		amdgpu_ring_write(kiq_ring, 0);
4827		amdgpu_ring_write(kiq_ring, 0);
4828	}
4829	r = amdgpu_ring_test_helper(kiq_ring);
4830	if (r)
4831		DRM_ERROR("KCQ disable failed\n");
4832
4833	return r;
4834}
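
/*
 * Dword accounting for the allocation above: each UNMAP_QUEUES packet is
 * six dwords (PACKET3 header, action/selector word, DOORBELL_OFFSET0 word,
 * plus three reserved zero dwords), hence 6 * num_compute_rings for the
 * whole KCQ teardown.
 */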
4835
4836static bool gfx_v8_0_is_idle(void *handle)
4837{
4838	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4839
4840	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4841		|| RREG32(mmGRBM_STATUS2) != 0x8)
4842		return false;
4843	else
4844		return true;
4845}
4846
4847static bool gfx_v8_0_rlc_is_idle(void *handle)
4848{
4849	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4850
4851	if (RREG32(mmGRBM_STATUS2) != 0x8)
4852		return false;
4853	else
4854		return true;
4855}
4856
4857static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4858{
4859	unsigned int i;
4860	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4861
4862	for (i = 0; i < adev->usec_timeout; i++) {
4863		if (gfx_v8_0_rlc_is_idle(handle))
4864			return 0;
4865
4866		udelay(1);
4867	}
4868	return -ETIMEDOUT;
4869}
4870
4871static int gfx_v8_0_wait_for_idle(void *handle)
4872{
4873	unsigned int i;
4874	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4875
4876	for (i = 0; i < adev->usec_timeout; i++) {
4877		if (gfx_v8_0_is_idle(handle))
4878			return 0;
4879
4880		udelay(1);
4881	}
4882	return -ETIMEDOUT;
4883}
4884
4885static int gfx_v8_0_hw_fini(void *handle)
4886{
4887	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4888
4889	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4890	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4891
4892	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4893
4894	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
4895
4896	/* disable KCQ to avoid CPC touch memory not valid anymore */
4897	gfx_v8_0_kcq_disable(adev);
4898
4899	if (amdgpu_sriov_vf(adev)) {
4900		pr_debug("For SRIOV client, shouldn't do anything.\n");
4901		return 0;
4902	}
4903	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4904	if (!gfx_v8_0_wait_for_idle(adev))
4905		gfx_v8_0_cp_enable(adev, false);
4906	else
4907		pr_err("cp is busy, skip halt cp\n");
4908	if (!gfx_v8_0_wait_for_rlc_idle(adev))
4909		adev->gfx.rlc.funcs->stop(adev);
4910	else
4911		pr_err("rlc is busy, skip halt rlc\n");
4912	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4913
4914	return 0;
4915}
4916
4917static int gfx_v8_0_suspend(void *handle)
4918{
4919	return gfx_v8_0_hw_fini(handle);
4920}
4921
4922static int gfx_v8_0_resume(void *handle)
4923{
4924	return gfx_v8_0_hw_init(handle);
4925}
4926
4927static bool gfx_v8_0_check_soft_reset(void *handle)
4928{
4929	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4930	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4931	u32 tmp;
4932
4933	/* GRBM_STATUS */
4934	tmp = RREG32(mmGRBM_STATUS);
4935	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4936		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4937		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4938		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4939		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4940		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
4941		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4942		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4943						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4944		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4945						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4946		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4947						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4948	}
4949
4950	/* GRBM_STATUS2 */
4951	tmp = RREG32(mmGRBM_STATUS2);
4952	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4953		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4954						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4955
4956	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
4957	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
4958	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
4959		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4960						SOFT_RESET_CPF, 1);
4961		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4962						SOFT_RESET_CPC, 1);
4963		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4964						SOFT_RESET_CPG, 1);
4965		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
4966						SOFT_RESET_GRBM, 1);
4967	}
4968
4969	/* SRBM_STATUS */
4970	tmp = RREG32(mmSRBM_STATUS);
4971	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
4972		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4973						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4974	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
4975		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4976						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
4977
4978	if (grbm_soft_reset || srbm_soft_reset) {
4979		adev->gfx.grbm_soft_reset = grbm_soft_reset;
4980		adev->gfx.srbm_soft_reset = srbm_soft_reset;
4981		return true;
4982	} else {
4983		adev->gfx.grbm_soft_reset = 0;
4984		adev->gfx.srbm_soft_reset = 0;
4985		return false;
4986	}
4987}
4988
4989static int gfx_v8_0_pre_soft_reset(void *handle)
4990{
4991	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4992	u32 grbm_soft_reset = 0;
4993
4994	if ((!adev->gfx.grbm_soft_reset) &&
4995	    (!adev->gfx.srbm_soft_reset))
4996		return 0;
4997
4998	grbm_soft_reset = adev->gfx.grbm_soft_reset;
4999
5000	/* stop the rlc */
5001	adev->gfx.rlc.funcs->stop(adev);
5002
5003	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5004	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5005		/* Disable GFX parsing/prefetching */
5006		gfx_v8_0_cp_gfx_enable(adev, false);
5007
5008	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5009	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5010	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5011	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5012		int i;
5013
5014		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5015			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5016
5017			mutex_lock(&adev->srbm_mutex);
5018			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5019			gfx_v8_0_deactivate_hqd(adev, 2);
5020			vi_srbm_select(adev, 0, 0, 0, 0);
5021			mutex_unlock(&adev->srbm_mutex);
5022		}
5023		/* Disable MEC parsing/prefetching */
5024		gfx_v8_0_cp_compute_enable(adev, false);
5025	}
5026
5027	return 0;
5028}
5029
5030static int gfx_v8_0_soft_reset(void *handle)
5031{
5032	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5033	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
5034	u32 tmp;
5035
5036	if ((!adev->gfx.grbm_soft_reset) &&
5037	    (!adev->gfx.srbm_soft_reset))
5038		return 0;
5039
5040	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5041	srbm_soft_reset = adev->gfx.srbm_soft_reset;
5042
5043	if (grbm_soft_reset || srbm_soft_reset) {
5044		tmp = RREG32(mmGMCON_DEBUG);
5045		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
5046		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
5047		WREG32(mmGMCON_DEBUG, tmp);
5048		udelay(50);
5049	}
5050
5051	if (grbm_soft_reset) {
5052		tmp = RREG32(mmGRBM_SOFT_RESET);
5053		tmp |= grbm_soft_reset;
5054		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
5055		WREG32(mmGRBM_SOFT_RESET, tmp);
5056		tmp = RREG32(mmGRBM_SOFT_RESET);
5057
5058		udelay(50);
5059
5060		tmp &= ~grbm_soft_reset;
5061		WREG32(mmGRBM_SOFT_RESET, tmp);
5062		tmp = RREG32(mmGRBM_SOFT_RESET);
5063	}
5064
5065	if (srbm_soft_reset) {
5066		tmp = RREG32(mmSRBM_SOFT_RESET);
5067		tmp |= srbm_soft_reset;
5068		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
5069		WREG32(mmSRBM_SOFT_RESET, tmp);
5070		tmp = RREG32(mmSRBM_SOFT_RESET);
5071
5072		udelay(50);
5073
5074		tmp &= ~srbm_soft_reset;
5075		WREG32(mmSRBM_SOFT_RESET, tmp);
5076		tmp = RREG32(mmSRBM_SOFT_RESET);
5077	}
5078
5079	if (grbm_soft_reset || srbm_soft_reset) {
5080		tmp = RREG32(mmGMCON_DEBUG);
5081		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
5082		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
5083		WREG32(mmGMCON_DEBUG, tmp);
5084	}
5085
5086	/* Wait a little for things to settle down */
5087	udelay(50);
5088
5089	return 0;
5090}
5091
5092static int gfx_v8_0_post_soft_reset(void *handle)
5093{
5094	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5095	u32 grbm_soft_reset = 0;
5096
5097	if ((!adev->gfx.grbm_soft_reset) &&
5098	    (!adev->gfx.srbm_soft_reset))
5099		return 0;
5100
5101	grbm_soft_reset = adev->gfx.grbm_soft_reset;
5102
5103	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5104	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
5105	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
5106	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
5107		int i;
5108
5109		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5110			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5111
5112			mutex_lock(&adev->srbm_mutex);
5113			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5114			gfx_v8_0_deactivate_hqd(adev, 2);
5115			vi_srbm_select(adev, 0, 0, 0, 0);
5116			mutex_unlock(&adev->srbm_mutex);
5117		}
5118		gfx_v8_0_kiq_resume(adev);
5119		gfx_v8_0_kcq_resume(adev);
5120	}
5121
5122	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
5123	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5124		gfx_v8_0_cp_gfx_resume(adev);
5125
5126	gfx_v8_0_cp_test_all_rings(adev);
5127
5128	adev->gfx.rlc.funcs->start(adev);
5129
5130	return 0;
5131}
5132
5133/**
5134 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5135 *
5136 * @adev: amdgpu_device pointer
5137 *
5138 * Fetches a GPU clock counter snapshot.
5139 * Returns the 64 bit clock counter snapshot.
5140 */
5141static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5142{
5143	uint64_t clock;
5144
5145	mutex_lock(&adev->gfx.gpu_clock_mutex);
5146	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
5147	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5148		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5149	mutex_unlock(&adev->gfx.gpu_clock_mutex);
5150	return clock;
5151}
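
/*
 * The write to RLC_CAPTURE_GPU_CLOCK_COUNT latches the free-running
 * counter so that the two following reads return one coherent 64-bit
 * sample; gpu_clock_mutex keeps a concurrent caller from re-latching
 * between the LSB and MSB reads.
 */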
5152
5153static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5154					  uint32_t vmid,
5155					  uint32_t gds_base, uint32_t gds_size,
5156					  uint32_t gws_base, uint32_t gws_size,
5157					  uint32_t oa_base, uint32_t oa_size)
5158{
5159	/* GDS Base */
5160	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5161	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5162				WRITE_DATA_DST_SEL(0)));
5163	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
5164	amdgpu_ring_write(ring, 0);
5165	amdgpu_ring_write(ring, gds_base);
5166
5167	/* GDS Size */
5168	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5169	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5170				WRITE_DATA_DST_SEL(0)));
5171	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
5172	amdgpu_ring_write(ring, 0);
5173	amdgpu_ring_write(ring, gds_size);
5174
5175	/* GWS */
5176	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5177	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5178				WRITE_DATA_DST_SEL(0)));
5179	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
5180	amdgpu_ring_write(ring, 0);
5181	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5182
5183	/* OA */
5184	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5185	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5186				WRITE_DATA_DST_SEL(0)));
5187	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
5188	amdgpu_ring_write(ring, 0);
5189	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
5190}
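
/*
 * The OA value written above is a contiguous bitmask covering
 * [oa_base, oa_base + oa_size): (1 << (base + size)) - (1 << base).
 * Worked example with oa_base = 2, oa_size = 3:
 *
 *	(1 << 5) - (1 << 2) = 32 - 4 = 0x1c = 0b11100
 */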
5191
5192static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5193{
5194	WREG32(mmSQ_IND_INDEX,
5195		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5196		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5197		(address << SQ_IND_INDEX__INDEX__SHIFT) |
5198		(SQ_IND_INDEX__FORCE_READ_MASK));
5199	return RREG32(mmSQ_IND_DATA);
5200}
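
/*
 * SQ_IND_INDEX/SQ_IND_DATA form an indirect window into per-wave state:
 * the index write selects the SIMD, wave and register, and the data read
 * returns that register's value. wave_read_regs() below additionally sets
 * AUTO_INCR, so back-to-back SQ_IND_DATA reads walk a whole register
 * range, which is how blocks of SGPRs are dumped.
 */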
5201
5202static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5203			   uint32_t wave, uint32_t thread,
5204			   uint32_t regno, uint32_t num, uint32_t *out)
5205{
5206	WREG32(mmSQ_IND_INDEX,
5207		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5208		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5209		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
5210		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5211		(SQ_IND_INDEX__FORCE_READ_MASK) |
5212		(SQ_IND_INDEX__AUTO_INCR_MASK));
5213	while (num--)
5214		*(out++) = RREG32(mmSQ_IND_DATA);
5215}
5216
5217static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
5218{
5219	/* type 0 wave data */
5220	dst[(*no_fields)++] = 0;
5221	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
5222	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
5223	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
5224	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
5225	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
5226	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
5227	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
5228	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
5229	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
5230	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
5231	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
5232	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
5233	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
5234	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
5235	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
5236	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
5237	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
5238	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
5239	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
5240}
5241
5242static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
5243				     uint32_t wave, uint32_t start,
5244				     uint32_t size, uint32_t *dst)
5245{
5246	wave_read_regs(
5247		adev, simd, wave, 0,
5248		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
5249}
5250
5251
5252static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5253	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5254	.select_se_sh = &gfx_v8_0_select_se_sh,
5255	.read_wave_data = &gfx_v8_0_read_wave_data,
5256	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5257	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5258};
5259
5260static int gfx_v8_0_early_init(void *handle)
5261{
5262	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5263
5264	adev->gfx.xcc_mask = 1;
5265	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5266	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5267					  AMDGPU_MAX_COMPUTE_RINGS);
5268	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5269	gfx_v8_0_set_ring_funcs(adev);
5270	gfx_v8_0_set_irq_funcs(adev);
5271	gfx_v8_0_set_gds_init(adev);
5272	gfx_v8_0_set_rlc_funcs(adev);
5273
5274	return 0;
5275}
5276
5277static int gfx_v8_0_late_init(void *handle)
5278{
5279	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5280	int r;
5281
5282	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5283	if (r)
5284		return r;
5285
5286	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5287	if (r)
5288		return r;
5289
5290	/* requires IBs so do in late init after IB pool is initialized */
5291	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
5292	if (r)
5293		return r;
5294
5295	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
5296	if (r) {
5297		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
5298		return r;
5299	}
5300
5301	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
5302	if (r) {
5303		DRM_ERROR(
5304			"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
5305			r);
5306		return r;
5307	}
5308
5309	return 0;
5310}
5311
5312static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5313						       bool enable)
5314{
5315	if ((adev->asic_type == CHIP_POLARIS11) ||
5316	    (adev->asic_type == CHIP_POLARIS12) ||
5317	    (adev->asic_type == CHIP_VEGAM))
5318		/* Send msg to SMU via Powerplay */
5319		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5320
5321	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5322}
5323
5324static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
5325							bool enable)
5326{
5327	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
5328}
5329
5330static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
5331		bool enable)
5332{
5333	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
5334}
5335
5336static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
5337					  bool enable)
5338{
5339	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
5340}
5341
5342static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
5343						bool enable)
5344{
5345	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
5346
5347	/* Read any GFX register to wake up GFX. */
5348	if (!enable)
5349		RREG32(mmDB_RENDER_CONTROL);
5350}
5351
5352static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5353					  bool enable)
5354{
5355	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5356		cz_enable_gfx_cg_power_gating(adev, true);
5357		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5358			cz_enable_gfx_pipeline_power_gating(adev, true);
5359	} else {
5360		cz_enable_gfx_cg_power_gating(adev, false);
5361		cz_enable_gfx_pipeline_power_gating(adev, false);
5362	}
5363}
5364
5365static int gfx_v8_0_set_powergating_state(void *handle,
5366					  enum amd_powergating_state state)
5367{
5368	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5369	bool enable = (state == AMD_PG_STATE_GATE);
5370
5371	if (amdgpu_sriov_vf(adev))
5372		return 0;
5373
5374	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5375				AMD_PG_SUPPORT_RLC_SMU_HS |
5376				AMD_PG_SUPPORT_CP |
5377				AMD_PG_SUPPORT_GFX_DMG))
5378		amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5379	switch (adev->asic_type) {
5380	case CHIP_CARRIZO:
5381	case CHIP_STONEY:
5382
5383		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5384			cz_enable_sck_slow_down_on_power_up(adev, true);
5385			cz_enable_sck_slow_down_on_power_down(adev, true);
5386		} else {
5387			cz_enable_sck_slow_down_on_power_up(adev, false);
5388			cz_enable_sck_slow_down_on_power_down(adev, false);
5389		}
5390		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5391			cz_enable_cp_power_gating(adev, true);
5392		else
5393			cz_enable_cp_power_gating(adev, false);
5394
5395		cz_update_gfx_cg_power_gating(adev, enable);
5396
5397		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5398			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5399		else
5400			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5401
5402		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5403			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5404		else
5405			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5406		break;
5407	case CHIP_POLARIS11:
5408	case CHIP_POLARIS12:
5409	case CHIP_VEGAM:
5410		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5411			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5412		else
5413			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5414
5415		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5416			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5417		else
5418			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5419
5420		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
5421			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
5422		else
5423			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
5424		break;
5425	default:
5426		break;
5427	}
5428	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5429				AMD_PG_SUPPORT_RLC_SMU_HS |
5430				AMD_PG_SUPPORT_CP |
5431				AMD_PG_SUPPORT_GFX_DMG))
5432		amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5433	return 0;
5434}
5435
5436static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
5437{
5438	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5439	int data;
5440
5441	if (amdgpu_sriov_vf(adev))
5442		*flags = 0;
5443
5444	/* AMD_CG_SUPPORT_GFX_MGCG */
5445	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5446	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
5447		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5448
5449	/* AMD_CG_SUPPORT_GFX_CGCG */
5450	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
5451	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5452		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5453
5454	/* AMD_CG_SUPPORT_GFX_CGLS */
5455	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5456		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5457
5458	/* AMD_CG_SUPPORT_GFX_CGTS */
5459	data = RREG32(mmCGTS_SM_CTRL_REG);
5460	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
5461		*flags |= AMD_CG_SUPPORT_GFX_CGTS;
5462
5463	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
5464	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
5465		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
5466
5467	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5468	data = RREG32(mmRLC_MEM_SLP_CNTL);
5469	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5470		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5471
5472	/* AMD_CG_SUPPORT_GFX_CP_LS */
5473	data = RREG32(mmCP_MEM_SLP_CNTL);
5474	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5475		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5476}
5477
5478static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
5479				     uint32_t reg_addr, uint32_t cmd)
5480{
5481	uint32_t data;
5482
5483	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
5484
5485	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5486	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5487
5488	data = RREG32(mmRLC_SERDES_WR_CTRL);
5489	if (adev->asic_type == CHIP_STONEY)
5490		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5491			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5492			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5493			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5494			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5495			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5496			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5497			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5498			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5499	else
5500		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
5501			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
5502			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
5503			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
5504			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
5505			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
5506			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
5507			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
5508			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
5509			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
5510			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
5511	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
5512		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
5513		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
5514		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
5515
5516	WREG32(mmRLC_SERDES_WR_CTRL, data);
5517}
5518
5519#define MSG_ENTER_RLC_SAFE_MODE     1
5520#define MSG_EXIT_RLC_SAFE_MODE      0
5521#define RLC_GPR_REG2__REQ_MASK 0x00000001
5522#define RLC_GPR_REG2__REQ__SHIFT 0
5523#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
5524#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5525
5526static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5527{
5528	uint32_t rlc_setting;
5529
5530	rlc_setting = RREG32(mmRLC_CNTL);
5531	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5532		return false;
5533
5534	return true;
5535}
5536
5537static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
5538{
5539	uint32_t data;
5540	unsigned i;
5541	data = RREG32(mmRLC_CNTL);
5542	data |= RLC_SAFE_MODE__CMD_MASK;
5543	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5544	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5545	WREG32(mmRLC_SAFE_MODE, data);
5546
5547	/* wait for RLC_SAFE_MODE */
5548	for (i = 0; i < adev->usec_timeout; i++) {
5549		if ((RREG32(mmRLC_GPM_STAT) &
5550		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5551		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
5552		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
5553		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
5554			break;
5555		udelay(1);
5556	}
5557	for (i = 0; i < adev->usec_timeout; i++) {
5558		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5559			break;
5560		udelay(1);
5561	}
5562}
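
/*
 * Safe-mode entry is a handshake with the RLC firmware: writing CMD with
 * MESSAGE=1 requests entry, after which the driver polls until the GFX
 * clock and power status both report on and the CMD bit self-clears as
 * the acknowledgement. The exit path below writes MESSAGE=0 and waits
 * only for that acknowledgement.
 */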
5563
5564static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
5565{
5566	uint32_t data;
5567	unsigned i;
5568
5569	data = RREG32(mmRLC_CNTL);
5570	data |= RLC_SAFE_MODE__CMD_MASK;
5571	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
5572	WREG32(mmRLC_SAFE_MODE, data);
5573
5574	for (i = 0; i < adev->usec_timeout; i++) {
5575		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
5576			break;
5577		udelay(1);
5578	}
5579}
5580
5581static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
5582{
5583	u32 data;
5584
5585	amdgpu_gfx_off_ctrl(adev, false);
5586
5587	if (amdgpu_sriov_is_pp_one_vf(adev))
5588		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
5589	else
5590		data = RREG32(mmRLC_SPM_VMID);
5591
5592	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
5593	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
5594
5595	if (amdgpu_sriov_is_pp_one_vf(adev))
5596		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
5597	else
5598		WREG32(mmRLC_SPM_VMID, data);
5599
5600	amdgpu_gfx_off_ctrl(adev, true);
5601}
5602
5603static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
5604	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
5605	.set_safe_mode = gfx_v8_0_set_safe_mode,
5606	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
5607	.init = gfx_v8_0_rlc_init,
5608	.get_csb_size = gfx_v8_0_get_csb_size,
5609	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
5610	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
5611	.resume = gfx_v8_0_rlc_resume,
5612	.stop = gfx_v8_0_rlc_stop,
5613	.reset = gfx_v8_0_rlc_reset,
5614	.start = gfx_v8_0_rlc_start,
5615	.update_spm_vmid = gfx_v8_0_update_spm_vmid
5616};
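
/*
 * Common amdgpu code dispatches through adev->gfx.rlc.funcs rather than
 * calling these gfx_v8_0_* functions directly, which keeps it IP-version
 * agnostic: gfx_v8_0_hw_init() above runs adev->gfx.rlc.funcs->resume(adev),
 * which resolves to gfx_v8_0_rlc_resume() once gfx_v8_0_set_rlc_funcs()
 * installs this table.
 */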
5617
5618static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5619						      bool enable)
5620{
5621	uint32_t temp, data;
5622
5623	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5624
5625	/* It is disabled by HW by default */
5626	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
5627		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5628			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
5629				/* 1 - RLC memory Light sleep */
5630				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
5631
5632			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
5633				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
5634		}
5635
5636		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
5637		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5638		if (adev->flags & AMD_IS_APU)
5639			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5640				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5641				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
5642		else
5643			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5644				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5645				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5646				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5647
5648		if (temp != data)
5649			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5650
5651		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5652		gfx_v8_0_wait_for_rlc_serdes(adev);
5653
5654		/* 5 - clear mgcg override */
5655		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
5656
5657		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
5658			/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
5659			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5660			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
5661			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
5662			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
5663			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
5664			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
5665			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
5666				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
5667			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
5668			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
5669			if (temp != data)
5670				WREG32(mmCGTS_SM_CTRL_REG, data);
5671		}
5672		udelay(50);
5673
5674		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5675		gfx_v8_0_wait_for_rlc_serdes(adev);
5676	} else {
5677		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
5678		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
5679		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
5680				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
5681				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
5682				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
5683		if (temp != data)
5684			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
5685
5686		/* 2 - disable MGLS in RLC */
5687		data = RREG32(mmRLC_MEM_SLP_CNTL);
5688		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5689			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5690			WREG32(mmRLC_MEM_SLP_CNTL, data);
5691		}
5692
5693		/* 3 - disable MGLS in CP */
5694		data = RREG32(mmCP_MEM_SLP_CNTL);
5695		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5696			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5697			WREG32(mmCP_MEM_SLP_CNTL, data);
5698		}
5699
5700		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
5701		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
5702		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
5703				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
5704		if (temp != data)
5705			WREG32(mmCGTS_SM_CTRL_REG, data);
5706
5707		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5708		gfx_v8_0_wait_for_rlc_serdes(adev);
5709
5710		/* 6 - set mgcg override */
5711		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
5712
5713		udelay(50);
5714
5715		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
5716		gfx_v8_0_wait_for_rlc_serdes(adev);
5717	}
5718
5719	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5720}
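
/*
 * A minimal sketch of the read-modify-write pattern used throughout the
 * clockgating code above: read the register once into both a saved copy
 * and a working copy, modify the working copy, and write back only when
 * the value actually changed, to avoid redundant MMIO writes.
 * SOME_CG_REG/SOME_FIELD_MASK below are hypothetical names:
 *
 *	u32 temp, data;
 *
 *	temp = data = RREG32(mmSOME_CG_REG);
 *	data &= ~SOME_FIELD_MASK;
 *	if (temp != data)
 *		WREG32(mmSOME_CG_REG, data);
 */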

static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, temp1, data, data1;

	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 2 - clear cgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 3 - write cmd to set CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);

		/* 4 - enable cgcg */
		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			/* enable cgls */
			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

			temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;

			if (temp1 != data1)
				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
		} else {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		/* 5 - enable cntx_empty_int_enable/cntx_busy_int_enable/
		 * Cmp_busy/GFX_Idle interrupts
		 */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	} else {
		/* disable cntx_empty_int_enable & GFX Idle interrupt */
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

		/* TEST CGCG */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* read gfx register to wake up cgcg */
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to Set CGCG Override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to Clear CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);

		/* disable cgcg; cgls should be disabled too. */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
		/* enable interrupts again for PG */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	}

	gfx_v8_0_wait_for_rlc_serdes(adev);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}

static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
		 * ===  MGCG + MGLS + TS(CG/LS) ===
		 */
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
		 * ===  CGCG + CGLS ===
		 */
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}
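
/*
 * Ordering sketch (illustration only): the enable path programs medium
 * grain gating before coarse grain, and the disable path reverses that
 * order, so CGCG/CGLS is never left active without MGCG/MGLS underneath:
 *
 *	gfx_v8_0_update_gfx_clock_gating(adev, true);	// MG first, then CG
 *	gfx_v8_0_update_gfx_clock_gating(adev, false);	// CG first, then MG
 */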

static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_CG,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_MG,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}
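
/*
 * Sketch of how the SMU request above is composed: PP_CG_MSG_ID packs the
 * group, block, supported-states, and requested-states fields into one
 * message word (the exact bit layout lives in the powerplay headers).
 * For example, gating both CGCG and CGLS for the GFX CG block:
 *
 *	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
 *			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
 *			      PP_STATE_CG | PP_STATE_LS);
 *	amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 */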

static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
						    enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_CG,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_3D,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_MG,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
		pp_support_state = PP_STATE_SUPPORT_LS;

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_RLC,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
		pp_support_state = PP_STATE_SUPPORT_LS;

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_CP,
				pp_support_state,
				pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}

static int gfx_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		gfx_v8_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
		break;
	default:
		break;
	}
	return 0;
}
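
/*
 * Usage sketch (assumed call path, for illustration): this hook is not
 * called directly but through the IP block dispatch in the amdgpu core,
 * roughly:
 *
 *	adev->ip_blocks[i].version->funcs->set_clockgating_state(
 *		(void *)adev, AMD_CG_STATE_GATE);
 */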

static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		/* XXX check if swapping is necessary on BE */
		return *ring->wptr_cpu_addr;
	else
		return RREG32(mmCP_RB0_WPTR);
}

static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		(void)RREG32(mmCP_RB0_WPTR);
	}
}

static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask, reg_mem_engine;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 reg_mem_engine));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}
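
/*
 * Packet layout sketch for the 7-DW WAIT_REG_MEM emitted above (DW
 * meanings inferred from the writes in this function):
 *
 *	DW0: PACKET3(PACKET3_WAIT_REG_MEM, 5)
 *	DW1: control (operation = write/wait/write, function = equal,
 *	     engine select)
 *	DW2: mmGPU_HDP_FLUSH_REQ  (register to write ref_and_mask to)
 *	DW3: mmGPU_HDP_FLUSH_DONE (register to poll)
 *	DW4: reference value
 *	DW5: mask
 *	DW6: poll interval
 */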

static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
		EVENT_INDEX(4));

	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
		EVENT_INDEX(0));
}

static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
			gfx_v8_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
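
/*
 * Resulting 4-DW INDIRECT_BUFFER packet (a sketch of what the writes
 * above produce):
 *
 *	DW0: header  - PACKET3_INDIRECT_BUFFER (or _CONST for CE IBs)
 *	DW1: ib->gpu_addr & 0xFFFFFFFC  - low bits, 4-byte aligned
 *	     (plus swap bits on big endian)
 *	DW2: upper_32_bits(ib->gpu_addr) & 0xFFFF - high address bits
 *	DW3: control - IB length in DWs | (vmid << 24)
 */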

static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high likelihood of a wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
				(2 << 0) |
#endif
				(ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with a seq value one below the real one.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe:
	 * EVENT_WRITE_EOP - flush caches, send int
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
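
/*
 * Timeline sketch of the workaround above: two EOP events go down the
 * pipe, so the fence value only becomes visible once the caches are
 * actually flushed:
 *
 *	EOP(flush caches, seq - 1, DATA_SEL(1), INT_SEL(0));  // dummy
 *	EOP(flush caches, seq, real DATA_SEL/INT_SEL);        // real fence
 */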

static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */
}

static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}
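
/*
 * Flush sequence sketch, mirroring the code above (ordering is the
 * point, the packet details are simplified):
 *
 *	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);  // request
 *	WAIT_REG_MEM on mmVM_INVALIDATE_REQUEST;             // order the
 *	                                                     // invalidate
 *	if (gfx ring)
 *		PFP_SYNC_ME;   // keep PFP from fetching stale data ahead
 *		               // of ME
 */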

static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	return *ring->wptr_cpu_addr;
}

static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}

static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32 bits for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v8_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		gfx_v8_0_ring_emit_vgt_flush(ring);
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if a preamble is present */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time a preamble is
		 * presented, although no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
						  uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	/* discard following DWs if *cond_exec_gpu_addr == 0 */
	amdgpu_ring_write(ring, 0);
	ret = ring->wptr & ring->buf_mask;
	/* patch dummy value later */
	amdgpu_ring_write(ring, 0);
	return ret;
}
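
/*
 * Usage sketch (hypothetical caller): the returned ring offset points at
 * the "number of DWs to skip" placeholder, which gets patched once the
 * size of the conditional region is known:
 *
 *	unsigned offs = gfx_v8_0_ring_emit_init_cond_exec(ring, addr);
 *	// ... emit the conditionally executed packets ...
 *	ring->ring[offs] = actual_dw_count;	// patch the dummy value
 */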

static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = 1 << 16; /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
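
/*
 * Usage sketch: callers reach this through the ring funcs wrapper, e.g.
 * the wave-limit code later in this file:
 *
 *	amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
 *
 * which lands here and emits a 5-DW WRITE_DATA packet with the
 * engine/confirm selector appropriate for the ring type.
 */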

static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32(mmSQ_CMD, value);
}
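
/*
 * Field sketch for the SQ_CMD write above: CMD=0x03 (assumed here to be
 * the wave-kill operation) with MODE=0x01 broadcasts the request, while
 * CHECK_VMID/VM_ID restrict it to waves belonging to the hung job's
 * VMID. A hedged usage example (job_vmid is illustrative):
 *
 *	gfx_v8_0_ring_soft_recovery(ring, job_vmid);
 */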

static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}

static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		case 1:
			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
			break;
		case 2:
			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
			break;
		case 3:
			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);

	return 0;
}

static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);

	return 0;
}

static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	int enable_flag;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		enable_flag = 0;
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		enable_flag = 1;
		break;

	default:
		return -EINVAL;
	}

	WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);

	return 0;
}

static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     unsigned int type,
				     enum amdgpu_interrupt_state state)
{
	int enable_flag;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		enable_flag = 1;
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		enable_flag = 0;
		break;

	default:
		return -EINVAL;
	}

	WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
		     enable_flag);

	return 0;
}

static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting
			 * from VI. The interrupt can only be enabled/disabled
			 * per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
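
/*
 * Decoding sketch for entry->ring_id as used above (bit layout inferred
 * from the masks in this handler):
 *
 *	me_id    = (ring_id >> 2) & 0x3;	// bits [3:2]
 *	pipe_id  =  ring_id       & 0x3;	// bits [1:0]
 *	queue_id = (ring_id >> 4) & 0x7;	// bits [6:4]
 */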

static void gfx_v8_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}

static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}

static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("CP EDC/ECC error detected.\n");
	return 0;
}

static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
				  bool from_wq)
{
	u32 enc, se_id, sh_id, cu_id;
	char type[20];
	int sq_edc_source = -1;

	enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
	se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);

	switch (enc) {
		case 0:
			DRM_INFO("SQ general purpose intr detected: "
					"se_id %d, immed_overflow %d, host_reg_overflow %d, "
					"host_cmd_overflow %d, cmd_timestamp %d, "
					"reg_timestamp %d, thread_trace_buff_full %d, "
					"wlt %d, thread_trace %d.\n",
					se_id,
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
					);
			break;
		case 1:
		case 2:
			cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
			sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);

			/*
			 * This function can be called either directly from the
			 * ISR or from a BH (work queue), in which case we can
			 * access the SQ_EDC_INFO instance.
			 */
			if (from_wq) {
				mutex_lock(&adev->grbm_idx_mutex);
				gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);

				sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);

				gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
				mutex_unlock(&adev->grbm_idx_mutex);
			}

			if (enc == 1)
				sprintf(type, "instruction intr");
			else
				sprintf(type, "EDC/ECC error");

			DRM_INFO(
				"SQ %s detected: "
					"se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
					"trap %s, sq_edc_info.source %s.\n",
					type, se_id, sh_id, cu_id,
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
					REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
					(sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
				);
			break;
		default:
			DRM_ERROR("SQ invalid encoding type.\n");
	}
}

static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
	struct sq_work *sq_work = container_of(work, struct sq_work, work);

	gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
}

static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	unsigned ih_data = entry->src_data[0];

	/*
	 * Try to submit work so SQ_EDC_INFO can be accessed from the
	 * BH. If the previous work submission hasn't finished yet,
	 * just print whatever info is possible directly from the ISR.
	 */
	if (work_pending(&adev->gfx.sq_work.work)) {
		gfx_v8_0_parse_sq_irq(adev, ih_data, false);
	} else {
		adev->gfx.sq_work.ih_data = ih_data;
		schedule_work(&adev->gfx.sq_work.work);
	}

	return 0;
}

static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA |
			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}

static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA |
			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
}

/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT	0x0000007f
static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
					uint32_t pipe, bool enable)
{
	uint32_t val;
	uint32_t wcl_cs_reg;

	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;

	switch (pipe) {
	case 0:
		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
		break;
	case 1:
		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
		break;
	case 2:
		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
		break;
	case 3:
		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
		break;
	default:
		DRM_DEBUG("invalid pipe %d\n", pipe);
		return;
	}

	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}

#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT	0x07ffffff
static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t val;
	int i;

	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
	 * limit the number of gfx waves. Setting it to 0x1f (five bits) makes
	 * sure gfx only gets around 25% of GPU resources.
	 */
	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
	amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);

	/* Restrict waves for normal/low priority compute queues as well
	 * to get the best QoS for high priority compute jobs.
	 *
	 * amdgpu controls only the 1st ME (CS pipes 0-3).
	 */
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		if (i != ring->pipe)
			gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
	}
}
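
/*
 * Back-of-the-envelope for the 25% figure above: per the 7-bit
 * multiplier comment, full scale is 0x7f, so 0x1f works out to roughly
 * 31/127, i.e. about a quarter of the GPU left for gfx waves while a
 * high priority compute queue is running.
 */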

static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
	.name = "gfx_v8_0",
	.early_init = gfx_v8_0_early_init,
	.late_init = gfx_v8_0_late_init,
	.sw_init = gfx_v8_0_sw_init,
	.sw_fini = gfx_v8_0_sw_fini,
	.hw_init = gfx_v8_0_hw_init,
	.hw_fini = gfx_v8_0_hw_fini,
	.suspend = gfx_v8_0_suspend,
	.resume = gfx_v8_0_resume,
	.is_idle = gfx_v8_0_is_idle,
	.wait_for_idle = gfx_v8_0_wait_for_idle,
	.check_soft_reset = gfx_v8_0_check_soft_reset,
	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
	.soft_reset = gfx_v8_0_soft_reset,
	.post_soft_reset = gfx_v8_0_post_soft_reset,
	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
	.set_powergating_state = gfx_v8_0_set_powergating_state,
	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
	.emit_frame_size = /* maximum 215 DWs if counting 16 IBs in */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
		12 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 +  /* double SWITCH_BUFFER,
		      * the first COND_EXEC jumps to the place just
		      * prior to this double SWITCH_BUFFER
		      */
		5 +  /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		12 + 12 + /* FENCE x2 */
		2 +  /* SWITCH_BUFFER */
		5,   /* SURFACE_SYNC */
	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v8_ring_emit_sb,
	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
	.soft_recovery = gfx_v8_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
		7 + /* gfx_v8_0_emit_mem_sync_compute */
		5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
	.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
	.emit_wave_limit = gfx_v8_0_emit_wave_limit,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		17 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
	.test_ring = gfx_v8_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v8_0_ring_emit_rreg,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
	.set = gfx_v8_0_set_eop_interrupt_state,
	.process = gfx_v8_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
	.set = gfx_v8_0_set_priv_reg_fault_state,
	.process = gfx_v8_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
	.set = gfx_v8_0_set_priv_inst_fault_state,
	.process = gfx_v8_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v8_0_set_cp_ecc_int_state,
	.process = gfx_v8_0_cp_ecc_error_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
	.set = gfx_v8_0_set_sq_int_state,
	.process = gfx_v8_0_sq_irq,
};

static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 1;
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;

	adev->gfx.sq_irq.num_types = 1;
	adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}

static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}

static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC GDS info */
	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}

static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}

static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	memset(cu_info, 0, sizeof(*cu_info));

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 4 && j < 2)
				gfx_v8_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[0][i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}
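
/*
 * A minimal sketch of the per-SH accounting done above, for a single
 * hypothetical bitmap value:
 *
 *	u32 bitmap = 0x2ff;		// bits 0-7 and bit 9: 9 active CUs
 *	int count = hweight32(bitmap);	// kernel popcount helper -> 9
 *
 * The loop version above additionally caps the "always on" (ao) set at
 * ao_cu_num CUs while it counts.
 */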

const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};

static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	uint64_t ce_payload_addr;
	int cnt_ce;
	union {
		struct vi_ce_ib_state regular;
		struct vi_ce_ib_state_chained_ib chained;
	} ce_payload = {};

	if (ring->adev->virt.chained_ib_support) {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
	} else {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data, ce_payload);
		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				WRITE_DATA_DST_SEL(8) |
				WR_CONFIRM) |
				WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
}

static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	uint64_t de_payload_addr, gds_addr, csa_addr;
	int cnt_de;
	union {
		struct vi_de_ib_state regular;
		struct vi_de_ib_state_chained_ib chained;
	} de_payload = {};

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	if (ring->adev->virt.chained_ib_support) {
		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
	} else {
		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				WRITE_DATA_DST_SEL(8) |
				WR_CONFIRM) |
				WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}
v4.6
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
 
 
 
  23#include <linux/firmware.h>
  24#include "drmP.h"
 
 
  25#include "amdgpu.h"
  26#include "amdgpu_gfx.h"
 
  27#include "vi.h"
 
  28#include "vid.h"
  29#include "amdgpu_ucode.h"
 
 
  30#include "clearstate_vi.h"
  31
  32#include "gmc/gmc_8_2_d.h"
  33#include "gmc/gmc_8_2_sh_mask.h"
  34
  35#include "oss/oss_3_0_d.h"
  36#include "oss/oss_3_0_sh_mask.h"
  37
  38#include "bif/bif_5_0_d.h"
  39#include "bif/bif_5_0_sh_mask.h"
  40
  41#include "gca/gfx_8_0_d.h"
  42#include "gca/gfx_8_0_enum.h"
  43#include "gca/gfx_8_0_sh_mask.h"
  44#include "gca/gfx_8_0_enum.h"
  45
  46#include "dce/dce_10_0_d.h"
  47#include "dce/dce_10_0_sh_mask.h"
  48
 
 
 
 
  49#define GFX8_NUM_GFX_RINGS     1
  50#define GFX8_NUM_COMPUTE_RINGS 8
  51
  52#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
  53#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
 
  54#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
  55
  56#define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
  57#define PIPE_CONFIG(x)					((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
  58#define TILE_SPLIT(x)					((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
  59#define MICRO_TILE_MODE_NEW(x)				((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
  60#define SAMPLE_SPLIT(x)					((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
  61#define BANK_WIDTH(x)					((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
  62#define BANK_HEIGHT(x)					((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
  63#define MACRO_TILE_ASPECT(x)				((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
  64#define NUM_BANKS(x)					((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
  65
  66#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
  67#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
  68#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
  69#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
  70#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
  71#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L
  72
  73/* BPM SERDES CMD */
  74#define SET_BPM_SERDES_CMD    1
  75#define CLE_BPM_SERDES_CMD    0
  76
  77/* BPM Register Address*/
  78enum {
  79	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
  80	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
  81	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
  82	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
  83	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
  84	BPM_REG_FGCG_MAX
  85};
  86
 
 
  87MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
  88MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
  89MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
  90MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
  91MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
  92MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
  93
  94MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
  95MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
  96MODULE_FIRMWARE("amdgpu/stoney_me.bin");
  97MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
  98MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
  99
 100MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
 101MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
 102MODULE_FIRMWARE("amdgpu/tonga_me.bin");
 103MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
 104MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
 105MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
 106
 107MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 108MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 109MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 110MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
 111MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 112
 113MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
 114MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
 115MODULE_FIRMWARE("amdgpu/fiji_me.bin");
 116MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
 117MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 118MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 119
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 120static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 121{
 122	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
 123	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
 124	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
 125	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
 126	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
 127	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
 128	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
 129	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
 130	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
 131	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 132	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 133	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 134	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 135	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 136	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 137	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 138};
 139
 140static const u32 golden_settings_tonga_a11[] =
 141{
 142	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
 143	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 144	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 145	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 146	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 147	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
 148	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 149	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 150	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 151	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 152	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 153	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
 154	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
 155	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
 156	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 157};
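/*
 * Editor's note: this table, and every *_mgcg_cgcg_init /
 * *_golden_common_all array below, is a flat list of
 * { register, and_mask, or_mask } triplets consumed by
 * amdgpu_program_register_sequence(). A minimal sketch of how such a
 * sequence is applied (read-modify-write, for reference only):
 */
#if 0	/* illustrative sketch, not part of this file */
static void program_reg_triplets(struct amdgpu_device *adev,
				 const u32 *registers, u32 array_size)
{
	u32 i, reg, and_mask, or_mask, val;

	for (i = 0; i < array_size; i += 3) {
		reg      = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask  = registers[i + 2];

		val = RREG32(reg);
		val &= ~and_mask;	/* clear the maintained field(s) */
		val |= or_mask;		/* then set the golden value */
		WREG32(reg, val);
	}
}
#endif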
 158
 159static const u32 tonga_golden_common_all[] =
 160{
 161	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 162	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
 163	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
 164	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 165	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 166	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 167	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 168	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
 169};
 170
 171static const u32 tonga_mgcg_cgcg_init[] =
 172{
 173	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 174	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 175	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 176	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 177	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 178	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 179	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 180	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 181	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 182	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 183	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 184	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 185	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 186	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 187	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 188	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 189	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 190	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 191	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 192	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 193	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 194	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 195	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 196	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 197	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 198	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 199	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 200	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 201	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 202	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 203	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 204	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 205	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 206	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 207	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 208	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 209	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 210	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 211	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 212	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 213	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 214	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 215	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 216	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 217	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 218	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 219	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 220	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 221	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 222	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 223	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 224	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 225	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 226	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 227	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 228	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 229	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 230	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 231	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 232	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 233	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 234	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 235	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 236	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 237	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 238	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 239	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 240	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 241	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 242	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 243	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 244	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 245	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 246	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 247	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 248};
 249
 250static const u32 fiji_golden_common_all[] =
 251{
 252	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 253	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
 254	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
 255	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 256	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 257	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 258	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 259	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
 260	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 261	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
 262};
 263
 264static const u32 golden_settings_fiji_a10[] =
 265{
 266	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 267	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 268	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 269	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 270	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 271	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 272	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 273	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 274	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 275	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
 276	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 277};
 278
 279static const u32 fiji_mgcg_cgcg_init[] =
 280{
 281	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 282	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 283	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 284	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 285	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 286	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 287	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
 288	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 289	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 290	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 291	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 292	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 293	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 294	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 295	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 296	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 297	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 298	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 299	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 300	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 301	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 302	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 303	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 304	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 305	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 306	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 307	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 308	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 309	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 310	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 311	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 312	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 313	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 314	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 315	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 316};
 317
 318static const u32 golden_settings_iceland_a11[] =
 319{
 320	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 321	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 322	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
 323	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 324	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 325	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 326	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
 327	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 328	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 329	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 330	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 331	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 332	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
 333	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
 334	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
 335};
 336
 337static const u32 iceland_golden_common_all[] =
 338{
 339	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 340	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 341	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 342	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 343	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 344	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 345	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 346	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
 347};
 348
 349static const u32 iceland_mgcg_cgcg_init[] =
 350{
 351	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 352	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 353	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 354	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 355	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
 356	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
 357	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
 358	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 359	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 360	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 361	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 362	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 363	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 364	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 365	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 366	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 367	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 368	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 369	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 370	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 371	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 372	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 373	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
 374	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 375	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 376	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 377	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 378	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 379	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 380	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 381	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 382	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 383	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 384	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 385	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 386	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 387	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 388	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 389	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 390	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 391	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 392	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 393	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 394	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 395	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 396	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 397	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 398	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 399	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 400	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 401	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 402	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 403	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 404	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
 405	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 406	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 407	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 408	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 409	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 410	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 411	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 412	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 413	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 414	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
 415};
 416
 417static const u32 cz_golden_settings_a11[] =
 418{
 419	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
 420	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 421	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 422	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
 423	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 424	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 425	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
 426	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 427	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
 428	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
 429};
 430
 431static const u32 cz_golden_common_all[] =
 432{
 433	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 434	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
 435	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 436	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 437	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 438	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 439	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 440	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
 441};
 442
 443static const u32 cz_mgcg_cgcg_init[] =
 444{
 445	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
 446	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 447	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 448	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
 449	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 450	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 451	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
 452	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 453	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 454	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
 455	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
 456	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
 457	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
 458	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
 459	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
 460	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
 461	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
 462	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
 463	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
 464	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
 465	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
 466	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
 467	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
 468	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
 469	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
 470	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
 471	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
 472	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 473	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
 474	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
 475	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 476	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 477	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 478	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 479	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 480	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 481	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 482	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 483	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
 484	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 485	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 486	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 487	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 488	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
 489	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 490	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 491	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 492	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 493	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
 494	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 495	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 496	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 497	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 498	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
 499	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 500	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 501	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 502	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 503	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
 504	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 505	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 506	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 507	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 508	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
 509	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 510	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 511	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
 512	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
 513	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
 514	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
 515	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
 516	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 517	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 518	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 519	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 520};
 521
 522static const u32 stoney_golden_settings_a11[] =
 523{
 524	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 525	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 526	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 527	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 528	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 529	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 530	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 531	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 532	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
 533	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
 534};
 535
 536static const u32 stoney_golden_common_all[] =
 537{
 538	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 539	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
 540	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 541	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
 542	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 543	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
 544	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
 545	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
 546};
 547
 548static const u32 stoney_mgcg_cgcg_init[] =
 549{
 550	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 551	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
 552	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 553	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 554	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
 555	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 556};
 557
 558static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 559static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 560static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
 561
 562static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 563{
 564	switch (adev->asic_type) {
 565	case CHIP_TOPAZ:
 566		amdgpu_program_register_sequence(adev,
 567						 iceland_mgcg_cgcg_init,
 568						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
 569		amdgpu_program_register_sequence(adev,
 570						 golden_settings_iceland_a11,
 571						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 572		amdgpu_program_register_sequence(adev,
 573						 iceland_golden_common_all,
 574						 (const u32)ARRAY_SIZE(iceland_golden_common_all));
 575		break;
 576	case CHIP_FIJI:
 577		amdgpu_program_register_sequence(adev,
 578						 fiji_mgcg_cgcg_init,
 579						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
 580		amdgpu_program_register_sequence(adev,
 581						 golden_settings_fiji_a10,
 582						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
 583		amdgpu_program_register_sequence(adev,
 584						 fiji_golden_common_all,
 585						 (const u32)ARRAY_SIZE(fiji_golden_common_all));
 586		break;
 587
 588	case CHIP_TONGA:
 589		amdgpu_program_register_sequence(adev,
 590						 tonga_mgcg_cgcg_init,
 591						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
 592		amdgpu_program_register_sequence(adev,
 593						 golden_settings_tonga_a11,
 594						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
 595		amdgpu_program_register_sequence(adev,
 596						 tonga_golden_common_all,
 597						 (const u32)ARRAY_SIZE(tonga_golden_common_all));
 598		break;
 599	case CHIP_CARRIZO:
 600		amdgpu_program_register_sequence(adev,
 601						 cz_mgcg_cgcg_init,
 602						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
 603		amdgpu_program_register_sequence(adev,
 604						 cz_golden_settings_a11,
 605						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
 606		amdgpu_program_register_sequence(adev,
 607						 cz_golden_common_all,
 608						 (const u32)ARRAY_SIZE(cz_golden_common_all));
 609		break;
 610	case CHIP_STONEY:
 611		amdgpu_program_register_sequence(adev,
 612						 stoney_mgcg_cgcg_init,
 613						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
 614		amdgpu_program_register_sequence(adev,
 615						 stoney_golden_settings_a11,
 616						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
 617		amdgpu_program_register_sequence(adev,
 618						 stoney_golden_common_all,
 619						 (const u32)ARRAY_SIZE(stoney_golden_common_all));
 620		break;
 621	default:
 622		break;
 623	}
 624}
 625
 626static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
 627{
 628	int i;
 629
 630	adev->gfx.scratch.num_reg = 7;
 631	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 632	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
 633		adev->gfx.scratch.free[i] = true;
 634		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
 635	}
 636}
 637
 638static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 639{
 640	struct amdgpu_device *adev = ring->adev;
 641	uint32_t scratch;
 642	uint32_t tmp = 0;
 643	unsigned i;
 644	int r;
 645
 646	r = amdgpu_gfx_scratch_get(adev, &scratch);
 647	if (r) {
 648		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
 649		return r;
 650	}
 651	WREG32(scratch, 0xCAFEDEAD);
 652	r = amdgpu_ring_alloc(ring, 3);
 653	if (r) {
 654		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
 655			  ring->idx, r);
 656		amdgpu_gfx_scratch_free(adev, scratch);
 657		return r;
 658	}
 659	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 660	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 661	amdgpu_ring_write(ring, 0xDEADBEEF);
 662	amdgpu_ring_commit(ring);
 663
 664	for (i = 0; i < adev->usec_timeout; i++) {
 665		tmp = RREG32(scratch);
 666		if (tmp == 0xDEADBEEF)
 667			break;
 668		DRM_UDELAY(1);
 669	}
 670	if (i < adev->usec_timeout) {
 671		DRM_INFO("ring test on %d succeeded in %d usecs\n",
 672			 ring->idx, i);
 673	} else {
 674		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
 675			  ring->idx, scratch, tmp);
 676		r = -EINVAL;
 677	}
 678	amdgpu_gfx_scratch_free(adev, scratch);
 679	return r;
 680}
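/*
 * Editor's note: the ring test exercises the whole submission path with
 * one register write: the CPU seeds a scratch register with 0xCAFEDEAD,
 * asks the CP to overwrite it with 0xDEADBEEF via a type-3
 * SET_UCONFIG_REG packet, then polls until the new value appears. The
 * PACKET3() header dword used here is encoded roughly as:
 *
 *	(3 << 30) |			// type-3 packet
 *	(((count) & 0x3fff) << 16) |	// body dwords minus one
 *	(((opcode) & 0xff) << 8)
 */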
 681
 682static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 683{
 684	struct amdgpu_device *adev = ring->adev;
 685	struct amdgpu_ib ib;
 686	struct fence *f = NULL;
 687	uint32_t scratch;
 688	uint32_t tmp = 0;
 689	unsigned i;
 690	int r;
 691
 692	r = amdgpu_gfx_scratch_get(adev, &scratch);
 693	if (r) {
 694		DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
 695		return r;
 696	}
 697	WREG32(scratch, 0xCAFEDEAD);
 698	memset(&ib, 0, sizeof(ib));
 699	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 700	if (r) {
 701		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 702		goto err1;
 703	}
 704	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 705	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 706	ib.ptr[2] = 0xDEADBEEF;
 707	ib.length_dw = 3;
 708
 709	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 710	if (r)
 711		goto err2;
 712
 713	r = fence_wait(f, false);
 714	if (r) {
 715		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 716		goto err2;
 717	}
 718	for (i = 0; i < adev->usec_timeout; i++) {
 719		tmp = RREG32(scratch);
 720		if (tmp == 0xDEADBEEF)
 721			break;
 722		DRM_UDELAY(1);
 723	}
 724	if (i < adev->usec_timeout) {
 725		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
 726			 ring->idx, i);
 727		goto err2;
 728	} else {
 729		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 730			  scratch, tmp);
 731		r = -EINVAL;
 732	}
 733err2:
 735	amdgpu_ib_free(adev, &ib, NULL);
 736	fence_put(f);
 737err1:
 738	amdgpu_gfx_scratch_free(adev, scratch);
 739	return r;
 740}
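/*
 * Editor's note: the IB test drives the same scratch write through an
 * indirect buffer instead of the ring itself, so on top of what the
 * ring test covers it also exercises IB allocation, IB fetch by the CP,
 * and the fence path (fence_wait() returning 0 means the CP reported
 * the IB complete before the scratch register is polled).
 */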
 741
 742static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 743{
 744	const char *chip_name;
 745	char fw_name[30];
 746	int err;
 747	struct amdgpu_firmware_info *info = NULL;
 748	const struct common_firmware_header *header = NULL;
 749	const struct gfx_firmware_header_v1_0 *cp_hdr;
 750
 751	DRM_DEBUG("\n");
 752
 753	switch (adev->asic_type) {
 754	case CHIP_TOPAZ:
 755		chip_name = "topaz";
 756		break;
 757	case CHIP_TONGA:
 758		chip_name = "tonga";
 759		break;
 760	case CHIP_CARRIZO:
 761		chip_name = "carrizo";
 762		break;
 763	case CHIP_FIJI:
 764		chip_name = "fiji";
 765		break;
 766	case CHIP_STONEY:
 767		chip_name = "stoney";
 768		break;
 769	default:
 770		BUG();
 771	}
 772
 773	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
 774	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 775	if (err)
 776		goto out;
 777	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
 778	if (err)
 779		goto out;
 780	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
 781	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 782	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 783
 784	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
 785	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
 786	if (err)
 787		goto out;
 788	err = amdgpu_ucode_validate(adev->gfx.me_fw);
 789	if (err)
 790		goto out;
 791	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
 792	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 793	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 794
 795	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
 796	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
 797	if (err)
 798		goto out;
 799	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
 800	if (err)
 801		goto out;
 802	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
 803	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 804	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 805
 806	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
 807	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 808	if (err)
 809		goto out;
 810	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
 811	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
 812	adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 813	adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 814
 815	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 816	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 817	if (err)
 818		goto out;
 819	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
 820	if (err)
 821		goto out;
 822	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
 823	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 824	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 825
 826	if ((adev->asic_type != CHIP_STONEY) &&
 827	    (adev->asic_type != CHIP_TOPAZ)) {
 828		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
 829		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
 830		if (!err) {
 831			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
 832			if (err)
 833				goto out;
 834			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
 835				adev->gfx.mec2_fw->data;
 836			adev->gfx.mec2_fw_version =
 837				le32_to_cpu(cp_hdr->header.ucode_version);
 838			adev->gfx.mec2_feature_version =
 839				le32_to_cpu(cp_hdr->ucode_feature_version);
 840		} else {
 841			err = 0;
 842			adev->gfx.mec2_fw = NULL;
 843		}
 844	}
 845
 846	if (adev->firmware.smu_load) {
 847		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
 848		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
 849		info->fw = adev->gfx.pfp_fw;
 850		header = (const struct common_firmware_header *)info->fw->data;
 851		adev->firmware.fw_size +=
 852			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 853
 854		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
 855		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
 856		info->fw = adev->gfx.me_fw;
 857		header = (const struct common_firmware_header *)info->fw->data;
 858		adev->firmware.fw_size +=
 859			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 860
 861		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
 862		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
 863		info->fw = adev->gfx.ce_fw;
 864		header = (const struct common_firmware_header *)info->fw->data;
 865		adev->firmware.fw_size +=
 866			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 867
 868		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
 869		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
 870		info->fw = adev->gfx.rlc_fw;
 871		header = (const struct common_firmware_header *)info->fw->data;
 872		adev->firmware.fw_size +=
 873			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 874
 875		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
 876		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
 877		info->fw = adev->gfx.mec_fw;
 878		header = (const struct common_firmware_header *)info->fw->data;
 879		adev->firmware.fw_size +=
 880			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 881
 882		if (adev->gfx.mec2_fw) {
 883			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
 884			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
 885			info->fw = adev->gfx.mec2_fw;
 886			header = (const struct common_firmware_header *)info->fw->data;
 887			adev->firmware.fw_size +=
 888				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 889		}
 890
 891	}
 892
 893out:
 894	if (err) {
 895		dev_err(adev->dev,
 896			"gfx8: Failed to load firmware \"%s\"\n",
 897			fw_name);
 898		release_firmware(adev->gfx.pfp_fw);
 899		adev->gfx.pfp_fw = NULL;
 900		release_firmware(adev->gfx.me_fw);
 901		adev->gfx.me_fw = NULL;
 902		release_firmware(adev->gfx.ce_fw);
 903		adev->gfx.ce_fw = NULL;
 904		release_firmware(adev->gfx.rlc_fw);
 905		adev->gfx.rlc_fw = NULL;
 906		release_firmware(adev->gfx.mec_fw);
 907		adev->gfx.mec_fw = NULL;
 908		release_firmware(adev->gfx.mec2_fw);
 909		adev->gfx.mec2_fw = NULL;
 910	}
 911	return err;
 912}
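/*
 * Editor's note: every CP firmware load above repeats the same pattern
 * (build the name, request, validate, pull the versions out of the
 * header). A hypothetical helper capturing one iteration, error paths
 * trimmed and the name invented for illustration:
 */
#if 0	/* illustrative sketch, not part of this file */
static int gfx_v8_0_load_one_fw(struct amdgpu_device *adev,
				const struct firmware **fw,
				const char *chip_name, const char *suffix,
				u32 *fw_version, u32 *feature_version)
{
	const struct gfx_firmware_header_v1_0 *hdr;
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s.bin",
		 chip_name, suffix);
	err = request_firmware(fw, fw_name, adev->dev);
	if (err)
		return err;
	err = amdgpu_ucode_validate(*fw);
	if (err)
		return err;

	hdr = (const struct gfx_firmware_header_v1_0 *)(*fw)->data;
	*fw_version = le32_to_cpu(hdr->header.ucode_version);
	*feature_version = le32_to_cpu(hdr->ucode_feature_version);
	return 0;
}
#endif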
 913
 914static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 915{
 916	int r;
 917
 918	if (adev->gfx.mec.hpd_eop_obj) {
 919		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
 920		if (unlikely(r != 0))
 921			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 922		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
 923		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
 924
 925		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
 926		adev->gfx.mec.hpd_eop_obj = NULL;
 927	}
 928}
 929
 930#define MEC_HPD_SIZE 2048
 931
 932static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 933{
 934	int r;
 935	u32 *hpd;
 936
 937	/*
 938	 * We assign only 1 pipe because all of the other
 939	 * pipes are handled by the KFD.
 940	 */
 941	adev->gfx.mec.num_mec = 1;
 942	adev->gfx.mec.num_pipe = 1;
 943	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
 944
 945	if (adev->gfx.mec.hpd_eop_obj == NULL) {
 946		r = amdgpu_bo_create(adev,
 947				     adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
 948				     PAGE_SIZE, true,
 949				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 950				     &adev->gfx.mec.hpd_eop_obj);
 951		if (r) {
 952			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
 953			return r;
 954		}
 955	}
 956
 957	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
 958	if (unlikely(r != 0)) {
 959		gfx_v8_0_mec_fini(adev);
 960		return r;
 961	}
 962	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
 963			  &adev->gfx.mec.hpd_eop_gpu_addr);
 964	if (r) {
 965		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
 966		gfx_v8_0_mec_fini(adev);
 967		return r;
 968	}
 969	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
 970	if (r) {
 971		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
 972		gfx_v8_0_mec_fini(adev);
 973		return r;
 974	}
 975
 976	memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
 977
 978	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
 979	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
 980
 981	return 0;
 982}
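/*
 * Editor's note: with num_mec = 1 and num_pipe = 1 as set above, the
 * HPD EOP buffer works out to 1 * 1 * MEC_HPD_SIZE * 2 = 4096 bytes,
 * i.e. a single page of hardware queue descriptors for the one pipe
 * this driver owns; the remaining pipes belong to the KFD.
 */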
 983
 984static const u32 vgpr_init_compute_shader[] =
 985{
 986	0x7e000209, 0x7e020208,
 987	0x7e040207, 0x7e060206,
 988	0x7e080205, 0x7e0a0204,
 989	0x7e0c0203, 0x7e0e0202,
 990	0x7e100201, 0x7e120200,
 991	0x7e140209, 0x7e160208,
 992	0x7e180207, 0x7e1a0206,
 993	0x7e1c0205, 0x7e1e0204,
 994	0x7e200203, 0x7e220202,
 995	0x7e240201, 0x7e260200,
 996	0x7e280209, 0x7e2a0208,
 997	0x7e2c0207, 0x7e2e0206,
 998	0x7e300205, 0x7e320204,
 999	0x7e340203, 0x7e360202,
1000	0x7e380201, 0x7e3a0200,
1001	0x7e3c0209, 0x7e3e0208,
1002	0x7e400207, 0x7e420206,
1003	0x7e440205, 0x7e460204,
1004	0x7e480203, 0x7e4a0202,
1005	0x7e4c0201, 0x7e4e0200,
1006	0x7e500209, 0x7e520208,
1007	0x7e540207, 0x7e560206,
1008	0x7e580205, 0x7e5a0204,
1009	0x7e5c0203, 0x7e5e0202,
1010	0x7e600201, 0x7e620200,
1011	0x7e640209, 0x7e660208,
1012	0x7e680207, 0x7e6a0206,
1013	0x7e6c0205, 0x7e6e0204,
1014	0x7e700203, 0x7e720202,
1015	0x7e740201, 0x7e760200,
1016	0x7e780209, 0x7e7a0208,
1017	0x7e7c0207, 0x7e7e0206,
1018	0xbf8a0000, 0xbf810000,
1019};
1020
1021static const u32 sgpr_init_compute_shader[] =
1022{
1023	0xbe8a0100, 0xbe8c0102,
1024	0xbe8e0104, 0xbe900106,
1025	0xbe920108, 0xbe940100,
1026	0xbe960102, 0xbe980104,
1027	0xbe9a0106, 0xbe9c0108,
1028	0xbe9e0100, 0xbea00102,
1029	0xbea20104, 0xbea40106,
1030	0xbea60108, 0xbea80100,
1031	0xbeaa0102, 0xbeac0104,
1032	0xbeae0106, 0xbeb00108,
1033	0xbeb20100, 0xbeb40102,
1034	0xbeb60104, 0xbeb80106,
1035	0xbeba0108, 0xbebc0100,
1036	0xbebe0102, 0xbec00104,
1037	0xbec20106, 0xbec40108,
1038	0xbec60100, 0xbec80102,
1039	0xbee60004, 0xbee70005,
1040	0xbeea0006, 0xbeeb0007,
1041	0xbee80008, 0xbee90009,
1042	0xbefc0000, 0xbf8a0000,
1043	0xbf810000, 0x00000000,
1044};
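/*
 * Editor's note: both blobs above are raw GCN machine code. The
 * recurring trailer dwords give the shape away: on GCN3, 0xbf8a0000
 * appears to be s_barrier and 0xbf810000 s_endpgm, so each shader just
 * sweeps values through a range of VGPRs or SGPRs, waits at a barrier,
 * and exits. They exist only to initialize the GPR banks for the EDC
 * workaround below.
 */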
1045
1046static const u32 vgpr_init_regs[] =
1047{
1048	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1049	mmCOMPUTE_RESOURCE_LIMITS, 0,
1050	mmCOMPUTE_NUM_THREAD_X, 256*4,
1051	mmCOMPUTE_NUM_THREAD_Y, 1,
1052	mmCOMPUTE_NUM_THREAD_Z, 1,
1053	mmCOMPUTE_PGM_RSRC2, 20,
1054	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1055	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1056	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1057	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1058	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1059	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1060	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1061	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1062	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1063	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1064};
1065
1066static const u32 sgpr1_init_regs[] =
1067{
1068	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1069	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1070	mmCOMPUTE_NUM_THREAD_X, 256*5,
1071	mmCOMPUTE_NUM_THREAD_Y, 1,
1072	mmCOMPUTE_NUM_THREAD_Z, 1,
1073	mmCOMPUTE_PGM_RSRC2, 20,
1074	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1075	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1076	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1077	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1078	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1079	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1080	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1081	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1082	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1083	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1084};
1085
1086static const u32 sgpr2_init_regs[] =
1087{
1088	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
1089	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1090	mmCOMPUTE_NUM_THREAD_X, 256*5,
1091	mmCOMPUTE_NUM_THREAD_Y, 1,
1092	mmCOMPUTE_NUM_THREAD_Z, 1,
1093	mmCOMPUTE_PGM_RSRC2, 20,
1094	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1095	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1096	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
1097	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
1098	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
1099	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
1100	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
1101	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
1102	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
1103	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
1104};
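/*
 * Editor's note: unlike the golden tables earlier in the file, these
 * three arrays are flat (register, value) pairs, written verbatim via
 * PACKET3_SET_SH_REG in the workaround below rather than applied as
 * (reg, and_mask, or_mask) triplets. The differing
 * COMPUTE_STATIC_THREAD_MGMT_SE0 masks (0xffffffff, 0x0f, 0xf0) aim
 * each dispatch at a different set of CUs so every GPR bank gets
 * touched.
 */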
1105
1106static const u32 sec_ded_counter_registers[] =
1107{
1108	mmCPC_EDC_ATC_CNT,
1109	mmCPC_EDC_SCRATCH_CNT,
1110	mmCPC_EDC_UCODE_CNT,
1111	mmCPF_EDC_ATC_CNT,
1112	mmCPF_EDC_ROQ_CNT,
1113	mmCPF_EDC_TAG_CNT,
1114	mmCPG_EDC_ATC_CNT,
1115	mmCPG_EDC_DMA_CNT,
1116	mmCPG_EDC_TAG_CNT,
1117	mmDC_EDC_CSINVOC_CNT,
1118	mmDC_EDC_RESTORE_CNT,
1119	mmDC_EDC_STATE_CNT,
1120	mmGDS_EDC_CNT,
1121	mmGDS_EDC_GRBM_CNT,
1122	mmGDS_EDC_OA_DED,
1123	mmSPI_EDC_CNT,
1124	mmSQC_ATC_EDC_GATCL1_CNT,
1125	mmSQC_EDC_CNT,
1126	mmSQ_EDC_DED_CNT,
1127	mmSQ_EDC_INFO,
1128	mmSQ_EDC_SEC_CNT,
1129	mmTCC_EDC_CNT,
1130	mmTCP_ATC_EDC_GATCL1_CNT,
1131	mmTCP_EDC_CNT,
1132	mmTD_EDC_CNT
1133};
1134
1135static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1136{
1137	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1138	struct amdgpu_ib ib;
1139	struct fence *f = NULL;
1140	int r, i;
1141	u32 tmp;
1142	unsigned total_size, vgpr_offset, sgpr_offset;
1143	u64 gpu_addr;
1144
1145	/* only supported on CZ */
1146	if (adev->asic_type != CHIP_CARRIZO)
1147		return 0;
1148
1149	/* bail if the compute ring is not ready */
1150	if (!ring->ready)
1151		return 0;
1152
1153	tmp = RREG32(mmGB_EDC_MODE);
1154	WREG32(mmGB_EDC_MODE, 0);
1155
1156	total_size =
1157		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1158	total_size +=
1159		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1160	total_size +=
1161		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
1162	total_size = ALIGN(total_size, 256);
1163	vgpr_offset = total_size;
1164	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
1165	sgpr_offset = total_size;
1166	total_size += sizeof(sgpr_init_compute_shader);
1167
1168	/* allocate an indirect buffer to put the commands in */
1169	memset(&ib, 0, sizeof(ib));
1170	r = amdgpu_ib_get(adev, NULL, total_size, &ib);
1171	if (r) {
1172		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
1173		return r;
1174	}
1175
1176	/* load the compute shaders */
1177	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
1178		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
1179
1180	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
1181		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
1182
1183	/* init the ib length to 0 */
1184	ib.length_dw = 0;
1185
1186	/* VGPR */
1187	/* write the register state for the compute dispatch */
1188	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
1189		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1190		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
1191		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
1192	}
1193	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1194	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
1195	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1196	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1197	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1198	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1199
1200	/* write dispatch packet */
1201	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1202	ib.ptr[ib.length_dw++] = 8; /* x */
1203	ib.ptr[ib.length_dw++] = 1; /* y */
1204	ib.ptr[ib.length_dw++] = 1; /* z */
1205	ib.ptr[ib.length_dw++] =
1206		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1207
1208	/* write CS partial flush packet */
1209	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1210	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1211
1212	/* SGPR1 */
1213	/* write the register state for the compute dispatch */
1214	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
1215		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1216		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
1217		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
1218	}
1219	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1220	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1221	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1222	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1223	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1224	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1225
1226	/* write dispatch packet */
1227	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1228	ib.ptr[ib.length_dw++] = 8; /* x */
1229	ib.ptr[ib.length_dw++] = 1; /* y */
1230	ib.ptr[ib.length_dw++] = 1; /* z */
1231	ib.ptr[ib.length_dw++] =
1232		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1233
1234	/* write CS partial flush packet */
1235	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1236	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1237
1238	/* SGPR2 */
1239	/* write the register state for the compute dispatch */
1240	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
1241		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
1242		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
1243		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
1244	}
1245	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
1246	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
1247	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
1248	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1249	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
1250	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
1251
1252	/* write dispatch packet */
1253	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1254	ib.ptr[ib.length_dw++] = 8; /* x */
1255	ib.ptr[ib.length_dw++] = 1; /* y */
1256	ib.ptr[ib.length_dw++] = 1; /* z */
1257	ib.ptr[ib.length_dw++] =
1258		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
1259
1260	/* write CS partial flush packet */
1261	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
1262	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1263
1264	/* schedule the ib on the ring */
1265	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1266	if (r) {
1267		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
1268		goto fail;
1269	}
1270
1271	/* wait for the GPU to finish processing the IB */
1272	r = fence_wait(f, false);
1273	if (r) {
1274		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1275		goto fail;
1276	}
1277
1278	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
1279	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
1280	WREG32(mmGB_EDC_MODE, tmp);
1281
1282	tmp = RREG32(mmCC_GC_EDC_CONFIG);
1283	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
1284	WREG32(mmCC_GC_EDC_CONFIG, tmp);
1285
1286
1287	/* read back registers to clear the counters */
1288	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
1289		RREG32(sec_ded_counter_registers[i]);
1290
1291fail:
1293	amdgpu_ib_free(adev, &ib, NULL);
1294	fence_put(f);
1295
1296	return r;
1297}
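/*
 * Editor's note: each of the three dispatches built above emits the
 * same dword stream, which is exactly what the total_size arithmetic
 * at the top of the function accounts for:
 *
 *	(ARRAY_SIZE(regs) / 2) * 3   SET_SH_REG packets, 3 dwords per pair
 *	+ 4                          COMPUTE_PGM_LO/HI write (the address
 *	                             is stored >> 8, so the shader must be
 *	                             256-byte aligned)
 *	+ 5                          DISPATCH_DIRECT packet
 *	+ 2                          EVENT_WRITE CS partial flush
 *
 * all times 4 bytes per dword.
 */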
1298
1299static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1300{
1301	u32 gb_addr_config;
1302	u32 mc_shared_chmap, mc_arb_ramcfg;
1303	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
1304	u32 tmp;
1305
1306	switch (adev->asic_type) {
1307	case CHIP_TOPAZ:
1308		adev->gfx.config.max_shader_engines = 1;
1309		adev->gfx.config.max_tile_pipes = 2;
1310		adev->gfx.config.max_cu_per_sh = 6;
1311		adev->gfx.config.max_sh_per_se = 1;
1312		adev->gfx.config.max_backends_per_se = 2;
1313		adev->gfx.config.max_texture_channel_caches = 2;
1314		adev->gfx.config.max_gprs = 256;
1315		adev->gfx.config.max_gs_threads = 32;
1316		adev->gfx.config.max_hw_contexts = 8;
1317
1318		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1319		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1320		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1321		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1322		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
1323		break;
1324	case CHIP_FIJI:
1325		adev->gfx.config.max_shader_engines = 4;
1326		adev->gfx.config.max_tile_pipes = 16;
1327		adev->gfx.config.max_cu_per_sh = 16;
1328		adev->gfx.config.max_sh_per_se = 1;
1329		adev->gfx.config.max_backends_per_se = 4;
1330		adev->gfx.config.max_texture_channel_caches = 16;
1331		adev->gfx.config.max_gprs = 256;
1332		adev->gfx.config.max_gs_threads = 32;
1333		adev->gfx.config.max_hw_contexts = 8;
1334
1335		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1336		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1337		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1338		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1339		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1340		break;
1341	case CHIP_TONGA:
1342		adev->gfx.config.max_shader_engines = 4;
1343		adev->gfx.config.max_tile_pipes = 8;
1344		adev->gfx.config.max_cu_per_sh = 8;
1345		adev->gfx.config.max_sh_per_se = 1;
1346		adev->gfx.config.max_backends_per_se = 2;
1347		adev->gfx.config.max_texture_channel_caches = 8;
1348		adev->gfx.config.max_gprs = 256;
1349		adev->gfx.config.max_gs_threads = 32;
1350		adev->gfx.config.max_hw_contexts = 8;
1351
1352		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1353		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1354		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1355		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1356		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1357		break;
1358	case CHIP_CARRIZO:
1359		adev->gfx.config.max_shader_engines = 1;
1360		adev->gfx.config.max_tile_pipes = 2;
1361		adev->gfx.config.max_sh_per_se = 1;
1362		adev->gfx.config.max_backends_per_se = 2;
1363
1364		switch (adev->pdev->revision) {
1365		case 0xc4:
1366		case 0x84:
1367		case 0xc8:
1368		case 0xcc:
1369		case 0xe1:
1370		case 0xe3:
1371			/* B10 */
1372			adev->gfx.config.max_cu_per_sh = 8;
1373			break;
1374		case 0xc5:
1375		case 0x81:
1376		case 0x85:
1377		case 0xc9:
1378		case 0xcd:
1379		case 0xe2:
1380		case 0xe4:
1381			/* B8 */
1382			adev->gfx.config.max_cu_per_sh = 6;
1383			break;
1384		case 0xc6:
1385		case 0xca:
1386		case 0xce:
1387		case 0x88:
1388			/* B6 */
1389			adev->gfx.config.max_cu_per_sh = 6;
1390			break;
1391		case 0xc7:
1392		case 0x87:
1393		case 0xcb:
1394		case 0xe5:
1395		case 0x89:
1396		default:
1397			/* B4 */
1398			adev->gfx.config.max_cu_per_sh = 4;
1399			break;
1400		}
1401
1402		adev->gfx.config.max_texture_channel_caches = 2;
1403		adev->gfx.config.max_gprs = 256;
1404		adev->gfx.config.max_gs_threads = 32;
1405		adev->gfx.config.max_hw_contexts = 8;
1406
1407		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1408		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1409		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1410		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1411		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1412		break;
1413	case CHIP_STONEY:
1414		adev->gfx.config.max_shader_engines = 1;
1415		adev->gfx.config.max_tile_pipes = 2;
1416		adev->gfx.config.max_sh_per_se = 1;
1417		adev->gfx.config.max_backends_per_se = 1;
1418
1419		switch (adev->pdev->revision) {
1420		case 0xc0:
1421		case 0xc1:
1422		case 0xc2:
1423		case 0xc4:
1424		case 0xc8:
1425		case 0xc9:
1426			adev->gfx.config.max_cu_per_sh = 3;
1427			break;
1428		case 0xd0:
1429		case 0xd1:
1430		case 0xd2:
1431		default:
1432			adev->gfx.config.max_cu_per_sh = 2;
1433			break;
1434		}
1435
1436		adev->gfx.config.max_texture_channel_caches = 2;
1437		adev->gfx.config.max_gprs = 256;
1438		adev->gfx.config.max_gs_threads = 16;
1439		adev->gfx.config.max_hw_contexts = 8;
1440
1441		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1442		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1443		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1444		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1445		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
1446		break;
1447	default:
1448		adev->gfx.config.max_shader_engines = 2;
1449		adev->gfx.config.max_tile_pipes = 4;
1450		adev->gfx.config.max_cu_per_sh = 2;
1451		adev->gfx.config.max_sh_per_se = 1;
1452		adev->gfx.config.max_backends_per_se = 2;
1453		adev->gfx.config.max_texture_channel_caches = 4;
1454		adev->gfx.config.max_gprs = 256;
1455		adev->gfx.config.max_gs_threads = 32;
1456		adev->gfx.config.max_hw_contexts = 8;
1457
1458		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1459		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1460		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1461		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1462		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1463		break;
1464	}
1465
1466	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
1467	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1468	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1469
1470	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1471	adev->gfx.config.mem_max_burst_length_bytes = 256;
1472	if (adev->flags & AMD_IS_APU) {
1473		/* Get memory bank mapping mode. */
1474		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
1475		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1476		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1477
1478		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
1479		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
1480		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1481
1482		/* Validate settings in case only one DIMM installed. */
1483		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
1484			dimm00_addr_map = 0;
1485		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
1486			dimm01_addr_map = 0;
1487		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
1488			dimm10_addr_map = 0;
1489		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
1490			dimm11_addr_map = 0;
1491
1492		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
1493	/* If ROW size(DIMM1) != ROW size(DIMM0), use the larger ROW size. */
1494		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
1495			adev->gfx.config.mem_row_size_in_kb = 2;
1496		else
1497			adev->gfx.config.mem_row_size_in_kb = 1;
1498	} else {
1499		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
1500		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1501		if (adev->gfx.config.mem_row_size_in_kb > 4)
1502			adev->gfx.config.mem_row_size_in_kb = 4;
1503	}
1504
1505	adev->gfx.config.shader_engine_tile_size = 32;
1506	adev->gfx.config.num_gpus = 1;
1507	adev->gfx.config.multi_gpu_tile_size = 64;
1508
1509	/* fix up row size */
1510	switch (adev->gfx.config.mem_row_size_in_kb) {
1511	case 1:
1512	default:
1513		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
1514		break;
1515	case 2:
1516		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
1517		break;
1518	case 4:
1519		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
1520		break;
1521	}
1522	adev->gfx.config.gb_addr_config = gb_addr_config;
1523}
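/*
 * Editor's note: a worked example for the dGPU branch above:
 * mem_row_size_in_kb = (4 * (1 << (8 + NOOFCOLS))) / 1024 yields 1 KB
 * for NOOFCOLS = 0, 2 KB for NOOFCOLS = 1 and 4 KB for NOOFCOLS = 2,
 * clamped at 4 KB so the result always maps onto the
 * GB_ADDR_CONFIG.ROW_SIZE encoding used in the switch below
 * (0 = 1 KB, 1 = 2 KB, 2 = 4 KB).
 */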
1524
1525static int gfx_v8_0_sw_init(void *handle)
1526{
1527	int i, r;
1528	struct amdgpu_ring *ring;
1529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1530
1531	/* EOP Event */
1532	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
1533	if (r)
1534		return r;
1535
1536	/* Privileged reg */
1537	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
1538	if (r)
1539		return r;
1540
1541	/* Privileged inst */
1542	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
1543	if (r)
1544		return r;
1545
1546	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1547
1548	gfx_v8_0_scratch_init(adev);
1549
1550	r = gfx_v8_0_init_microcode(adev);
1551	if (r) {
1552		DRM_ERROR("Failed to load gfx firmware!\n");
1553		return r;
1554	}
1555
1556	r = gfx_v8_0_mec_init(adev);
1557	if (r) {
1558		DRM_ERROR("Failed to init MEC BOs!\n");
1559		return r;
1560	}
1561
1562	/* set up the gfx ring */
1563	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1564		ring = &adev->gfx.gfx_ring[i];
1565		ring->ring_obj = NULL;
1566		sprintf(ring->name, "gfx");
1567		/* no gfx doorbells on iceland */
1568		if (adev->asic_type != CHIP_TOPAZ) {
1569			ring->use_doorbell = true;
1570			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
1571		}
1572
1573		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
1574				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
1575				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
1576				     AMDGPU_RING_TYPE_GFX);
1577		if (r)
1578			return r;
1579	}
1580
1581	/* set up the compute queues */
1582	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1583		unsigned irq_type;
1584
1585		/* max 32 queues per MEC */
1586		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
1587			DRM_ERROR("Too many (%d) compute rings!\n", i);
1588			break;
1589		}
1590		ring = &adev->gfx.compute_ring[i];
1591		ring->ring_obj = NULL;
1592		ring->use_doorbell = true;
1593		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
1594		ring->me = 1; /* first MEC */
1595		ring->pipe = i / 8;
1596		ring->queue = i % 8;
1597		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
1598		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
1599		/* type-2 packets are deprecated on MEC, use type-3 instead */
1600		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
1601				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
1602				     &adev->gfx.eop_irq, irq_type,
1603				     AMDGPU_RING_TYPE_COMPUTE);
1604		if (r)
1605			return r;
1606	}
1607
1608	/* reserve GDS, GWS and OA resource for gfx */
1609	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
1610			PAGE_SIZE, true,
1611			AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
1612			NULL, &adev->gds.gds_gfx_bo);
1613	if (r)
1614		return r;
1615
1616	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
1617		PAGE_SIZE, true,
1618		AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
1619		NULL, &adev->gds.gws_gfx_bo);
1620	if (r)
1621		return r;
1622
1623	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
1624			PAGE_SIZE, true,
1625			AMDGPU_GEM_DOMAIN_OA, 0, NULL,
1626			NULL, &adev->gds.oa_gfx_bo);
1627	if (r)
1628		return r;
1629
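	/* 0x8000 bytes = 32 KB of constant engine (CE) RAM */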
1630	adev->gfx.ce_ram_size = 0x8000;
1631
1632	gfx_v8_0_gpu_early_init(adev);
1633
1634	return 0;
1635}
1636
1637static int gfx_v8_0_sw_fini(void *handle)
1638{
1639	int i;
1640	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1641
1642	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
1643	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
1644	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
1645
1646	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1647		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1648	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1649		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1650
1651	gfx_v8_0_mec_fini(adev);
1652
1653	return 0;
1654}
1655
1656static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1657{
1658	uint32_t *modearray, *mod2array;
1659	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1660	const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1661	u32 reg_offset;
1662
1663	modearray = adev->gfx.config.tile_mode_array;
1664	mod2array = adev->gfx.config.macrotile_mode_array;
1665
1666	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1667		modearray[reg_offset] = 0;
1668
1669	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1670		mod2array[reg_offset] = 0;
1671
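	/*
	 * Each modearray/mod2array entry below packs the fields of one
	 * GB_TILE_MODEn / GB_MACROTILE_MODEn register using the shift macros
	 * defined at the top of this file. Indices that are never written
	 * (e.g. 7, 12, 17 and 23 on the 2-pipe parts) are presumably reserved
	 * in those configurations.
	 */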
1672	switch (adev->asic_type) {
1673	case CHIP_TOPAZ:
1674		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1675				PIPE_CONFIG(ADDR_SURF_P2) |
1676				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1677				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1678		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1679				PIPE_CONFIG(ADDR_SURF_P2) |
1680				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1681				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1682		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1683				PIPE_CONFIG(ADDR_SURF_P2) |
1684				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1685				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1686		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1687				PIPE_CONFIG(ADDR_SURF_P2) |
1688				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1689				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1690		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1691				PIPE_CONFIG(ADDR_SURF_P2) |
1692				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1693				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1694		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1695				PIPE_CONFIG(ADDR_SURF_P2) |
1696				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1697				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1698		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1699				PIPE_CONFIG(ADDR_SURF_P2) |
1700				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1701				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1702		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1703				PIPE_CONFIG(ADDR_SURF_P2));
1704		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1705				PIPE_CONFIG(ADDR_SURF_P2) |
1706				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1707				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1708		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1709				 PIPE_CONFIG(ADDR_SURF_P2) |
1710				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1711				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1712		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1713				 PIPE_CONFIG(ADDR_SURF_P2) |
1714				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1715				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1716		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1717				 PIPE_CONFIG(ADDR_SURF_P2) |
1718				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1719				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1720		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1721				 PIPE_CONFIG(ADDR_SURF_P2) |
1722				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1723				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1724		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1725				 PIPE_CONFIG(ADDR_SURF_P2) |
1726				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1727				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1728		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1729				 PIPE_CONFIG(ADDR_SURF_P2) |
1730				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1731				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1732		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1733				 PIPE_CONFIG(ADDR_SURF_P2) |
1734				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1735				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1736		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1737				 PIPE_CONFIG(ADDR_SURF_P2) |
1738				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1739				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1740		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1741				 PIPE_CONFIG(ADDR_SURF_P2) |
1742				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1743				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1744		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1745				 PIPE_CONFIG(ADDR_SURF_P2) |
1746				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1747				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1748		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1749				 PIPE_CONFIG(ADDR_SURF_P2) |
1750				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1751				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1752		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1753				 PIPE_CONFIG(ADDR_SURF_P2) |
1754				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1755				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1756		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1757				 PIPE_CONFIG(ADDR_SURF_P2) |
1758				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1759				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1760		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1761				 PIPE_CONFIG(ADDR_SURF_P2) |
1762				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1763				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1764		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1765				 PIPE_CONFIG(ADDR_SURF_P2) |
1766				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1767				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1768		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1769				 PIPE_CONFIG(ADDR_SURF_P2) |
1770				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1771				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1772		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1773				 PIPE_CONFIG(ADDR_SURF_P2) |
1774				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1775				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1776
1777		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1778				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1779				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1780				NUM_BANKS(ADDR_SURF_8_BANK));
1781		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1782				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1783				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1784				NUM_BANKS(ADDR_SURF_8_BANK));
1785		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1786				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1787				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1788				NUM_BANKS(ADDR_SURF_8_BANK));
1789		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1790				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1791				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1792				NUM_BANKS(ADDR_SURF_8_BANK));
1793		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1794				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1795				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1796				NUM_BANKS(ADDR_SURF_8_BANK));
1797		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1798				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1799				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1800				NUM_BANKS(ADDR_SURF_8_BANK));
1801		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1802				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1803				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1804				NUM_BANKS(ADDR_SURF_8_BANK));
1805		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1806				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1807				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1808				NUM_BANKS(ADDR_SURF_16_BANK));
1809		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1810				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1811				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1812				NUM_BANKS(ADDR_SURF_16_BANK));
1813		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1814				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1815				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1816				 NUM_BANKS(ADDR_SURF_16_BANK));
1817		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1818				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1819				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1820				 NUM_BANKS(ADDR_SURF_16_BANK));
1821		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1822				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1823				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1824				 NUM_BANKS(ADDR_SURF_16_BANK));
1825		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1826				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1827				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1828				 NUM_BANKS(ADDR_SURF_16_BANK));
1829		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1830				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1831				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1832				 NUM_BANKS(ADDR_SURF_8_BANK));
1833
1834		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1835			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
1836			    reg_offset != 23)
1837				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
1838
1839		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1840			if (reg_offset != 7)
1841				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
1842
1843		break;
1844	case CHIP_FIJI:
1845		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1846				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1847				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1848				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1849		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1850				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1851				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1852				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1853		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1854				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1855				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1856				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1857		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1858				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1859				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1860				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1861		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1862				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1863				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1864				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1865		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1866				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1867				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1868				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1869		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1870				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1871				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1872				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1873		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1874				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1875				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1876				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1877		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1878				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1879		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1880				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1881				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1882				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1883		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1884				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1885				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1886				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1887		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1888				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1889				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1890				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1891		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1892				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1893				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1894				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1895		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1896				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1897				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1898				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1899		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1900				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1901				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1902				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1903		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1904				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1905				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1906				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1907		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1908				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1909				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1910				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1911		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1912				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1913				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1914				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1915		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1916				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1917				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1918				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1919		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1920				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1921				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1922				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1923		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1924				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1925				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1926				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1927		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1928				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1929				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1930				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1931		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1932				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1933				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1934				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1935		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1936				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1937				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1938				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1939		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1940				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1941				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1942				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1943		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1944				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1945				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1946				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1947		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1948				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1949				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1950				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1951		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1952				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1953				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1954				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1955		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1956				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1957				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1958				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1959		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1960				 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1961				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1962				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1963		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1964				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1965				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1966				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1967
1968		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1969				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1970				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1971				NUM_BANKS(ADDR_SURF_8_BANK));
1972		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1973				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1974				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1975				NUM_BANKS(ADDR_SURF_8_BANK));
1976		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1977				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1978				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1979				NUM_BANKS(ADDR_SURF_8_BANK));
1980		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1981				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1982				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1983				NUM_BANKS(ADDR_SURF_8_BANK));
1984		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1985				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1986				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1987				NUM_BANKS(ADDR_SURF_8_BANK));
1988		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1989				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1990				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1991				NUM_BANKS(ADDR_SURF_8_BANK));
1992		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1993				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1994				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1995				NUM_BANKS(ADDR_SURF_8_BANK));
1996		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1997				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1998				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1999				NUM_BANKS(ADDR_SURF_8_BANK));
2000		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2001				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2002				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2003				NUM_BANKS(ADDR_SURF_8_BANK));
2004		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2005				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2006				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2007				 NUM_BANKS(ADDR_SURF_8_BANK));
2008		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2009				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2010				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2011				 NUM_BANKS(ADDR_SURF_8_BANK));
2012		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2013				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2014				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2015				 NUM_BANKS(ADDR_SURF_8_BANK));
2016		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2017				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2018				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2019				 NUM_BANKS(ADDR_SURF_8_BANK));
2020		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2021				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2022				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2023				 NUM_BANKS(ADDR_SURF_4_BANK));
2024
2025		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2026			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2027
2028		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2029			if (reg_offset != 7)
2030				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2031
2032		break;
2033	case CHIP_TONGA:
2034		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2035				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2036				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2037				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2038		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2039				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2040				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2041				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2042		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2043				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2044				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2045				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2046		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2047				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2048				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2049				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2050		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2051				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2052				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2053				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2054		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2055				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2056				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2057				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2058		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2059				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2060				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2061				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2062		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2063				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2064				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2065				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2066		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2067				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2068		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2069				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2070				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2071				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2072		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2073				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2074				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2075				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2076		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2077				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2078				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2079				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2080		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2081				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2082				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2083				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2084		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2085				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2086				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2087				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2088		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2089				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2090				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2091				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2092		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2093				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2094				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2095				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2096		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2097				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2098				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2099				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2100		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2101				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2102				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2103				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2104		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2105				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2106				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2107				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2108		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2109				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2110				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2111				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2112		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2113				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2114				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2115				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2116		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2117				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2118				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2119				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2120		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2121				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2122				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2123				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2124		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2125				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2126				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2127				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2128		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2129				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2130				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2131				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2132		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2133				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2134				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2135				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2136		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2137				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2138				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2139				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2140		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2141				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2142				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2143				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2144		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2145				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2146				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2147				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2148		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2149				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2150				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2151				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2152		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2153				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2154				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2155				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2156
2157		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2158				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2159				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2160				NUM_BANKS(ADDR_SURF_16_BANK));
2161		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2162				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2163				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2164				NUM_BANKS(ADDR_SURF_16_BANK));
2165		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2166				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2167				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2168				NUM_BANKS(ADDR_SURF_16_BANK));
2169		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2170				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2171				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2172				NUM_BANKS(ADDR_SURF_16_BANK));
2173		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2174				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2175				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2176				NUM_BANKS(ADDR_SURF_16_BANK));
2177		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2178				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2179				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2180				NUM_BANKS(ADDR_SURF_16_BANK));
2181		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2182				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2183				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2184				NUM_BANKS(ADDR_SURF_16_BANK));
2185		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2186				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2187				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2188				NUM_BANKS(ADDR_SURF_16_BANK));
2189		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2190				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2191				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2192				NUM_BANKS(ADDR_SURF_16_BANK));
2193		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2194				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2195				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2196				 NUM_BANKS(ADDR_SURF_16_BANK));
2197		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2198				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2199				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2200				 NUM_BANKS(ADDR_SURF_16_BANK));
2201		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2202				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2203				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2204				 NUM_BANKS(ADDR_SURF_8_BANK));
2205		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2206				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2207				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2208				 NUM_BANKS(ADDR_SURF_4_BANK));
2209		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2210				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2211				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2212				 NUM_BANKS(ADDR_SURF_4_BANK));
2213
2214		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2215			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2216
2217		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2218			if (reg_offset != 7)
2219				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2220
2221		break;
2222	case CHIP_STONEY:
2223		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2224				PIPE_CONFIG(ADDR_SURF_P2) |
2225				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2226				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2227		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2228				PIPE_CONFIG(ADDR_SURF_P2) |
2229				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2230				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2231		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2232				PIPE_CONFIG(ADDR_SURF_P2) |
2233				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2234				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2235		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2236				PIPE_CONFIG(ADDR_SURF_P2) |
2237				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2238				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2239		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2240				PIPE_CONFIG(ADDR_SURF_P2) |
2241				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2242				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2243		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2244				PIPE_CONFIG(ADDR_SURF_P2) |
2245				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2246				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2247		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2248				PIPE_CONFIG(ADDR_SURF_P2) |
2249				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2250				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2251		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2252				PIPE_CONFIG(ADDR_SURF_P2));
2253		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2254				PIPE_CONFIG(ADDR_SURF_P2) |
2255				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2256				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2257		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2258				 PIPE_CONFIG(ADDR_SURF_P2) |
2259				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2260				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2261		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2262				 PIPE_CONFIG(ADDR_SURF_P2) |
2263				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2264				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2265		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2266				 PIPE_CONFIG(ADDR_SURF_P2) |
2267				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2268				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2269		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2270				 PIPE_CONFIG(ADDR_SURF_P2) |
2271				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2272				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2273		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2274				 PIPE_CONFIG(ADDR_SURF_P2) |
2275				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2276				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2277		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2278				 PIPE_CONFIG(ADDR_SURF_P2) |
2279				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2280				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2281		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2282				 PIPE_CONFIG(ADDR_SURF_P2) |
2283				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2284				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2285		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2286				 PIPE_CONFIG(ADDR_SURF_P2) |
2287				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2288				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2289		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2290				 PIPE_CONFIG(ADDR_SURF_P2) |
2291				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2292				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2293		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2294				 PIPE_CONFIG(ADDR_SURF_P2) |
2295				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2296				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2297		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2298				 PIPE_CONFIG(ADDR_SURF_P2) |
2299				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2300				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2301		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2302				 PIPE_CONFIG(ADDR_SURF_P2) |
2303				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2304				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2305		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2306				 PIPE_CONFIG(ADDR_SURF_P2) |
2307				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2308				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2309		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2310				 PIPE_CONFIG(ADDR_SURF_P2) |
2311				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2312				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2313		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2314				 PIPE_CONFIG(ADDR_SURF_P2) |
2315				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2316				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2317		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2318				 PIPE_CONFIG(ADDR_SURF_P2) |
2319				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2320				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2321		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2322				 PIPE_CONFIG(ADDR_SURF_P2) |
2323				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2324				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2325
2326		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2327				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2328				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2329				NUM_BANKS(ADDR_SURF_8_BANK));
2330		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2331				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2332				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2333				NUM_BANKS(ADDR_SURF_8_BANK));
2334		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2335				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2336				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2337				NUM_BANKS(ADDR_SURF_8_BANK));
2338		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2339				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2340				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2341				NUM_BANKS(ADDR_SURF_8_BANK));
2342		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2343				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2344				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2345				NUM_BANKS(ADDR_SURF_8_BANK));
2346		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2347				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2348				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2349				NUM_BANKS(ADDR_SURF_8_BANK));
2350		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2351				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2352				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2353				NUM_BANKS(ADDR_SURF_8_BANK));
2354		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2355				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2356				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2357				NUM_BANKS(ADDR_SURF_16_BANK));
2358		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2359				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2360				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2361				NUM_BANKS(ADDR_SURF_16_BANK));
2362		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2363				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2364				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2365				 NUM_BANKS(ADDR_SURF_16_BANK));
2366		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2367				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2368				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2369				 NUM_BANKS(ADDR_SURF_16_BANK));
2370		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2371				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2372				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2373				 NUM_BANKS(ADDR_SURF_16_BANK));
2374		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2375				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2376				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2377				 NUM_BANKS(ADDR_SURF_16_BANK));
2378		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2379				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2380				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2381				 NUM_BANKS(ADDR_SURF_8_BANK));
2382
2383		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2384			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2385			    reg_offset != 23)
2386				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2387
2388		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2389			if (reg_offset != 7)
2390				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2391
2392		break;
2393	default:
2394		dev_warn(adev->dev,
2395			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
2396			 adev->asic_type);
2397
2398	case CHIP_CARRIZO:
2399		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2400				PIPE_CONFIG(ADDR_SURF_P2) |
2401				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2402				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2403		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2404				PIPE_CONFIG(ADDR_SURF_P2) |
2405				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2406				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2407		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2408				PIPE_CONFIG(ADDR_SURF_P2) |
2409				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2410				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2411		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2412				PIPE_CONFIG(ADDR_SURF_P2) |
2413				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2414				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2415		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2416				PIPE_CONFIG(ADDR_SURF_P2) |
2417				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2418				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2419		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2420				PIPE_CONFIG(ADDR_SURF_P2) |
2421				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2422				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2423		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2424				PIPE_CONFIG(ADDR_SURF_P2) |
2425				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2426				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2427		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2428				PIPE_CONFIG(ADDR_SURF_P2));
2429		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2430				PIPE_CONFIG(ADDR_SURF_P2) |
2431				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2432				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2433		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2434				 PIPE_CONFIG(ADDR_SURF_P2) |
2435				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2436				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2437		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2438				 PIPE_CONFIG(ADDR_SURF_P2) |
2439				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2440				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2441		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2442				 PIPE_CONFIG(ADDR_SURF_P2) |
2443				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2444				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2445		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2446				 PIPE_CONFIG(ADDR_SURF_P2) |
2447				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2448				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2449		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2450				 PIPE_CONFIG(ADDR_SURF_P2) |
2451				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2452				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2453		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2454				 PIPE_CONFIG(ADDR_SURF_P2) |
2455				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2456				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2457		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2458				 PIPE_CONFIG(ADDR_SURF_P2) |
2459				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2460				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2461		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2462				 PIPE_CONFIG(ADDR_SURF_P2) |
2463				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2464				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2465		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2466				 PIPE_CONFIG(ADDR_SURF_P2) |
2467				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2468				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2469		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2470				 PIPE_CONFIG(ADDR_SURF_P2) |
2471				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2472				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2473		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2474				 PIPE_CONFIG(ADDR_SURF_P2) |
2475				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2476				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2477		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2478				 PIPE_CONFIG(ADDR_SURF_P2) |
2479				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2480				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2481		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2482				 PIPE_CONFIG(ADDR_SURF_P2) |
2483				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2484				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2485		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2486				 PIPE_CONFIG(ADDR_SURF_P2) |
2487				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2488				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2489		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2490				 PIPE_CONFIG(ADDR_SURF_P2) |
2491				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2492				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2493		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2494				 PIPE_CONFIG(ADDR_SURF_P2) |
2495				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2496				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2497		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2498				 PIPE_CONFIG(ADDR_SURF_P2) |
2499				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2500				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2501
2502		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2503				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2504				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2505				NUM_BANKS(ADDR_SURF_8_BANK));
2506		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2507				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2508				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2509				NUM_BANKS(ADDR_SURF_8_BANK));
2510		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2511				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2512				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2513				NUM_BANKS(ADDR_SURF_8_BANK));
2514		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2515				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2516				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2517				NUM_BANKS(ADDR_SURF_8_BANK));
2518		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2519				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2520				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2521				NUM_BANKS(ADDR_SURF_8_BANK));
2522		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2523				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2524				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2525				NUM_BANKS(ADDR_SURF_8_BANK));
2526		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2527				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2528				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2529				NUM_BANKS(ADDR_SURF_8_BANK));
2530		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2531				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2532				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2533				NUM_BANKS(ADDR_SURF_16_BANK));
2534		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2535				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2536				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2537				NUM_BANKS(ADDR_SURF_16_BANK));
2538		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2539				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2540				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2541				 NUM_BANKS(ADDR_SURF_16_BANK));
2542		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2543				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2544				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2545				 NUM_BANKS(ADDR_SURF_16_BANK));
2546		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2547				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2548				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2549				 NUM_BANKS(ADDR_SURF_16_BANK));
2550		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2551				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2552				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2553				 NUM_BANKS(ADDR_SURF_16_BANK));
2554		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2555				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2556				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2557				 NUM_BANKS(ADDR_SURF_8_BANK));
2558
2559		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2560			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2561			    reg_offset != 23)
2562				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2563
2564		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2565			if (reg_offset != 7)
2566				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2567
2568		break;
2569	}
2570}
2571
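/*
 * GRBM_GFX_INDEX steers subsequent per-SE/per-SH register accesses.
 * Passing 0xffffffff for se_num and/or sh_num selects broadcast mode in
 * that dimension, e.g.
 *
 *	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
 *
 * targets every shader engine and shader array. Callers are expected to
 * hold adev->grbm_idx_mutex around the select/access/deselect sequence.
 */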
2572void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
2573{
2574	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2575
2576	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
2577		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2578		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2579	} else if (se_num == 0xffffffff) {
2580		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2581		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2582	} else if (sh_num == 0xffffffff) {
2583		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2584		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2585	} else {
2586		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2587		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2588	}
2589	WREG32(mmGRBM_GFX_INDEX, data);
2590}
2591
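/*
 * Returns a mask with the low bit_width bits set, e.g. bit_width = 4
 * yields 0xf. The 1ULL intermediate avoids undefined behaviour when
 * bit_width == 32.
 */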
2592static u32 gfx_v8_0_create_bitmask(u32 bit_width)
2593{
2594	return (u32)((1ULL << bit_width) - 1);
2595}
2596
2597static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2598{
2599	u32 data, mask;
2600
2601	data = RREG32(mmCC_RB_BACKEND_DISABLE);
2602	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
2603
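	/*
	 * The CC_ and GC_USER_ RB_BACKEND_DISABLE registers appear to share
	 * the same BACKEND_DISABLE field layout, so one mask/shift pair is
	 * applied to the OR of both values.
	 */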
2604	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2605	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2606
2607	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
2608				       adev->gfx.config.max_sh_per_se);
2609
2610	return (~data) & mask;
2611}
2612
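/*
 * Walk every shader engine / shader array, read that SH's active
 * render-backend bitmap and pack the per-SH bitmaps into one global
 * bitmap. As a sketch: with 4 SEs, 1 SH per SE and 2 RBs per SH,
 * bits [1:0] would describe SE0, bits [3:2] SE1, and so on.
 */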
2613static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
2614{
2615	int i, j;
2616	u32 data;
2617	u32 active_rbs = 0;
2618	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2619					adev->gfx.config.max_sh_per_se;
2620
2621	mutex_lock(&adev->grbm_idx_mutex);
2622	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2623		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2624			gfx_v8_0_select_se_sh(adev, i, j);
2625			data = gfx_v8_0_get_rb_active_bitmap(adev);
2626			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2627					       rb_bitmap_width_per_sh);
2628		}
2629	}
2630	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
2631	mutex_unlock(&adev->grbm_idx_mutex);
2632
2633	adev->gfx.config.backend_enable_mask = active_rbs;
2634	adev->gfx.config.num_rbs = hweight32(active_rbs);
2635}
2636
2637/**
2638 * gfx_v8_0_init_compute_vmid - init the compute VMIDs
2639 *
2640 * @adev: amdgpu_device pointer
2641 *
2642 * Initialize the SH_MEM registers for the compute VMIDs
2643 *
2644 */
2645#define DEFAULT_SH_MEM_BASES	(0x6000)
2646#define FIRST_COMPUTE_VMID	(8)
2647#define LAST_COMPUTE_VMID	(16)
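/* The init loop below covers VMIDs 8..15; LAST_COMPUTE_VMID is exclusive. */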
2648static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
2649{
2650	int i;
2651	uint32_t sh_mem_config;
2652	uint32_t sh_mem_bases;
2653
2654	/*
2655	 * Configure apertures:
2656	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2657	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2658	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2659	 */
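	/*
	 * SH_MEM_BASES packs both aperture bases into a single register (one
	 * in the low 16 bits, one in the high 16), hence DEFAULT_SH_MEM_BASES
	 * being replicated here; 0x6000 corresponds to the
	 * 0x6000000000000000 region described above.
	 */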
2660	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2661
2662	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
2663			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
2664			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2665			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
2666			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
2667			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
2668
2669	mutex_lock(&adev->srbm_mutex);
2670	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2671		vi_srbm_select(adev, 0, 0, 0, i);
2672		/* CP and shaders */
2673		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
2674		WREG32(mmSH_MEM_APE1_BASE, 1);
2675		WREG32(mmSH_MEM_APE1_LIMIT, 0);
2676		WREG32(mmSH_MEM_BASES, sh_mem_bases);
2677	}
2678	vi_srbm_select(adev, 0, 0, 0, 0);
2679	mutex_unlock(&adev->srbm_mutex);
2680}
2681
2682static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2683{
2684	u32 tmp;
2685	int i;
2686
2687	tmp = RREG32(mmGRBM_CNTL);
2688	tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
2689	WREG32(mmGRBM_CNTL, tmp);
2690
2691	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2692	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
2693	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
2694
2695	gfx_v8_0_tiling_mode_table_init(adev);
2696
2697	gfx_v8_0_setup_rb(adev);
2698
2699	/* XXX SH_MEM regs */
2700	/* where to put LDS, scratch, GPUVM in FSA64 space */
2701	mutex_lock(&adev->srbm_mutex);
2702	for (i = 0; i < 16; i++) {
2703		vi_srbm_select(adev, 0, 0, 0, i);
2704		/* CP and shaders */
2705		if (i == 0) {
2706			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
2707			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
2708			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
2709					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2710			WREG32(mmSH_MEM_CONFIG, tmp);
2711		} else {
2712			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
2713			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
2714			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
2715					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2716			WREG32(mmSH_MEM_CONFIG, tmp);
2717		}
2718
2719		WREG32(mmSH_MEM_APE1_BASE, 1);
2720		WREG32(mmSH_MEM_APE1_LIMIT, 0);
2721		WREG32(mmSH_MEM_BASES, 0);
2722	}
2723	vi_srbm_select(adev, 0, 0, 0, 0);
2724	mutex_unlock(&adev->srbm_mutex);
2725
2726	gfx_v8_0_init_compute_vmid(adev);
2727
2728	mutex_lock(&adev->grbm_idx_mutex);
2729	/*
2730	 * making sure that the following register writes will be broadcast
2731	 * to all the shaders
2732	 */
2733	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
2734
2735	WREG32(mmPA_SC_FIFO_SIZE,
2736		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
2737			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
2738		   (adev->gfx.config.sc_prim_fifo_size_backend <<
2739			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
2740		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
2741			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
2742		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
2743			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
2744	mutex_unlock(&adev->grbm_idx_mutex);
2745
2746}
2747
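/*
 * Two-phase poll: first the per-CU serdes masters are checked on every
 * SE/SH, then the non-CU masters (SE, GC, TC0/TC1) are polled globally.
 * Each phase is bounded by adev->usec_timeout iterations of udelay(1).
 */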
2748static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2749{
2750	u32 i, j, k;
2751	u32 mask;
2752
2753	mutex_lock(&adev->grbm_idx_mutex);
2754	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2755		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2756			gfx_v8_0_select_se_sh(adev, i, j);
2757			for (k = 0; k < adev->usec_timeout; k++) {
2758				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2759					break;
2760				udelay(1);
2761			}
2762		}
2763	}
2764	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
2765	mutex_unlock(&adev->grbm_idx_mutex);
2766
2767	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2768		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2769		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2770		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2771	for (k = 0; k < adev->usec_timeout; k++) {
2772		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2773			break;
2774		udelay(1);
2775	}
2776}
2777
2778static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2779					       bool enable)
2780{
2781	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
2782
2783	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2784	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2785	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2786	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2787
2788	WREG32(mmCP_INT_CNTL_RING0, tmp);
2789}
2790
2791void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
2792{
2793	u32 tmp = RREG32(mmRLC_CNTL);
2794
2795	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2796	WREG32(mmRLC_CNTL, tmp);
2797
2798	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
2799
2800	gfx_v8_0_wait_for_rlc_serdes(adev);
2801}
2802
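/*
 * Pulse SOFT_RESET_RLC in GRBM_SOFT_RESET, allowing 50 us of settle time
 * on each edge.
 */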
2803static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
2804{
2805	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
2806
2807	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2808	WREG32(mmGRBM_SOFT_RESET, tmp);
2809	udelay(50);
2810	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2811	WREG32(mmGRBM_SOFT_RESET, tmp);
2812	udelay(50);
2813}
2814
2815static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
2816{
2817	u32 tmp = RREG32(mmRLC_CNTL);
2818
2819	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
2820	WREG32(mmRLC_CNTL, tmp);
2821
2822	/* on Carrizo the CP interrupt is only enabled after the CP is initialized */
2823	if (!(adev->flags & AMD_IS_APU))
2824		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
2825
2826	udelay(50);
2827}
2828
2829static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
2830{
2831	const struct rlc_firmware_header_v2_0 *hdr;
2832	const __le32 *fw_data;
2833	unsigned i, fw_size;
2834
2835	if (!adev->gfx.rlc_fw)
2836		return -EINVAL;
2837
2838	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2839	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2840
2841	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2842			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2843	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2844
2845	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
2846	for (i = 0; i < fw_size; i++)
2847		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2848	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2849
2850	return 0;
2851}
2852
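/*
 * Full RLC bring-up: halt the RLC, disable clock and power gating, soft
 * reset it, load its microcode directly unless the SMU owns firmware
 * loading, then restart it.
 */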
2853static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
2854{
2855	int r;
2856
2857	gfx_v8_0_rlc_stop(adev);
2858
2859	/* disable CG */
2860	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
2861
2862	/* disable PG */
2863	WREG32(mmRLC_PG_CNTL, 0);
2864
2865	gfx_v8_0_rlc_reset(adev);
2866
2867	if (!adev->pp_enabled) {
2868		if (!adev->firmware.smu_load) {
2869			/* legacy rlc firmware loading */
2870			r = gfx_v8_0_rlc_load_microcode(adev);
2871			if (r)
2872				return r;
2873		} else {
2874			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
2875							AMDGPU_UCODE_ID_RLC_G);
2876			if (r)
2877				return -EINVAL;
2878		}
2879	}
2880
2881	gfx_v8_0_rlc_start(adev);
2882
2883	return 0;
2884}
2885
2886static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2887{
2888	int i;
2889	u32 tmp = RREG32(mmCP_ME_CNTL);
2890
2891	if (enable) {
2892		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
2893		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
2894		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
2895	} else {
2896		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
2897		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
2898		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
2899		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2900			adev->gfx.gfx_ring[i].ready = false;
2901	}
2902	WREG32(mmCP_ME_CNTL, tmp);
2903	udelay(50);
2904}
2905
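/*
 * Legacy (non-SMU) gfx microcode load: halt the CP, then stream the PFP,
 * CE and ME images dword by dword through their ucode data registers.
 * The trailing write of the firmware version to each address register
 * appears to follow the convention used throughout this file.
 */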
2906static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2907{
2908	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2909	const struct gfx_firmware_header_v1_0 *ce_hdr;
2910	const struct gfx_firmware_header_v1_0 *me_hdr;
2911	const __le32 *fw_data;
2912	unsigned i, fw_size;
2913
2914	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2915		return -EINVAL;
2916
2917	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2918		adev->gfx.pfp_fw->data;
2919	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2920		adev->gfx.ce_fw->data;
2921	me_hdr = (const struct gfx_firmware_header_v1_0 *)
2922		adev->gfx.me_fw->data;
2923
2924	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2925	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2926	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2927
2928	gfx_v8_0_cp_gfx_enable(adev, false);
2929
2930	/* PFP */
2931	fw_data = (const __le32 *)
2932		(adev->gfx.pfp_fw->data +
2933		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2934	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2935	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2936	for (i = 0; i < fw_size; i++)
2937		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2938	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2939
2940	/* CE */
2941	fw_data = (const __le32 *)
2942		(adev->gfx.ce_fw->data +
2943		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2944	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2945	WREG32(mmCP_CE_UCODE_ADDR, 0);
2946	for (i = 0; i < fw_size; i++)
2947		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2948	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2949
2950	/* ME */
2951	fw_data = (const __le32 *)
2952		(adev->gfx.me_fw->data +
2953		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2954	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2955	WREG32(mmCP_ME_RAM_WADDR, 0);
2956	for (i = 0; i < fw_size; i++)
2957		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2958	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2959
2960	return 0;
2961}
2962
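/*
 * Dword count of the clear-state buffer emitted by gfx_v8_0_cp_gfx_start():
 * PREAMBLE begin (2) + CONTEXT_CONTROL (3) + a SET_CONTEXT_REG header and
 * offset plus payload per extent (2 + reg_count) + the two raster config
 * registers (4) + PREAMBLE end (2) + CLEAR_STATE (2).
 */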
2963static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
2964{
2965	u32 count = 0;
2966	const struct cs_section_def *sect = NULL;
2967	const struct cs_extent_def *ext = NULL;
2968
2969	/* begin clear state */
2970	count += 2;
2971	/* context control state */
2972	count += 3;
2973
2974	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
2975		for (ext = sect->section; ext->extent != NULL; ++ext) {
2976			if (sect->id == SECT_CONTEXT)
2977				count += 2 + ext->reg_count;
2978			else
2979				return 0;
2980		}
2981	}
2982	/* pa_sc_raster_config/pa_sc_raster_config1 */
2983	count += 4;
2984	/* end clear state */
2985	count += 2;
2986	/* clear state */
2987	count += 2;
2988
2989	return count;
2990}
2991
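/*
 * Prime gfx ring 0: program the CP limits, un-halt it, then submit the
 * clear-state preamble, the per-ASIC PA_SC_RASTER_CONFIG pair and the CE
 * partition bases as the ring's first packets.
 */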
2992static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2993{
2994	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2995	const struct cs_section_def *sect = NULL;
2996	const struct cs_extent_def *ext = NULL;
2997	int r, i;
2998
2999	/* init the CP */
3000	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3001	WREG32(mmCP_ENDIAN_SWAP, 0);
3002	WREG32(mmCP_DEVICE_ID, 1);
3003
3004	gfx_v8_0_cp_gfx_enable(adev, true);
3005
3006	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
3007	if (r) {
3008		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3009		return r;
3010	}
3011
3012	/* clear state buffer */
3013	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3014	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3015
3016	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3017	amdgpu_ring_write(ring, 0x80000000);
3018	amdgpu_ring_write(ring, 0x80000000);
3019
3020	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
3021		for (ext = sect->section; ext->extent != NULL; ++ext) {
3022			if (sect->id == SECT_CONTEXT) {
3023				amdgpu_ring_write(ring,
3024				       PACKET3(PACKET3_SET_CONTEXT_REG,
3025					       ext->reg_count));
3026				amdgpu_ring_write(ring,
3027				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3028				for (i = 0; i < ext->reg_count; i++)
3029					amdgpu_ring_write(ring, ext->extent[i]);
3030			}
3031		}
3032	}
3033
3034	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3035	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
3036	switch (adev->asic_type) {
3037	case CHIP_TONGA:
3038		amdgpu_ring_write(ring, 0x16000012);
3039		amdgpu_ring_write(ring, 0x0000002A);
3040		break;
3041	case CHIP_FIJI:
3042		amdgpu_ring_write(ring, 0x3a00161a);
3043		amdgpu_ring_write(ring, 0x0000002e);
3044		break;
3045	case CHIP_TOPAZ:
3046	case CHIP_CARRIZO:
3047		amdgpu_ring_write(ring, 0x00000002);
3048		amdgpu_ring_write(ring, 0x00000000);
3049		break;
3050	case CHIP_STONEY:
3051		amdgpu_ring_write(ring, 0x00000000);
3052		amdgpu_ring_write(ring, 0x00000000);
3053		break;
3054	default:
3055		BUG();
3056	}
3057
3058	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3059	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3060
3061	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3062	amdgpu_ring_write(ring, 0);
3063
3064	/* init the CE partitions */
3065	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3066	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3067	amdgpu_ring_write(ring, 0x8000);
3068	amdgpu_ring_write(ring, 0x8000);
3069
3070	amdgpu_ring_commit(ring);
3071
3072	return 0;
3073}
3074
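/*
 * Program ring buffer 0: sizes are written as log2 values, the read
 * pointer is mirrored through the writeback slot at ring->rptr_offs, and
 * doorbells are configured on everything except Topaz, which has none
 * for gfx.  Ends by starting and ring-testing the ring.
 */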
3075static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
3076{
3077	struct amdgpu_ring *ring;
3078	u32 tmp;
3079	u32 rb_bufsz;
3080	u64 rb_addr, rptr_addr;
3081	int r;
3082
3083	/* Set the write pointer delay */
3084	WREG32(mmCP_RB_WPTR_DELAY, 0);
3085
3086	/* set the RB to use vmid 0 */
3087	WREG32(mmCP_RB_VMID, 0);
3088
3089	/* Set ring buffer size */
3090	ring = &adev->gfx.gfx_ring[0];
3091	rb_bufsz = order_base_2(ring->ring_size / 8);
3092	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3093	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3094	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
3095	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
3096#ifdef __BIG_ENDIAN
3097	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3098#endif
3099	WREG32(mmCP_RB0_CNTL, tmp);
3100
3101	/* Initialize the ring buffer's read and write pointers */
3102	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
3103	ring->wptr = 0;
3104	WREG32(mmCP_RB0_WPTR, ring->wptr);
3105
3106	/* set the wb address whether it's enabled or not */
3107	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3108	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3109	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
3110
3111	mdelay(1);
3112	WREG32(mmCP_RB0_CNTL, tmp);
3113
3114	rb_addr = ring->gpu_addr >> 8;
3115	WREG32(mmCP_RB0_BASE, rb_addr);
3116	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3117
3118	/* no gfx doorbells on iceland */
3119	if (adev->asic_type != CHIP_TOPAZ) {
3120		tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
3121		if (ring->use_doorbell) {
3122			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3123					    DOORBELL_OFFSET, ring->doorbell_index);
3124			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3125					    DOORBELL_EN, 1);
3126		} else {
3127			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3128					    DOORBELL_EN, 0);
3129		}
3130		WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
3131
3132		if (adev->asic_type == CHIP_TONGA) {
3133			tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3134					    DOORBELL_RANGE_LOWER,
3135					    AMDGPU_DOORBELL_GFX_RING0);
3136			WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3137
3138			WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
3139			       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3140		}
3141
3142	}
3143
3144	/* start the ring */
3145	gfx_v8_0_cp_gfx_start(adev);
3146	ring->ready = true;
3147	r = amdgpu_ring_test_ring(ring);
3148	if (r) {
3149		ring->ready = false;
3150		return r;
3151	}
3152
3153	return 0;
3154}
3155
3156static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3157{
3158	int i;
3159
3160	if (enable) {
3161		WREG32(mmCP_MEC_CNTL, 0);
3162	} else {
3163		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3164		for (i = 0; i < adev->gfx.num_compute_rings; i++)
3165			adev->gfx.compute_ring[i].ready = false;
3166	}
3167	udelay(50);
3168}
3169
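/*
 * Legacy (non-SMU) compute microcode load: halt both MECs, stream the
 * MEC1 image into its ucode registers, and do the same for MEC2 only
 * when a separate MEC2 image was provided.
 */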
3170static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3171{
3172	const struct gfx_firmware_header_v1_0 *mec_hdr;
3173	const __le32 *fw_data;
3174	unsigned i, fw_size;
3175
3176	if (!adev->gfx.mec_fw)
3177		return -EINVAL;
3178
3179	gfx_v8_0_cp_compute_enable(adev, false);
3180
3181	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3182	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3183
3184	fw_data = (const __le32 *)
3185		(adev->gfx.mec_fw->data +
3186		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3187	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
3188
3189	/* MEC1 */
3190	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
3191	for (i = 0; i < fw_size; i++)
3192		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
3193	WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3194
3195	/* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3196	if (adev->gfx.mec2_fw) {
3197		const struct gfx_firmware_header_v1_0 *mec2_hdr;
3198
3199		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3200		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
3201
3202		fw_data = (const __le32 *)
3203			(adev->gfx.mec2_fw->data +
3204			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
3205		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
3206
3207		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
3208		for (i = 0; i < fw_size; i++)
3209			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
3210		WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
3211	}
3212
3213	return 0;
3214}
3215
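/*
 * Memory queue descriptor (MQD): the in-memory image of one compute
 * queue.  The cp_hqd_* / cp_mqd_* ordinals mirror the corresponding CP
 * registers so the CP can save and restore hardware queue state from
 * this structure.
 */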
3216struct vi_mqd {
3217	uint32_t header;  /* ordinal0 */
3218	uint32_t compute_dispatch_initiator;  /* ordinal1 */
3219	uint32_t compute_dim_x;  /* ordinal2 */
3220	uint32_t compute_dim_y;  /* ordinal3 */
3221	uint32_t compute_dim_z;  /* ordinal4 */
3222	uint32_t compute_start_x;  /* ordinal5 */
3223	uint32_t compute_start_y;  /* ordinal6 */
3224	uint32_t compute_start_z;  /* ordinal7 */
3225	uint32_t compute_num_thread_x;  /* ordinal8 */
3226	uint32_t compute_num_thread_y;  /* ordinal9 */
3227	uint32_t compute_num_thread_z;  /* ordinal10 */
3228	uint32_t compute_pipelinestat_enable;  /* ordinal11 */
3229	uint32_t compute_perfcount_enable;  /* ordinal12 */
3230	uint32_t compute_pgm_lo;  /* ordinal13 */
3231	uint32_t compute_pgm_hi;  /* ordinal14 */
3232	uint32_t compute_tba_lo;  /* ordinal15 */
3233	uint32_t compute_tba_hi;  /* ordinal16 */
3234	uint32_t compute_tma_lo;  /* ordinal17 */
3235	uint32_t compute_tma_hi;  /* ordinal18 */
3236	uint32_t compute_pgm_rsrc1;  /* ordinal19 */
3237	uint32_t compute_pgm_rsrc2;  /* ordinal20 */
3238	uint32_t compute_vmid;  /* ordinal21 */
3239	uint32_t compute_resource_limits;  /* ordinal22 */
3240	uint32_t compute_static_thread_mgmt_se0;  /* ordinal23 */
3241	uint32_t compute_static_thread_mgmt_se1;  /* ordinal24 */
3242	uint32_t compute_tmpring_size;  /* ordinal25 */
3243	uint32_t compute_static_thread_mgmt_se2;  /* ordinal26 */
3244	uint32_t compute_static_thread_mgmt_se3;  /* ordinal27 */
3245	uint32_t compute_restart_x;  /* ordinal28 */
3246	uint32_t compute_restart_y;  /* ordinal29 */
3247	uint32_t compute_restart_z;  /* ordinal30 */
3248	uint32_t compute_thread_trace_enable;  /* ordinal31 */
3249	uint32_t compute_misc_reserved;  /* ordinal32 */
3250	uint32_t compute_dispatch_id;  /* ordinal33 */
3251	uint32_t compute_threadgroup_id;  /* ordinal34 */
3252	uint32_t compute_relaunch;  /* ordinal35 */
3253	uint32_t compute_wave_restore_addr_lo;  /* ordinal36 */
3254	uint32_t compute_wave_restore_addr_hi;  /* ordinal37 */
3255	uint32_t compute_wave_restore_control;  /* ordinal38 */
3256	uint32_t reserved9;  /* ordinal39 */
3257	uint32_t reserved10;  /* ordinal40 */
3258	uint32_t reserved11;  /* ordinal41 */
3259	uint32_t reserved12;  /* ordinal42 */
3260	uint32_t reserved13;  /* ordinal43 */
3261	uint32_t reserved14;  /* ordinal44 */
3262	uint32_t reserved15;  /* ordinal45 */
3263	uint32_t reserved16;  /* ordinal46 */
3264	uint32_t reserved17;  /* ordinal47 */
3265	uint32_t reserved18;  /* ordinal48 */
3266	uint32_t reserved19;  /* ordinal49 */
3267	uint32_t reserved20;  /* ordinal50 */
3268	uint32_t reserved21;  /* ordinal51 */
3269	uint32_t reserved22;  /* ordinal52 */
3270	uint32_t reserved23;  /* ordinal53 */
3271	uint32_t reserved24;  /* ordinal54 */
3272	uint32_t reserved25;  /* ordinal55 */
3273	uint32_t reserved26;  /* ordinal56 */
3274	uint32_t reserved27;  /* ordinal57 */
3275	uint32_t reserved28;  /* ordinal58 */
3276	uint32_t reserved29;  /* ordinal59 */
3277	uint32_t reserved30;  /* ordinal60 */
3278	uint32_t reserved31;  /* ordinal61 */
3279	uint32_t reserved32;  /* ordinal62 */
3280	uint32_t reserved33;  /* ordinal63 */
3281	uint32_t reserved34;  /* ordinal64 */
3282	uint32_t compute_user_data_0;  /* ordinal65 */
3283	uint32_t compute_user_data_1;  /* ordinal66 */
3284	uint32_t compute_user_data_2;  /* ordinal67 */
3285	uint32_t compute_user_data_3;  /* ordinal68 */
3286	uint32_t compute_user_data_4;  /* ordinal69 */
3287	uint32_t compute_user_data_5;  /* ordinal70 */
3288	uint32_t compute_user_data_6;  /* ordinal71 */
3289	uint32_t compute_user_data_7;  /* ordinal72 */
3290	uint32_t compute_user_data_8;  /* ordinal73 */
3291	uint32_t compute_user_data_9;  /* ordinal74 */
3292	uint32_t compute_user_data_10;  /* ordinal75 */
3293	uint32_t compute_user_data_11;  /* ordinal76 */
3294	uint32_t compute_user_data_12;  /* ordinal77 */
3295	uint32_t compute_user_data_13;  /* ordinal78 */
3296	uint32_t compute_user_data_14;  /* ordinal79 */
3297	uint32_t compute_user_data_15;  /* ordinal80 */
3298	uint32_t cp_compute_csinvoc_count_lo;  /* ordinal81 */
3299	uint32_t cp_compute_csinvoc_count_hi;  /* ordinal82 */
3300	uint32_t reserved35;  /* ordinal83 */
3301	uint32_t reserved36;  /* ordinal84 */
3302	uint32_t reserved37;  /* ordinal85 */
3303	uint32_t cp_mqd_query_time_lo;  /* ordinal86 */
3304	uint32_t cp_mqd_query_time_hi;  /* ordinal87 */
3305	uint32_t cp_mqd_connect_start_time_lo;  /* ordinal88 */
3306	uint32_t cp_mqd_connect_start_time_hi;  /* ordinal89 */
3307	uint32_t cp_mqd_connect_end_time_lo;  /* ordinal90 */
3308	uint32_t cp_mqd_connect_end_time_hi;  /* ordinal91 */
3309	uint32_t cp_mqd_connect_end_wf_count;  /* ordinal92 */
3310	uint32_t cp_mqd_connect_end_pq_rptr;  /* ordinal93 */
3311	uint32_t cp_mqd_connect_end_pq_wptr;  /* ordinal94 */
3312	uint32_t cp_mqd_connect_end_ib_rptr;  /* ordinal95 */
3313	uint32_t reserved38;  /* ordinal96 */
3314	uint32_t reserved39;  /* ordinal97 */
3315	uint32_t cp_mqd_save_start_time_lo;  /* ordinal98 */
3316	uint32_t cp_mqd_save_start_time_hi;  /* ordinal99 */
3317	uint32_t cp_mqd_save_end_time_lo;  /* ordinal100 */
3318	uint32_t cp_mqd_save_end_time_hi;  /* ordinal101 */
3319	uint32_t cp_mqd_restore_start_time_lo;  /* ordinal102 */
3320	uint32_t cp_mqd_restore_start_time_hi;  /* ordinal103 */
3321	uint32_t cp_mqd_restore_end_time_lo;  /* ordinal104 */
3322	uint32_t cp_mqd_restore_end_time_hi;  /* ordinal105 */
3323	uint32_t reserved40;  /* ordinal106 */
3324	uint32_t reserved41;  /* ordinal107 */
3325	uint32_t gds_cs_ctxsw_cnt0;  /* ordinal108 */
3326	uint32_t gds_cs_ctxsw_cnt1;  /* ordinal109 */
3327	uint32_t gds_cs_ctxsw_cnt2;  /* ordinal110 */
3328	uint32_t gds_cs_ctxsw_cnt3;  /* ordinal111 */
3329	uint32_t reserved42;  /* ordinal112 */
3330	uint32_t reserved43;  /* ordinal113 */
3331	uint32_t cp_pq_exe_status_lo;  /* ordinal114 */
3332	uint32_t cp_pq_exe_status_hi;  /* ordinal115 */
3333	uint32_t cp_packet_id_lo;  /* ordinal116 */
3334	uint32_t cp_packet_id_hi;  /* ordinal117 */
3335	uint32_t cp_packet_exe_status_lo;  /* ordinal118 */
3336	uint32_t cp_packet_exe_status_hi;  /* ordinal119 */
3337	uint32_t gds_save_base_addr_lo;  /* ordinal120 */
3338	uint32_t gds_save_base_addr_hi;  /* ordinal121 */
3339	uint32_t gds_save_mask_lo;  /* ordinal122 */
3340	uint32_t gds_save_mask_hi;  /* ordinal123 */
3341	uint32_t ctx_save_base_addr_lo;  /* ordinal124 */
3342	uint32_t ctx_save_base_addr_hi;  /* ordinal125 */
3343	uint32_t reserved44;  /* ordinal126 */
3344	uint32_t reserved45;  /* ordinal127 */
3345	uint32_t cp_mqd_base_addr_lo;  /* ordinal128 */
3346	uint32_t cp_mqd_base_addr_hi;  /* ordinal129 */
3347	uint32_t cp_hqd_active;  /* ordinal130 */
3348	uint32_t cp_hqd_vmid;  /* ordinal131 */
3349	uint32_t cp_hqd_persistent_state;  /* ordinal132 */
3350	uint32_t cp_hqd_pipe_priority;  /* ordinal133 */
3351	uint32_t cp_hqd_queue_priority;  /* ordinal134 */
3352	uint32_t cp_hqd_quantum;  /* ordinal135 */
3353	uint32_t cp_hqd_pq_base_lo;  /* ordinal136 */
3354	uint32_t cp_hqd_pq_base_hi;  /* ordinal137 */
3355	uint32_t cp_hqd_pq_rptr;  /* ordinal138 */
3356	uint32_t cp_hqd_pq_rptr_report_addr_lo;  /* ordinal139 */
3357	uint32_t cp_hqd_pq_rptr_report_addr_hi;  /* ordinal140 */
3358	uint32_t cp_hqd_pq_wptr_poll_addr;  /* ordinal141 */
3359	uint32_t cp_hqd_pq_wptr_poll_addr_hi;  /* ordinal142 */
3360	uint32_t cp_hqd_pq_doorbell_control;  /* ordinal143 */
3361	uint32_t cp_hqd_pq_wptr;  /* ordinal144 */
3362	uint32_t cp_hqd_pq_control;  /* ordinal145 */
3363	uint32_t cp_hqd_ib_base_addr_lo;  /* ordinal146 */
3364	uint32_t cp_hqd_ib_base_addr_hi;  /* ordinal147 */
3365	uint32_t cp_hqd_ib_rptr;  /* ordinal148 */
3366	uint32_t cp_hqd_ib_control;  /* ordinal149 */
3367	uint32_t cp_hqd_iq_timer;  /* ordinal150 */
3368	uint32_t cp_hqd_iq_rptr;  /* ordinal151 */
3369	uint32_t cp_hqd_dequeue_request;  /* ordinal152 */
3370	uint32_t cp_hqd_dma_offload;  /* ordinal153 */
3371	uint32_t cp_hqd_sema_cmd;  /* ordinal154 */
3372	uint32_t cp_hqd_msg_type;  /* ordinal155 */
3373	uint32_t cp_hqd_atomic0_preop_lo;  /* ordinal156 */
3374	uint32_t cp_hqd_atomic0_preop_hi;  /* ordinal157 */
3375	uint32_t cp_hqd_atomic1_preop_lo;  /* ordinal158 */
3376	uint32_t cp_hqd_atomic1_preop_hi;  /* ordinal159 */
3377	uint32_t cp_hqd_hq_status0;  /* ordinal160 */
3378	uint32_t cp_hqd_hq_control0;  /* ordinal161 */
3379	uint32_t cp_mqd_control;  /* ordinal162 */
3380	uint32_t cp_hqd_hq_status1;  /* ordinal163 */
3381	uint32_t cp_hqd_hq_control1;  /* ordinal164 */
3382	uint32_t cp_hqd_eop_base_addr_lo;  /* ordinal165 */
3383	uint32_t cp_hqd_eop_base_addr_hi;  /* ordinal166 */
3384	uint32_t cp_hqd_eop_control;  /* ordinal167 */
3385	uint32_t cp_hqd_eop_rptr;  /* ordinal168 */
3386	uint32_t cp_hqd_eop_wptr;  /* ordinal169 */
3387	uint32_t cp_hqd_eop_done_events;  /* ordinal170 */
3388	uint32_t cp_hqd_ctx_save_base_addr_lo;  /* ordinal171 */
3389	uint32_t cp_hqd_ctx_save_base_addr_hi;  /* ordinal172 */
3390	uint32_t cp_hqd_ctx_save_control;  /* ordinal173 */
3391	uint32_t cp_hqd_cntl_stack_offset;  /* ordinal174 */
3392	uint32_t cp_hqd_cntl_stack_size;  /* ordinal175 */
3393	uint32_t cp_hqd_wg_state_offset;  /* ordinal176 */
3394	uint32_t cp_hqd_ctx_save_size;  /* ordinal177 */
3395	uint32_t cp_hqd_gds_resource_state;  /* ordinal178 */
3396	uint32_t cp_hqd_error;  /* ordinal179 */
3397	uint32_t cp_hqd_eop_wptr_mem;  /* ordinal180 */
3398	uint32_t cp_hqd_eop_dones;  /* ordinal181 */
3399	uint32_t reserved46;  /* ordinal182 */
3400	uint32_t reserved47;  /* ordinal183 */
3401	uint32_t reserved48;  /* ordinal184 */
3402	uint32_t reserved49;  /* ordinal185 */
3403	uint32_t reserved50;  /* ordinal186 */
3404	uint32_t reserved51;  /* ordinal187 */
3405	uint32_t reserved52;  /* ordinal188 */
3406	uint32_t reserved53;  /* ordinal189 */
3407	uint32_t reserved54;  /* ordinal190 */
3408	uint32_t reserved55;  /* ordinal191 */
3409	uint32_t iqtimer_pkt_header;  /* ordinal192 */
3410	uint32_t iqtimer_pkt_dw0;  /* ordinal193 */
3411	uint32_t iqtimer_pkt_dw1;  /* ordinal194 */
3412	uint32_t iqtimer_pkt_dw2;  /* ordinal195 */
3413	uint32_t iqtimer_pkt_dw3;  /* ordinal196 */
3414	uint32_t iqtimer_pkt_dw4;  /* ordinal197 */
3415	uint32_t iqtimer_pkt_dw5;  /* ordinal198 */
3416	uint32_t iqtimer_pkt_dw6;  /* ordinal199 */
3417	uint32_t iqtimer_pkt_dw7;  /* ordinal200 */
3418	uint32_t iqtimer_pkt_dw8;  /* ordinal201 */
3419	uint32_t iqtimer_pkt_dw9;  /* ordinal202 */
3420	uint32_t iqtimer_pkt_dw10;  /* ordinal203 */
3421	uint32_t iqtimer_pkt_dw11;  /* ordinal204 */
3422	uint32_t iqtimer_pkt_dw12;  /* ordinal205 */
3423	uint32_t iqtimer_pkt_dw13;  /* ordinal206 */
3424	uint32_t iqtimer_pkt_dw14;  /* ordinal207 */
3425	uint32_t iqtimer_pkt_dw15;  /* ordinal208 */
3426	uint32_t iqtimer_pkt_dw16;  /* ordinal209 */
3427	uint32_t iqtimer_pkt_dw17;  /* ordinal210 */
3428	uint32_t iqtimer_pkt_dw18;  /* ordinal211 */
3429	uint32_t iqtimer_pkt_dw19;  /* ordinal212 */
3430	uint32_t iqtimer_pkt_dw20;  /* ordinal213 */
3431	uint32_t iqtimer_pkt_dw21;  /* ordinal214 */
3432	uint32_t iqtimer_pkt_dw22;  /* ordinal215 */
3433	uint32_t iqtimer_pkt_dw23;  /* ordinal216 */
3434	uint32_t iqtimer_pkt_dw24;  /* ordinal217 */
3435	uint32_t iqtimer_pkt_dw25;  /* ordinal218 */
3436	uint32_t iqtimer_pkt_dw26;  /* ordinal219 */
3437	uint32_t iqtimer_pkt_dw27;  /* ordinal220 */
3438	uint32_t iqtimer_pkt_dw28;  /* ordinal221 */
3439	uint32_t iqtimer_pkt_dw29;  /* ordinal222 */
3440	uint32_t iqtimer_pkt_dw30;  /* ordinal223 */
3441	uint32_t iqtimer_pkt_dw31;  /* ordinal224 */
3442	uint32_t reserved56;  /* ordinal225 */
3443	uint32_t reserved57;  /* ordinal226 */
3444	uint32_t reserved58;  /* ordinal227 */
3445	uint32_t set_resources_header;  /* ordinal228 */
3446	uint32_t set_resources_dw1;  /* ordinal229 */
3447	uint32_t set_resources_dw2;  /* ordinal230 */
3448	uint32_t set_resources_dw3;  /* ordinal231 */
3449	uint32_t set_resources_dw4;  /* ordinal232 */
3450	uint32_t set_resources_dw5;  /* ordinal233 */
3451	uint32_t set_resources_dw6;  /* ordinal234 */
3452	uint32_t set_resources_dw7;  /* ordinal235 */
3453	uint32_t reserved59;  /* ordinal236 */
3454	uint32_t reserved60;  /* ordinal237 */
3455	uint32_t reserved61;  /* ordinal238 */
3456	uint32_t reserved62;  /* ordinal239 */
3457	uint32_t reserved63;  /* ordinal240 */
3458	uint32_t reserved64;  /* ordinal241 */
3459	uint32_t reserved65;  /* ordinal242 */
3460	uint32_t reserved66;  /* ordinal243 */
3461	uint32_t reserved67;  /* ordinal244 */
3462	uint32_t reserved68;  /* ordinal245 */
3463	uint32_t reserved69;  /* ordinal246 */
3464	uint32_t reserved70;  /* ordinal247 */
3465	uint32_t reserved71;  /* ordinal248 */
3466	uint32_t reserved72;  /* ordinal249 */
3467	uint32_t reserved73;  /* ordinal250 */
3468	uint32_t reserved74;  /* ordinal251 */
3469	uint32_t reserved75;  /* ordinal252 */
3470	uint32_t reserved76;  /* ordinal253 */
3471	uint32_t reserved77;  /* ordinal254 */
3472	uint32_t reserved78;  /* ordinal255 */
3473
3474	uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
3475};
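/*
 * The layout above is 256 ordinals plus the 256 dword ucode scratch
 * area, i.e. exactly 2 KiB.  A compile-time check along the lines of
 *
 *	BUILD_BUG_ON(sizeof(struct vi_mqd) != 512 * sizeof(uint32_t));
 *
 * would catch accidental layout drift; it is shown here only as a
 * suggestion and is not part of the driver.
 */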
3476
3477static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
3478{
3479	int i, r;
3480
3481	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3482		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3483
3484		if (ring->mqd_obj) {
3485			r = amdgpu_bo_reserve(ring->mqd_obj, false);
3486			if (unlikely(r != 0))
3487				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
3488
3489			amdgpu_bo_unpin(ring->mqd_obj);
3490			amdgpu_bo_unreserve(ring->mqd_obj);
3491
3492			amdgpu_bo_unref(&ring->mqd_obj);
3493			ring->mqd_obj = NULL;
3494		}
3495	}
3496}
3497
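/*
 * Compute bring-up: program the per-pipe EOP buffers, then for each ring
 * create, pin and map an MQD buffer object, fill it under SRBM selection
 * while mirroring the values into the HQD registers, activate the queue,
 * and finally enable the MECs and ring-test every queue.
 */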
3498static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3499{
3500	int r, i, j;
3501	u32 tmp;
3502	bool use_doorbell = true;
3503	u64 hqd_gpu_addr;
3504	u64 mqd_gpu_addr;
3505	u64 eop_gpu_addr;
3506	u64 wb_gpu_addr;
3507	u32 *buf;
3508	struct vi_mqd *mqd;
3509
3510	/* init the pipes */
3511	mutex_lock(&adev->srbm_mutex);
3512	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
3513		int me = (i < 4) ? 1 : 2;
3514		int pipe = (i < 4) ? i : (i - 4);
3515
3516		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
3517		eop_gpu_addr >>= 8;
3518
3519		vi_srbm_select(adev, me, pipe, 0, 0);
3520
3521		/* write the EOP addr */
3522		WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
3523		WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
3524
3525		/* set the VMID assigned */
3526		WREG32(mmCP_HQD_VMID, 0);
3527
3528		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3529		tmp = RREG32(mmCP_HQD_EOP_CONTROL);
3530		tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3531				    (order_base_2(MEC_HPD_SIZE / 4) - 1));
3532		WREG32(mmCP_HQD_EOP_CONTROL, tmp);
3533	}
3534	vi_srbm_select(adev, 0, 0, 0, 0);
3535	mutex_unlock(&adev->srbm_mutex);
3536
3537	/* init the queues.  Just two for now. */
3538	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3539		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3540
3541		if (ring->mqd_obj == NULL) {
3542			r = amdgpu_bo_create(adev,
3543					     sizeof(struct vi_mqd),
3544					     PAGE_SIZE, true,
3545					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
3546					     NULL, &ring->mqd_obj);
3547			if (r) {
3548				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3549				return r;
3550			}
3551		}
3552
3553		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3554		if (unlikely(r != 0)) {
3555			gfx_v8_0_cp_compute_fini(adev);
3556			return r;
3557		}
3558		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
3559				  &mqd_gpu_addr);
3560		if (r) {
3561			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
3562			gfx_v8_0_cp_compute_fini(adev);
3563			return r;
3564		}
3565		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
3566		if (r) {
3567			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
3568			gfx_v8_0_cp_compute_fini(adev);
3569			return r;
3570		}
3571
3572		/* init the mqd struct */
3573		memset(buf, 0, sizeof(struct vi_mqd));
3574
3575		mqd = (struct vi_mqd *)buf;
3576		mqd->header = 0xC0310800;
3577		mqd->compute_pipelinestat_enable = 0x00000001;
3578		mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3579		mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3580		mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3581		mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3582		mqd->compute_misc_reserved = 0x00000003;
3583
3584		mutex_lock(&adev->srbm_mutex);
3585		vi_srbm_select(adev, ring->me,
3586			       ring->pipe,
3587			       ring->queue, 0);
3588
3589		/* disable wptr polling */
3590		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3591		tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3592		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3593
3594		mqd->cp_hqd_eop_base_addr_lo =
3595			RREG32(mmCP_HQD_EOP_BASE_ADDR);
3596		mqd->cp_hqd_eop_base_addr_hi =
3597			RREG32(mmCP_HQD_EOP_BASE_ADDR_HI);
3598
3599		/* enable doorbell? */
3600		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3601		if (use_doorbell) {
3602			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
3603		} else {
3604			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);
3605		}
3606		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
3607		mqd->cp_hqd_pq_doorbell_control = tmp;
3608
3609		/* disable the queue if it's active */
3610		mqd->cp_hqd_dequeue_request = 0;
3611		mqd->cp_hqd_pq_rptr = 0;
3612		mqd->cp_hqd_pq_wptr = 0;
3613		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
3614			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
3615			for (j = 0; j < adev->usec_timeout; j++) {
3616				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
3617					break;
3618				udelay(1);
3619			}
3620			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
3621			WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
3622			WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
3623		}
3624
3625		/* set the pointer to the MQD */
3626		mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
3627		mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3628		WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
3629		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3630
3631		/* set MQD vmid to 0 */
3632		tmp = RREG32(mmCP_MQD_CONTROL);
3633		tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3634		WREG32(mmCP_MQD_CONTROL, tmp);
3635		mqd->cp_mqd_control = tmp;
3636
3637		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3638		hqd_gpu_addr = ring->gpu_addr >> 8;
3639		mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3640		mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3641		WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
3642		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
3643
3644		/* set up the HQD, this is similar to CP_RB0_CNTL */
3645		tmp = RREG32(mmCP_HQD_PQ_CONTROL);
3646		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3647				    (order_base_2(ring->ring_size / 4) - 1));
3648		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3649			       ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3650#ifdef __BIG_ENDIAN
3651		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3652#endif
3653		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3654		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3655		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3656		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3657		WREG32(mmCP_HQD_PQ_CONTROL, tmp);
3658		mqd->cp_hqd_pq_control = tmp;
3659
3660		/* set the wb address whether it's enabled or not */
3661		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3662		mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3663		mqd->cp_hqd_pq_rptr_report_addr_hi =
3664			upper_32_bits(wb_gpu_addr) & 0xffff;
3665		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3666		       mqd->cp_hqd_pq_rptr_report_addr_lo);
3667		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3668		       mqd->cp_hqd_pq_rptr_report_addr_hi);
3669
3670		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3671		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3672		mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3673		mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3674		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
3675		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3676		       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3677
3678		/* enable the doorbell if requested */
3679		if (use_doorbell) {
3680			if ((adev->asic_type == CHIP_CARRIZO) ||
3681			    (adev->asic_type == CHIP_FIJI) ||
3682			    (adev->asic_type == CHIP_STONEY)) {
3683				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3684				       AMDGPU_DOORBELL_KIQ << 2);
3685				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
3686				       AMDGPU_DOORBELL_MEC_RING7 << 2);
3687			}
3688			tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3689			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3690					    DOORBELL_OFFSET, ring->doorbell_index);
3691			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
3692			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0);
3693			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0);
3694			mqd->cp_hqd_pq_doorbell_control = tmp;
3695
3696		} else {
3697			mqd->cp_hqd_pq_doorbell_control = 0;
3698		}
3699		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3700		       mqd->cp_hqd_pq_doorbell_control);
3701
3702		/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3703		ring->wptr = 0;
3704		mqd->cp_hqd_pq_wptr = ring->wptr;
3705		WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
3706		mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3707
3708		/* set the vmid for the queue */
3709		mqd->cp_hqd_vmid = 0;
3710		WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3711
3712		tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
3713		tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3714		WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
3715		mqd->cp_hqd_persistent_state = tmp;
3716		if (adev->asic_type == CHIP_STONEY) {
3717			tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
3718			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
3719			WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
3720		}
3721
3722		/* activate the queue */
3723		mqd->cp_hqd_active = 1;
3724		WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
3725
3726		vi_srbm_select(adev, 0, 0, 0, 0);
3727		mutex_unlock(&adev->srbm_mutex);
3728
3729		amdgpu_bo_kunmap(ring->mqd_obj);
3730		amdgpu_bo_unreserve(ring->mqd_obj);
3731	}
3732
3733	if (use_doorbell) {
3734		tmp = RREG32(mmCP_PQ_STATUS);
3735		tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3736		WREG32(mmCP_PQ_STATUS, tmp);
3737	}
3738
3739	gfx_v8_0_cp_compute_enable(adev, true);
3740
3741	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3742		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3743
3744		ring->ready = true;
3745		r = amdgpu_ring_test_ring(ring);
3746		if (r)
3747			ring->ready = false;
3748	}
3749
3750	return 0;
3751}
3752
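/*
 * CP bring-up entry point: choose the firmware load path (direct
 * register writes on legacy configurations, SMU-verified loads
 * otherwise), then resume the gfx and compute queues.
 */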
3753static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
3754{
3755	int r;
3756
3757	if (!(adev->flags & AMD_IS_APU))
3758		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
3759
3760	if (!adev->pp_enabled) {
3761		if (!adev->firmware.smu_load) {
3762			/* legacy firmware loading */
3763			r = gfx_v8_0_cp_gfx_load_microcode(adev);
3764			if (r)
3765				return r;
3766
3767			r = gfx_v8_0_cp_compute_load_microcode(adev);
3768			if (r)
3769				return r;
3770		} else {
3771			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3772							AMDGPU_UCODE_ID_CP_CE);
3773			if (r)
3774				return -EINVAL;
3775
3776			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3777							AMDGPU_UCODE_ID_CP_PFP);
3778			if (r)
3779				return -EINVAL;
3780
3781			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3782							AMDGPU_UCODE_ID_CP_ME);
3783			if (r)
3784				return -EINVAL;
3785
3786			if (adev->asic_type == CHIP_TOPAZ) {
3787				r = gfx_v8_0_cp_compute_load_microcode(adev);
3788				if (r)
3789					return r;
3790			} else {
3791				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3792										 AMDGPU_UCODE_ID_CP_MEC1);
3793				if (r)
3794					return -EINVAL;
3795			}
3796		}
3797	}
3798
3799	r = gfx_v8_0_cp_gfx_resume(adev);
3800	if (r)
3801		return r;
3802
3803	r = gfx_v8_0_cp_compute_resume(adev);
3804	if (r)
3805		return r;
3806
3807	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
3808
3809	return 0;
3810}
3811
3812static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
3813{
3814	gfx_v8_0_cp_gfx_enable(adev, enable);
3815	gfx_v8_0_cp_compute_enable(adev, enable);
3816}
3817
3818static int gfx_v8_0_hw_init(void *handle)
3819{
3820	int r;
3821	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3822
3823	gfx_v8_0_init_golden_registers(adev);
3824
3825	gfx_v8_0_gpu_init(adev);
3826
3827	r = gfx_v8_0_rlc_resume(adev);
3828	if (r)
3829		return r;
3830
3831	r = gfx_v8_0_cp_resume(adev);
3832	if (r)
3833		return r;
3834
3835	return r;
3836}
3837
3838static int gfx_v8_0_hw_fini(void *handle)
3839{
3840	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3841
3842	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3843	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3844	gfx_v8_0_cp_enable(adev, false);
3845	gfx_v8_0_rlc_stop(adev);
3846	gfx_v8_0_cp_compute_fini(adev);
3847
3848	return 0;
3849}
3850
3851static int gfx_v8_0_suspend(void *handle)
3852{
3853	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3854
3855	return gfx_v8_0_hw_fini(adev);
3856}
3857
3858static int gfx_v8_0_resume(void *handle)
3859{
3860	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3861
3862	return gfx_v8_0_hw_init(adev);
3863}
3864
3865static bool gfx_v8_0_is_idle(void *handle)
3866{
3867	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3868
3869	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
3870		return false;
3871	else
3872		return true;
3873}
3874
3875static int gfx_v8_0_wait_for_idle(void *handle)
3876{
3877	unsigned i;
3878	u32 tmp;
3879	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3880
3881	for (i = 0; i < adev->usec_timeout; i++) {
3882		/* read GRBM_STATUS */
3883		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
3884
3885		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3886			return 0;
3887		udelay(1);
3888	}
3889	return -ETIMEDOUT;
3890}
3891
3892static void gfx_v8_0_print_status(void *handle)
3893{
3894	int i;
3895	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3896
3897	dev_info(adev->dev, "GFX 8.x registers\n");
3898	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
3899		 RREG32(mmGRBM_STATUS));
3900	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
3901		 RREG32(mmGRBM_STATUS2));
3902	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
3903		 RREG32(mmGRBM_STATUS_SE0));
3904	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
3905		 RREG32(mmGRBM_STATUS_SE1));
3906	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
3907		 RREG32(mmGRBM_STATUS_SE2));
3908	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
3909		 RREG32(mmGRBM_STATUS_SE3));
3910	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
3911	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
3912		 RREG32(mmCP_STALLED_STAT1));
3913	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
3914		 RREG32(mmCP_STALLED_STAT2));
3915	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
3916		 RREG32(mmCP_STALLED_STAT3));
3917	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
3918		 RREG32(mmCP_CPF_BUSY_STAT));
3919	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
3920		 RREG32(mmCP_CPF_STALLED_STAT1));
3921	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
3922	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
3923	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
3924		 RREG32(mmCP_CPC_STALLED_STAT1));
3925	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
3926
3927	for (i = 0; i < 32; i++) {
3928		dev_info(adev->dev, "  GB_TILE_MODE%d=0x%08X\n",
3929			 i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
3930	}
3931	for (i = 0; i < 16; i++) {
3932		dev_info(adev->dev, "  GB_MACROTILE_MODE%d=0x%08X\n",
3933			 i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
3934	}
3935	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3936		dev_info(adev->dev, "  se: %d\n", i);
3937		gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
3938		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG=0x%08X\n",
3939			 RREG32(mmPA_SC_RASTER_CONFIG));
3940		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG_1=0x%08X\n",
3941			 RREG32(mmPA_SC_RASTER_CONFIG_1));
3942	}
3943	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
3944
3945	dev_info(adev->dev, "  GB_ADDR_CONFIG=0x%08X\n",
3946		 RREG32(mmGB_ADDR_CONFIG));
3947	dev_info(adev->dev, "  HDP_ADDR_CONFIG=0x%08X\n",
3948		 RREG32(mmHDP_ADDR_CONFIG));
3949	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
3950		 RREG32(mmDMIF_ADDR_CALC));
3951
3952	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
3953		 RREG32(mmCP_MEQ_THRESHOLDS));
3954	dev_info(adev->dev, "  SX_DEBUG_1=0x%08X\n",
3955		 RREG32(mmSX_DEBUG_1));
3956	dev_info(adev->dev, "  TA_CNTL_AUX=0x%08X\n",
3957		 RREG32(mmTA_CNTL_AUX));
3958	dev_info(adev->dev, "  SPI_CONFIG_CNTL=0x%08X\n",
3959		 RREG32(mmSPI_CONFIG_CNTL));
3960	dev_info(adev->dev, "  SQ_CONFIG=0x%08X\n",
3961		 RREG32(mmSQ_CONFIG));
3962	dev_info(adev->dev, "  DB_DEBUG=0x%08X\n",
3963		 RREG32(mmDB_DEBUG));
3964	dev_info(adev->dev, "  DB_DEBUG2=0x%08X\n",
3965		 RREG32(mmDB_DEBUG2));
3966	dev_info(adev->dev, "  DB_DEBUG3=0x%08X\n",
3967		 RREG32(mmDB_DEBUG3));
3968	dev_info(adev->dev, "  CB_HW_CONTROL=0x%08X\n",
3969		 RREG32(mmCB_HW_CONTROL));
3970	dev_info(adev->dev, "  SPI_CONFIG_CNTL_1=0x%08X\n",
3971		 RREG32(mmSPI_CONFIG_CNTL_1));
3972	dev_info(adev->dev, "  PA_SC_FIFO_SIZE=0x%08X\n",
3973		 RREG32(mmPA_SC_FIFO_SIZE));
3974	dev_info(adev->dev, "  VGT_NUM_INSTANCES=0x%08X\n",
3975		 RREG32(mmVGT_NUM_INSTANCES));
3976	dev_info(adev->dev, "  CP_PERFMON_CNTL=0x%08X\n",
3977		 RREG32(mmCP_PERFMON_CNTL));
3978	dev_info(adev->dev, "  PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
3979		 RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
3980	dev_info(adev->dev, "  VGT_CACHE_INVALIDATION=0x%08X\n",
3981		 RREG32(mmVGT_CACHE_INVALIDATION));
3982	dev_info(adev->dev, "  VGT_GS_VERTEX_REUSE=0x%08X\n",
3983		 RREG32(mmVGT_GS_VERTEX_REUSE));
3984	dev_info(adev->dev, "  PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
3985		 RREG32(mmPA_SC_LINE_STIPPLE_STATE));
3986	dev_info(adev->dev, "  PA_CL_ENHANCE=0x%08X\n",
3987		 RREG32(mmPA_CL_ENHANCE));
3988	dev_info(adev->dev, "  PA_SC_ENHANCE=0x%08X\n",
3989		 RREG32(mmPA_SC_ENHANCE));
3990
3991	dev_info(adev->dev, "  CP_ME_CNTL=0x%08X\n",
3992		 RREG32(mmCP_ME_CNTL));
3993	dev_info(adev->dev, "  CP_MAX_CONTEXT=0x%08X\n",
3994		 RREG32(mmCP_MAX_CONTEXT));
3995	dev_info(adev->dev, "  CP_ENDIAN_SWAP=0x%08X\n",
3996		 RREG32(mmCP_ENDIAN_SWAP));
3997	dev_info(adev->dev, "  CP_DEVICE_ID=0x%08X\n",
3998		 RREG32(mmCP_DEVICE_ID));
3999
4000	dev_info(adev->dev, "  CP_SEM_WAIT_TIMER=0x%08X\n",
4001		 RREG32(mmCP_SEM_WAIT_TIMER));
4002
4003	dev_info(adev->dev, "  CP_RB_WPTR_DELAY=0x%08X\n",
4004		 RREG32(mmCP_RB_WPTR_DELAY));
4005	dev_info(adev->dev, "  CP_RB_VMID=0x%08X\n",
4006		 RREG32(mmCP_RB_VMID));
4007	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
4008		 RREG32(mmCP_RB0_CNTL));
4009	dev_info(adev->dev, "  CP_RB0_WPTR=0x%08X\n",
4010		 RREG32(mmCP_RB0_WPTR));
4011	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR=0x%08X\n",
4012		 RREG32(mmCP_RB0_RPTR_ADDR));
4013	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR_HI=0x%08X\n",
4014		 RREG32(mmCP_RB0_RPTR_ADDR_HI));
4015	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
4016		 RREG32(mmCP_RB0_CNTL));
4017	dev_info(adev->dev, "  CP_RB0_BASE=0x%08X\n",
4018		 RREG32(mmCP_RB0_BASE));
4019	dev_info(adev->dev, "  CP_RB0_BASE_HI=0x%08X\n",
4020		 RREG32(mmCP_RB0_BASE_HI));
4021	dev_info(adev->dev, "  CP_MEC_CNTL=0x%08X\n",
4022		 RREG32(mmCP_MEC_CNTL));
4023	dev_info(adev->dev, "  CP_CPF_DEBUG=0x%08X\n",
4024		 RREG32(mmCP_CPF_DEBUG));
4025
4026	dev_info(adev->dev, "  SCRATCH_ADDR=0x%08X\n",
4027		 RREG32(mmSCRATCH_ADDR));
4028	dev_info(adev->dev, "  SCRATCH_UMSK=0x%08X\n",
4029		 RREG32(mmSCRATCH_UMSK));
4030
4031	dev_info(adev->dev, "  CP_INT_CNTL_RING0=0x%08X\n",
4032		 RREG32(mmCP_INT_CNTL_RING0));
4033	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
4034		 RREG32(mmRLC_LB_CNTL));
4035	dev_info(adev->dev, "  RLC_CNTL=0x%08X\n",
4036		 RREG32(mmRLC_CNTL));
4037	dev_info(adev->dev, "  RLC_CGCG_CGLS_CTRL=0x%08X\n",
4038		 RREG32(mmRLC_CGCG_CGLS_CTRL));
4039	dev_info(adev->dev, "  RLC_LB_CNTR_INIT=0x%08X\n",
4040		 RREG32(mmRLC_LB_CNTR_INIT));
4041	dev_info(adev->dev, "  RLC_LB_CNTR_MAX=0x%08X\n",
4042		 RREG32(mmRLC_LB_CNTR_MAX));
4043	dev_info(adev->dev, "  RLC_LB_INIT_CU_MASK=0x%08X\n",
4044		 RREG32(mmRLC_LB_INIT_CU_MASK));
4045	dev_info(adev->dev, "  RLC_LB_PARAMS=0x%08X\n",
4046		 RREG32(mmRLC_LB_PARAMS));
4047	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
4048		 RREG32(mmRLC_LB_CNTL));
4049	dev_info(adev->dev, "  RLC_MC_CNTL=0x%08X\n",
4050		 RREG32(mmRLC_MC_CNTL));
4051	dev_info(adev->dev, "  RLC_UCODE_CNTL=0x%08X\n",
4052		 RREG32(mmRLC_UCODE_CNTL));
4053
4054	mutex_lock(&adev->srbm_mutex);
4055	for (i = 0; i < 16; i++) {
4056		vi_srbm_select(adev, 0, 0, 0, i);
4057		dev_info(adev->dev, "  VM %d:\n", i);
4058		dev_info(adev->dev, "  SH_MEM_CONFIG=0x%08X\n",
4059			 RREG32(mmSH_MEM_CONFIG));
4060		dev_info(adev->dev, "  SH_MEM_APE1_BASE=0x%08X\n",
4061			 RREG32(mmSH_MEM_APE1_BASE));
4062		dev_info(adev->dev, "  SH_MEM_APE1_LIMIT=0x%08X\n",
4063			 RREG32(mmSH_MEM_APE1_LIMIT));
4064		dev_info(adev->dev, "  SH_MEM_BASES=0x%08X\n",
4065			 RREG32(mmSH_MEM_BASES));
4066	}
4067	vi_srbm_select(adev, 0, 0, 0, 0);
4068	mutex_unlock(&adev->srbm_mutex);
4069}
4070
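/*
 * Derive GRBM/SRBM soft-reset masks from the busy bits in GRBM_STATUS,
 * GRBM_STATUS2 and SRBM_STATUS.  If anything is stuck: stop the RLC and
 * both CPs, stall the memory path via GMCON_DEBUG, pulse the reset bits
 * with 50 us delays, then release the stall.
 */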
4071static int gfx_v8_0_soft_reset(void *handle)
4072{
4073	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4074	u32 tmp;
4075	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4076
4077	/* GRBM_STATUS */
4078	tmp = RREG32(mmGRBM_STATUS);
4079	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4080		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4081		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4082		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4083		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4084		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4085		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4086						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4087		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4088						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4089	}
4090
4091	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4092		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4093						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4094		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4095						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4096	}
4097
4098	/* GRBM_STATUS2 */
4099	tmp = RREG32(mmGRBM_STATUS2);
4100	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4101		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4102						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4103
4104	/* SRBM_STATUS */
4105	tmp = RREG32(mmSRBM_STATUS);
4106	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
4107		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
4108						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
4109
4110	if (grbm_soft_reset || srbm_soft_reset) {
4111		gfx_v8_0_print_status((void *)adev);
4112		/* stop the rlc */
4113		gfx_v8_0_rlc_stop(adev);
4114
4115		/* Disable GFX parsing/prefetching */
4116		gfx_v8_0_cp_gfx_enable(adev, false);
4117
4118		/* Disable MEC parsing/prefetching */
4119		gfx_v8_0_cp_compute_enable(adev, false);
4120
4121		if (grbm_soft_reset || srbm_soft_reset) {
4122			tmp = RREG32(mmGMCON_DEBUG);
4123			tmp = REG_SET_FIELD(tmp,
4124					    GMCON_DEBUG, GFX_STALL, 1);
4125			tmp = REG_SET_FIELD(tmp,
4126					    GMCON_DEBUG, GFX_CLEAR, 1);
4127			WREG32(mmGMCON_DEBUG, tmp);
4128
4129			udelay(50);
4130		}
4131
4132		if (grbm_soft_reset) {
4133			tmp = RREG32(mmGRBM_SOFT_RESET);
4134			tmp |= grbm_soft_reset;
4135			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4136			WREG32(mmGRBM_SOFT_RESET, tmp);
4137			tmp = RREG32(mmGRBM_SOFT_RESET);
4138
4139			udelay(50);
4140
4141			tmp &= ~grbm_soft_reset;
4142			WREG32(mmGRBM_SOFT_RESET, tmp);
4143			tmp = RREG32(mmGRBM_SOFT_RESET);
4144		}
4145
4146		if (srbm_soft_reset) {
4147			tmp = RREG32(mmSRBM_SOFT_RESET);
4148			tmp |= srbm_soft_reset;
4149			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4150			WREG32(mmSRBM_SOFT_RESET, tmp);
4151			tmp = RREG32(mmSRBM_SOFT_RESET);
4152
4153			udelay(50);
4154
4155			tmp &= ~srbm_soft_reset;
4156			WREG32(mmSRBM_SOFT_RESET, tmp);
4157			tmp = RREG32(mmSRBM_SOFT_RESET);
4158		}
4159
4160		if (grbm_soft_reset || srbm_soft_reset) {
4161			tmp = RREG32(mmGMCON_DEBUG);
4162			tmp = REG_SET_FIELD(tmp,
4163					    GMCON_DEBUG, GFX_STALL, 0);
4164			tmp = REG_SET_FIELD(tmp,
4165					    GMCON_DEBUG, GFX_CLEAR, 0);
4166			WREG32(mmGMCON_DEBUG, tmp);
4167		}
4168
4169		/* Wait a little for things to settle down */
4170		udelay(50);
4171		gfx_v8_0_print_status((void *)adev);
4172	}
4173	return 0;
4174}
4175
4176/**
4177 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
4178 *
4179 * @adev: amdgpu_device pointer
4180 *
4181 * Fetches a GPU clock counter snapshot.
4182 * Returns the 64 bit clock counter snapshot.
4183 */
4184uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4185{
4186	uint64_t clock;
4187
4188	mutex_lock(&adev->gfx.gpu_clock_mutex);
4189	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4190	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4191		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4192	mutex_unlock(&adev->gfx.gpu_clock_mutex);
4193	return clock;
4194}
4195
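/*
 * Emit one WRITE_DATA packet per GDS register to reprogram the given
 * VMID's GDS memory, GWS and OA allocations from within the ring.
 */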
4196static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4197					  uint32_t vmid,
4198					  uint32_t gds_base, uint32_t gds_size,
4199					  uint32_t gws_base, uint32_t gws_size,
4200					  uint32_t oa_base, uint32_t oa_size)
4201{
4202	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
4203	gds_size = gds_size >> AMDGPU_GDS_SHIFT;
4204
4205	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
4206	gws_size = gws_size >> AMDGPU_GWS_SHIFT;
4207
4208	oa_base = oa_base >> AMDGPU_OA_SHIFT;
4209	oa_size = oa_size >> AMDGPU_OA_SHIFT;
4210
4211	/* GDS Base */
4212	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4213	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4214				WRITE_DATA_DST_SEL(0)));
4215	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4216	amdgpu_ring_write(ring, 0);
4217	amdgpu_ring_write(ring, gds_base);
4218
4219	/* GDS Size */
4220	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4221	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4222				WRITE_DATA_DST_SEL(0)));
4223	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4224	amdgpu_ring_write(ring, 0);
4225	amdgpu_ring_write(ring, gds_size);
4226
4227	/* GWS */
4228	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4229	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4230				WRITE_DATA_DST_SEL(0)));
4231	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4232	amdgpu_ring_write(ring, 0);
4233	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4234
4235	/* OA */
4236	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4237	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4238				WRITE_DATA_DST_SEL(0)));
4239	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4240	amdgpu_ring_write(ring, 0);
4241	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4242}
4243
4244static int gfx_v8_0_early_init(void *handle)
4245{
4246	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4247
4248	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
4249	adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
4250	gfx_v8_0_set_ring_funcs(adev);
4251	gfx_v8_0_set_irq_funcs(adev);
4252	gfx_v8_0_set_gds_init(adev);
4253
4254	return 0;
4255}
4256
4257static int gfx_v8_0_late_init(void *handle)
4258{
4259	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4260	int r;
4261
4262	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4263	if (r)
4264		return r;
4265
4266	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4267	if (r)
4268		return r;
4269
4270	/* requires IBs so do in late init after IB pool is initialized */
4271	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
4272	if (r)
4273		return r;
4274
4275	return 0;
4276}
4277
4278static int gfx_v8_0_set_powergating_state(void *handle,
4279					  enum amd_powergating_state state)
4280{
4281	return 0;
4282}
4283
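/*
 * Broadcast a BPM command over the RLC SERDES interface: select all
 * SEs/SHs, address every CU and non-CU master, then issue the
 * command/register pair through RLC_SERDES_WR_CTRL.
 */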
4284static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
4285		uint32_t reg_addr, uint32_t cmd)
4286{
4287	uint32_t data;
4288
4289	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
4290
4291	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
4292	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
4293
4294	data = RREG32(mmRLC_SERDES_WR_CTRL);
4295	data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
4296			RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
4297			RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
4298			RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
4299			RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
4300			RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
4301			RLC_SERDES_WR_CTRL__POWER_UP_MASK |
4302			RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
4303			RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
4304			RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
4305			RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
4306	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
4307			(cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
4308			(reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
4309			(0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
4310
4311	WREG32(mmRLC_SERDES_WR_CTRL, data);
4312}
4313
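/*
 * Enable or disable medium grain clock gating (MGCG) and memory light
 * sleep (MGLS) on Fiji. The numbered steps below follow the required
 * sequence: RLC/CP memory light sleep, the RLC MGCG override bits, the
 * serdes BPM override command and the CGTS shader-monitor controls,
 * with RLC serdes idle waits in between.
 */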
4314static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4315		bool enable)
4316{
4317	uint32_t temp, data;
4318
4319	/* It is disabled by HW by default */
4320	if (enable) {
4321		/* 1 - RLC memory Light sleep */
4322		temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
4323		data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4324		if (temp != data)
4325			WREG32(mmRLC_MEM_SLP_CNTL, data);
4326
4327		/* 2 - CP memory Light sleep */
4328		temp = data = RREG32(mmCP_MEM_SLP_CNTL);
4329		data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4330		if (temp != data)
4331			WREG32(mmCP_MEM_SLP_CNTL, data);
4332
4333		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
4334		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4335		data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
4336				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
4337				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
4338				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
4339
4340		if (temp != data)
4341			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
4342
4343		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4344		gfx_v8_0_wait_for_rlc_serdes(adev);
4345
4346		/* 5 - clear mgcg override */
4347		fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
4348
4349		/* 6 - Enable CGTS(Tree Shade) MGCG/MGLS */
4350		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
4351		data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
4352		data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
4353		data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
4354		data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
4355		data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
4356		data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
4357		data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
4358		if (temp != data)
4359			WREG32(mmCGTS_SM_CTRL_REG, data);
4360		udelay(50);
4361
4362		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4363		gfx_v8_0_wait_for_rlc_serdes(adev);
4364	} else {
4365		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
4366		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4367		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
4368				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
4369				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
4370				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
4371		if (temp != data)
4372			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
4373
4374		/* 2 - disable MGLS in RLC */
4375		data = RREG32(mmRLC_MEM_SLP_CNTL);
4376		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4377			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4378			WREG32(mmRLC_MEM_SLP_CNTL, data);
4379		}
4380
4381		/* 3 - disable MGLS in CP */
4382		data = RREG32(mmCP_MEM_SLP_CNTL);
4383		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4384			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4385			WREG32(mmCP_MEM_SLP_CNTL, data);
4386		}
4387
4388		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
4389		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
4390		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
4391				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
4392		if (temp != data)
4393			WREG32(mmCGTS_SM_CTRL_REG, data);
4394
4395		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4396		gfx_v8_0_wait_for_rlc_serdes(adev);
4397
4398		/* 6 - set mgcg override */
4399		fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
4400
4401		udelay(50);
4402
4403		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4404		gfx_v8_0_wait_for_rlc_serdes(adev);
4405	}
4406}
4407
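/*
 * Enable or disable coarse grain clock gating (CGCG) and coarse grain
 * light sleep (CGLS) on Fiji via RLC_CGCG_CGLS_CTRL and the serdes BPM
 * override/enable commands.
 */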
4408static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4409		bool enable)
4410{
4411	uint32_t temp, temp1, data, data1;
4412
4413	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
4414
4415	if (enable) {
4416		/* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
4417		 * Cmp_busy/GFX_Idle interrupts
4418		 */
4419		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4420
4421		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4422		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
4423		if (temp1 != data1)
4424			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4425
4426		/* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4427		gfx_v8_0_wait_for_rlc_serdes(adev);
4428
4429		/* 3 - clear cgcg override */
4430		fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
4431
4432		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4433		gfx_v8_0_wait_for_rlc_serdes(adev);
4434
4435		/* 4 - write cmd to set CGLS */
4436		fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
4437
4438		/* 5 - enable cgcg */
4439		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4440
4441		/* enable cgls*/
4442		data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4443
4444		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4445		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
4446
4447		if (temp1 != data1)
4448			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4449
4450		if (temp != data)
4451			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
4452	} else {
4453		/* disable cntx_empty_int_enable & GFX Idle interrupt */
4454		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4455
4456		/* TEST CGCG */
4457		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
4458		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
4459				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
4460		if (temp1 != data1)
4461			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
4462
4463		/* read gfx register to wake up cgcg */
4464		RREG32(mmCB_CGTT_SCLK_CTRL);
4465		RREG32(mmCB_CGTT_SCLK_CTRL);
4466		RREG32(mmCB_CGTT_SCLK_CTRL);
4467		RREG32(mmCB_CGTT_SCLK_CTRL);
4468
4469		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4470		gfx_v8_0_wait_for_rlc_serdes(adev);
4471
4472		/* write cmd to set CGCG override */
4473		fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
4474
4475		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
4476		gfx_v8_0_wait_for_rlc_serdes(adev);
4477
4478		/* write cmd to Clear CGLS */
4479		fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
4480
4481		/* disable cgcg, cgls should be disabled too. */
4482		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
4483				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4484		if (temp != data)
4485			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
4486	}
4487}
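
/* Toggle all Fiji gfx clock gating features in the required order. */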
4488static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev,
4489		bool enable)
4490{
4491	if (enable) {
4492		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
4493		 * ===  MGCG + MGLS + TS(CG/LS) ===
4494		 */
4495		fiji_update_medium_grain_clock_gating(adev, enable);
4496		fiji_update_coarse_grain_clock_gating(adev, enable);
4497	} else {
4498		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
4499		 * ===  CGCG + CGLS ===
4500		 */
4501		fiji_update_coarse_grain_clock_gating(adev, enable);
4502		fiji_update_medium_grain_clock_gating(adev, enable);
4503	}
4504	return 0;
4505}
4506
4507static int gfx_v8_0_set_clockgating_state(void *handle,
4508					  enum amd_clockgating_state state)
4509{
4510	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4511
4512	switch (adev->asic_type) {
4513	case CHIP_FIJI:
4514		fiji_update_gfx_clock_gating(adev,
4515				state == AMD_CG_STATE_GATE);
4516		break;
4517	default:
4518		break;
4519	}
4520	return 0;
4521}
4522
4523static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4524{
4525	u32 rptr;
4526
4527	rptr = ring->adev->wb.wb[ring->rptr_offs];
4528
4529	return rptr;
4530}
4531
4532static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4533{
4534	struct amdgpu_device *adev = ring->adev;
4535	u32 wptr;
4536
4537	if (ring->use_doorbell)
4538		/* XXX check if swapping is necessary on BE */
4539		wptr = ring->adev->wb.wb[ring->wptr_offs];
4540	else
4541		wptr = RREG32(mmCP_RB0_WPTR);
4542
4543	return wptr;
4544}
4545
4546static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4547{
4548	struct amdgpu_device *adev = ring->adev;
4549
4550	if (ring->use_doorbell) {
4551		/* XXX check if swapping is necessary on BE */
4552		adev->wb.wb[ring->wptr_offs] = ring->wptr;
4553		WDOORBELL32(ring->doorbell_index, ring->wptr);
4554	} else {
4555		WREG32(mmCP_RB0_WPTR, ring->wptr);
4556		(void)RREG32(mmCP_RB0_WPTR);
4557	}
4558}
4559
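/*
 * Emit a WAIT_REG_MEM packet that requests an HDP flush and polls
 * GPU_HDP_FLUSH_DONE until the per-client done bit matches. Compute
 * rings derive the CP client mask from their ME/pipe; the gfx ring
 * uses CP0 and polls from the PFP.
 */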
4560static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4561{
4562	u32 ref_and_mask, reg_mem_engine;
4563
4564	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
4565		switch (ring->me) {
4566		case 1:
4567			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
4568			break;
4569		case 2:
4570			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
4571			break;
4572		default:
4573			return;
4574		}
4575		reg_mem_engine = 0;
4576	} else {
4577		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
4578		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
4579	}
4580
4581	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4582	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
4583				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
4584				 reg_mem_engine));
4585	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
4586	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
4587	amdgpu_ring_write(ring, ref_and_mask);
4588	amdgpu_ring_write(ring, ref_and_mask);
4589	amdgpu_ring_write(ring, 0x20); /* poll interval */
4590}
4591
4592static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
4593{
4594	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4595	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4596				 WRITE_DATA_DST_SEL(0) |
4597				 WR_CONFIRM));
4598	amdgpu_ring_write(ring, mmHDP_DEBUG0);
4599	amdgpu_ring_write(ring, 0);
4600	amdgpu_ring_write(ring, 1);
4602}
4603
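/*
 * Emit an indirect buffer on the gfx ring: publish the expected rptr
 * via WRITE_DATA, emit a SWITCH_BUFFER on a context switch, then the
 * INDIRECT_BUFFER (or INDIRECT_BUFFER_CONST for CE IBs) packet with
 * the IB address, size and VM id.
 */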
4604static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4605				  struct amdgpu_ib *ib)
4606{
4607	bool need_ctx_switch = ring->current_ctx != ib->ctx;
4608	u32 header, control = 0;
4609	u32 next_rptr = ring->wptr + 5;
4610
4611	/* drop the CE preamble IB for the same context */
4612	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
4613		return;
4614
4615	if (need_ctx_switch)
4616		next_rptr += 2;
4617
4618	next_rptr += 4;
4619	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4620	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
4621	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
4622	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
4623	amdgpu_ring_write(ring, next_rptr);
4624
4625	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
4626	if (need_ctx_switch) {
4627		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4628		amdgpu_ring_write(ring, 0);
4629	}
4630
4631	if (ib->flags & AMDGPU_IB_FLAG_CE)
4632		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4633	else
4634		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4635
4636	control |= ib->length_dw | (ib->vm_id << 24);
4637
4638	amdgpu_ring_write(ring, header);
4639	amdgpu_ring_write(ring,
4640#ifdef __BIG_ENDIAN
4641			  (2 << 0) |
4642#endif
4643			  (ib->gpu_addr & 0xFFFFFFFC));
4644	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4645	amdgpu_ring_write(ring, control);
4646}
4647
4648static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4649				  struct amdgpu_ib *ib)
4650{
4651	u32 header, control = 0;
4652	u32 next_rptr = ring->wptr + 5;
4653
4654	control |= INDIRECT_BUFFER_VALID;
4655
4656	next_rptr += 4;
4657	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4658	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
4659	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
4660	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
4661	amdgpu_ring_write(ring, next_rptr);
4662
4663	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4664
4665	control |= ib->length_dw | (ib->vm_id << 24);
4666
4667	amdgpu_ring_write(ring, header);
4668	amdgpu_ring_write(ring,
4669#ifdef __BIG_ENDIAN
4670					  (2 << 0) |
4671#endif
4672					  (ib->gpu_addr & 0xFFFFFFFC));
4673	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4674	amdgpu_ring_write(ring, control);
4675}
4676
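/*
 * Emit a gfx fence: an EVENT_WRITE_EOP packet that flushes the TC and
 * TCL1 caches and writes the 32- or 64-bit sequence number to the
 * fence address, optionally raising an interrupt.
 */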
4677static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
4678					 u64 seq, unsigned flags)
4679{
4680	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4681	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4682
4683	/* EVENT_WRITE_EOP - flush caches, send int */
4684	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
4685	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
4686				 EOP_TC_ACTION_EN |
4687				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4688				 EVENT_INDEX(5)));
4689	amdgpu_ring_write(ring, addr & 0xfffffffc);
4690	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
4691			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4692	amdgpu_ring_write(ring, lower_32_bits(seq));
4693	amdgpu_ring_write(ring, upper_32_bits(seq));
4695}
4696
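/*
 * Wait for the ring's last synced fence value to appear in memory
 * before later packets execute; on the gfx ring the wait runs on the
 * PFP and is followed by SWITCH_BUFFERs to keep the CE in step with
 * the ME.
 */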
4697static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4698{
4699	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4700	uint32_t seq = ring->fence_drv.sync_seq;
4701	uint64_t addr = ring->fence_drv.gpu_addr;
4702
4703	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4704	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4705				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
4706				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
4707	amdgpu_ring_write(ring, addr & 0xfffffffc);
4708	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4709	amdgpu_ring_write(ring, seq);
4710	amdgpu_ring_write(ring, 0xffffffff);
4711	amdgpu_ring_write(ring, 4); /* poll interval */
4712
4713	if (usepfp) {
4714		/* sync CE with ME to prevent CE from fetching CEIB before context switch is done */
4715		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4716		amdgpu_ring_write(ring, 0);
4717		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4718		amdgpu_ring_write(ring, 0);
4719	}
4720}
4721
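/*
 * Flush the VM for the given id: update its page table base register,
 * request an invalidate through VM_INVALIDATE_REQUEST and wait for it
 * to complete. The gfx ring additionally syncs PFP to ME to avoid
 * stale PFP reads.
 */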
4722static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4723					unsigned vm_id, uint64_t pd_addr)
4724{
4725	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4726
4727	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4728	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
4729				 WRITE_DATA_DST_SEL(0) |
4730				 WR_CONFIRM));
4731	if (vm_id < 8) {
4732		amdgpu_ring_write(ring,
4733				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
4734	} else {
4735		amdgpu_ring_write(ring,
4736				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
4737	}
4738	amdgpu_ring_write(ring, 0);
4739	amdgpu_ring_write(ring, pd_addr >> 12);
4740
4741	/* bits 0-15 are the VM contexts0-15 */
4742	/* invalidate the cache */
4743	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4744	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4745				 WRITE_DATA_DST_SEL(0)));
4746	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
4747	amdgpu_ring_write(ring, 0);
4748	amdgpu_ring_write(ring, 1 << vm_id);
4749
4750	/* wait for the invalidate to complete */
4751	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4752	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
4753				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
4754				 WAIT_REG_MEM_ENGINE(0))); /* me */
4755	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
4756	amdgpu_ring_write(ring, 0);
4757	amdgpu_ring_write(ring, 0); /* ref */
4758	amdgpu_ring_write(ring, 0); /* mask */
4759	amdgpu_ring_write(ring, 0x20); /* poll interval */
4760
4761	/* compute doesn't have PFP */
4762	if (usepfp) {
4763		/* sync PFP to ME, otherwise we might get invalid PFP reads */
4764		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4765		amdgpu_ring_write(ring, 0x0);
4766		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4767		amdgpu_ring_write(ring, 0);
4768		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4769		amdgpu_ring_write(ring, 0);
4770	}
4771}
4772
4773static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4774{
4775	return ring->adev->wb.wb[ring->rptr_offs];
4776}
4777
4778static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4779{
4780	return ring->adev->wb.wb[ring->wptr_offs];
4781}
4782
4783static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4784{
4785	struct amdgpu_device *adev = ring->adev;
4786
4787	/* XXX check if swapping is necessary on BE */
4788	adev->wb.wb[ring->wptr_offs] = ring->wptr;
4789	WDOORBELL32(ring->doorbell_index, ring->wptr);
4790}
4791
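/*
 * Compute fences use RELEASE_MEM rather than EVENT_WRITE_EOP and add a
 * TC writeback action on top of the cache flushes.
 */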
4792static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
4793					     u64 addr, u64 seq,
4794					     unsigned flags)
4795{
4796	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4797	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4798
4799	/* RELEASE_MEM - flush caches, send int */
4800	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
4801	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
4802				 EOP_TC_ACTION_EN |
4803				 EOP_TC_WB_ACTION_EN |
4804				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4805				 EVENT_INDEX(5)));
4806	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4807	amdgpu_ring_write(ring, addr & 0xfffffffc);
4808	amdgpu_ring_write(ring, upper_32_bits(addr));
4809	amdgpu_ring_write(ring, lower_32_bits(seq));
4810	amdgpu_ring_write(ring, upper_32_bits(seq));
4811}
4812
4813static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4814						 enum amdgpu_interrupt_state state)
4815{
4816	u32 cp_int_cntl;
4817
4818	switch (state) {
4819	case AMDGPU_IRQ_STATE_DISABLE:
4820		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4821		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4822					    TIME_STAMP_INT_ENABLE, 0);
4823		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4824		break;
4825	case AMDGPU_IRQ_STATE_ENABLE:
4826		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4827		cp_int_cntl =
4828			REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4829				      TIME_STAMP_INT_ENABLE, 1);
4830		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4831		break;
4832	default:
4833		break;
4834	}
4835}
4836
4837static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4838						     int me, int pipe,
4839						     enum amdgpu_interrupt_state state)
4840{
4841	u32 mec_int_cntl, mec_int_cntl_reg;
4842
4843	/*
4844	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4845	 * handles the setting of interrupts for this specific pipe. All other
4846	 * pipes' interrupts are set by amdkfd.
4847	 */
4848
4849	if (me == 1) {
4850		switch (pipe) {
4851		case 0:
4852			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4853			break;
4854		default:
4855			DRM_DEBUG("invalid pipe %d\n", pipe);
4856			return;
4857		}
4858	} else {
4859		DRM_DEBUG("invalid me %d\n", me);
4860		return;
4861	}
4862
4863	switch (state) {
4864	case AMDGPU_IRQ_STATE_DISABLE:
4865		mec_int_cntl = RREG32(mec_int_cntl_reg);
4866		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4867					     TIME_STAMP_INT_ENABLE, 0);
4868		WREG32(mec_int_cntl_reg, mec_int_cntl);
4869		break;
4870	case AMDGPU_IRQ_STATE_ENABLE:
4871		mec_int_cntl = RREG32(mec_int_cntl_reg);
4872		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4873					     TIME_STAMP_INT_ENABLE, 1);
4874		WREG32(mec_int_cntl_reg, mec_int_cntl);
4875		break;
4876	default:
4877		break;
4878	}
4879}
4880
4881static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4882					     struct amdgpu_irq_src *source,
4883					     unsigned type,
4884					     enum amdgpu_interrupt_state state)
4885{
4886	u32 cp_int_cntl;
4887
4888	switch (state) {
4889	case AMDGPU_IRQ_STATE_DISABLE:
4890		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4891		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4892					    PRIV_REG_INT_ENABLE, 0);
4893		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4894		break;
4895	case AMDGPU_IRQ_STATE_ENABLE:
4896		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4897		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4898					    PRIV_REG_INT_ENABLE, 1);
4899		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4900		break;
4901	default:
4902		break;
4903	}
4904
4905	return 0;
4906}
4907
4908static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4909					      struct amdgpu_irq_src *source,
4910					      unsigned type,
4911					      enum amdgpu_interrupt_state state)
4912{
4913	u32 cp_int_cntl;
4914
4915	switch (state) {
4916	case AMDGPU_IRQ_STATE_DISABLE:
4917		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4918		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4919					    PRIV_INSTR_INT_ENABLE, 0);
4920		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4921		break;
4922	case AMDGPU_IRQ_STATE_ENABLE:
4923		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4924		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4925					    PRIV_INSTR_INT_ENABLE, 1);
4926		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4927		break;
4928	default:
4929		break;
4930	}
4931
4932	return 0;
4933}
4934
4935static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4936					    struct amdgpu_irq_src *src,
4937					    unsigned type,
4938					    enum amdgpu_interrupt_state state)
4939{
4940	switch (type) {
4941	case AMDGPU_CP_IRQ_GFX_EOP:
4942		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
4943		break;
4944	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4945		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4946		break;
4947	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4948		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4949		break;
4950	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4951		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4952		break;
4953	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4954		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4955		break;
4956	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4957		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4958		break;
4959	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4960		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4961		break;
4962	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4963		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4964		break;
4965	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4966		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4967		break;
4968	default:
4969		break;
4970	}
4971	return 0;
4972}
4973
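/*
 * EOP interrupt handler: decode ME/pipe/queue from the IV ring_id and
 * run fence processing on the matching gfx or compute ring.
 */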
4974static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
4975			    struct amdgpu_irq_src *source,
4976			    struct amdgpu_iv_entry *entry)
4977{
4978	int i;
4979	u8 me_id, pipe_id, queue_id;
4980	struct amdgpu_ring *ring;
4981
4982	DRM_DEBUG("IH: CP EOP\n");
4983	me_id = (entry->ring_id & 0x0c) >> 2;
4984	pipe_id = (entry->ring_id & 0x03) >> 0;
4985	queue_id = (entry->ring_id & 0x70) >> 4;
4986
4987	switch (me_id) {
4988	case 0:
4989		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4990		break;
4991	case 1:
4992	case 2:
4993		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4994			ring = &adev->gfx.compute_ring[i];
4995			/* Per-queue interrupt is supported for MEC starting from VI.
4996			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
4997			 */
4998			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4999				amdgpu_fence_process(ring);
5000		}
5001		break;
5002	}
5003	return 0;
5004}
5005
5006static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
5007				 struct amdgpu_irq_src *source,
5008				 struct amdgpu_iv_entry *entry)
5009{
5010	DRM_ERROR("Illegal register access in command stream\n");
5011	schedule_work(&adev->reset_work);
5012	return 0;
5013}
5014
5015static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
5016				  struct amdgpu_irq_src *source,
5017				  struct amdgpu_iv_entry *entry)
5018{
5019	DRM_ERROR("Illegal instruction in command stream\n");
5020	schedule_work(&adev->reset_work);
5021	return 0;
5022}
5023
5024const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
5025	.early_init = gfx_v8_0_early_init,
5026	.late_init = gfx_v8_0_late_init,
5027	.sw_init = gfx_v8_0_sw_init,
5028	.sw_fini = gfx_v8_0_sw_fini,
5029	.hw_init = gfx_v8_0_hw_init,
5030	.hw_fini = gfx_v8_0_hw_fini,
5031	.suspend = gfx_v8_0_suspend,
5032	.resume = gfx_v8_0_resume,
5033	.is_idle = gfx_v8_0_is_idle,
5034	.wait_for_idle = gfx_v8_0_wait_for_idle,
5035	.soft_reset = gfx_v8_0_soft_reset,
5036	.print_status = gfx_v8_0_print_status,
5037	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
5038	.set_powergating_state = gfx_v8_0_set_powergating_state,
5039};
5040
5041static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
5042	.get_rptr = gfx_v8_0_ring_get_rptr_gfx,
5043	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
5044	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
5045	.parse_cs = NULL,
5046	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
5047	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
5048	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
5049	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
5050	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
5051	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
5052	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
5053	.test_ring = gfx_v8_0_ring_test_ring,
5054	.test_ib = gfx_v8_0_ring_test_ib,
5055	.insert_nop = amdgpu_ring_insert_nop,
5056	.pad_ib = amdgpu_ring_generic_pad_ib,
5057};
5058
5059static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
5060	.get_rptr = gfx_v8_0_ring_get_rptr_compute,
5061	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
5062	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
5063	.parse_cs = NULL,
5064	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
5065	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
5066	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
5067	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
5068	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
5069	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
5070	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
5071	.test_ring = gfx_v8_0_ring_test_ring,
5072	.test_ib = gfx_v8_0_ring_test_ib,
5073	.insert_nop = amdgpu_ring_insert_nop,
5074	.pad_ib = amdgpu_ring_generic_pad_ib,
5075};
5076
5077static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
5078{
5079	int i;
5080
5081	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5082		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
5083
5084	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5085		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
5086}
5087
5088static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
5089	.set = gfx_v8_0_set_eop_interrupt_state,
5090	.process = gfx_v8_0_eop_irq,
5091};
5092
5093static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
5094	.set = gfx_v8_0_set_priv_reg_fault_state,
5095	.process = gfx_v8_0_priv_reg_irq,
5096};
5097
5098static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
5099	.set = gfx_v8_0_set_priv_inst_fault_state,
5100	.process = gfx_v8_0_priv_inst_irq,
5101};
5102
5103static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
5104{
5105	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5106	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
5107
5108	adev->gfx.priv_reg_irq.num_types = 1;
5109	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
5110
5111	adev->gfx.priv_inst_irq.num_types = 1;
5112	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
5113}
5114
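/*
 * Derive the GDS partition sizes from the hardware-reported GDS memory
 * size; the GWS and OA totals are fixed at 64 and 16 entries.
 */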
5115static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
5116{
5117	/* init asic gds info */
5118	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5119	adev->gds.gws.total_size = 64;
5120	adev->gds.oa.total_size = 16;
5121
5122	if (adev->gds.mem.total_size == 64 * 1024) {
5123		adev->gds.mem.gfx_partition_size = 4096;
5124		adev->gds.mem.cs_partition_size = 4096;
5125
5126		adev->gds.gws.gfx_partition_size = 4;
5127		adev->gds.gws.cs_partition_size = 4;
5128
5129		adev->gds.oa.gfx_partition_size = 4;
5130		adev->gds.oa.cs_partition_size = 1;
5131	} else {
5132		adev->gds.mem.gfx_partition_size = 1024;
5133		adev->gds.mem.cs_partition_size = 1024;
5134
5135		adev->gds.gws.gfx_partition_size = 16;
5136		adev->gds.gws.cs_partition_size = 16;
5137
5138		adev->gds.oa.gfx_partition_size = 4;
5139		adev->gds.oa.cs_partition_size = 4;
5140	}
5141}
5142
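/*
 * Return the active-CU bitmap for the currently selected SE/SH by
 * masking off the inactive CUs reported in the shader array config
 * registers.
 */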
5143static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
5144{
5145	u32 data, mask;
5146
5147	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
5148	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
5149
5150	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
5151	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
5152
5153	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
5154
5155	return (~data) & mask;
5156}
5157
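/*
 * Walk every SE/SH, record the per-SH active-CU bitmap and total the
 * active CUs; the first two CUs of each SH are flagged as "always on"
 * in ao_cu_mask.
 */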
5158int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
5159			 struct amdgpu_cu_info *cu_info)
5160{
5161	int i, j, k, counter, active_cu_number = 0;
5162	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5163
5164	if (!adev || !cu_info)
5165		return -EINVAL;
5166
5167	memset(cu_info, 0, sizeof(*cu_info));
5168
5169	mutex_lock(&adev->grbm_idx_mutex);
5170	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5171		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5172			mask = 1;
5173			ao_bitmap = 0;
5174			counter = 0;
5175			gfx_v8_0_select_se_sh(adev, i, j);
5176			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
5177			cu_info->bitmap[i][j] = bitmap;
5178
5179			for (k = 0; k < 16; k++) {
5180				if (bitmap & mask) {
5181					if (counter < 2)
5182						ao_bitmap |= mask;
5183					counter++;
5184				}
5185				mask <<= 1;
5186			}
5187			active_cu_number += counter;
5188			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5189		}
5190	}
5191	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
5192	mutex_unlock(&adev->grbm_idx_mutex);
5193
5194	cu_info->number = active_cu_number;
5195	cu_info->ao_cu_mask = ao_cu_mask;
5196
5197	return 0;
5198}