   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26
  27#include "amdgpu.h"
  28#include "amdgpu_ih.h"
  29#include "amdgpu_gfx.h"
  30#include "cikd.h"
  31#include "cik.h"
  32#include "cik_structs.h"
  33#include "atom.h"
  34#include "amdgpu_ucode.h"
  35#include "clearstate_ci.h"
  36
  37#include "dce/dce_8_0_d.h"
  38#include "dce/dce_8_0_sh_mask.h"
  39
  40#include "bif/bif_4_1_d.h"
  41#include "bif/bif_4_1_sh_mask.h"
  42
  43#include "gca/gfx_7_0_d.h"
  44#include "gca/gfx_7_2_enum.h"
  45#include "gca/gfx_7_2_sh_mask.h"
  46
  47#include "gmc/gmc_7_0_d.h"
  48#include "gmc/gmc_7_0_sh_mask.h"
  49
  50#include "oss/oss_2_0_d.h"
  51#include "oss/oss_2_0_sh_mask.h"
  52
  53#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */
  54
  55#define GFX7_NUM_GFX_RINGS     1
  56#define GFX7_MEC_HPD_SIZE      2048
  57
  58static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  59static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  60static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
  61
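/*
 * Firmware images used by this IP block.  Listing them with
 * MODULE_FIRMWARE() records the names in the module info so packaging
 * tools (e.g. initramfs generators) know to bundle them; the blobs are
 * actually requested at init time by gfx_v7_0_init_microcode() below.
 */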
  62MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
  63MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
  64MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
  65MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
  66MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
  67
  68MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
  69MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
  70MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
  71MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
  72MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
  73
  74MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
  75MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
  76MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
  77MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
  78MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
  79MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
  80
  81MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
  82MODULE_FIRMWARE("amdgpu/kabini_me.bin");
  83MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
  84MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
  85MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
  86
  87MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
  88MODULE_FIRMWARE("amdgpu/mullins_me.bin");
  89MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
  90MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
  91MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
  92
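/*
 * Per-VMID GDS register offsets: global data share base/size plus the
 * GWS and OA allocation registers for each of the 16 VMIDs.
 */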
  93static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
  94{
  95	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
  96	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
  97	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
  98	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
  99	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
 100	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
 101	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
 102	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
 103	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
 104	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 105	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 106	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 107	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 108	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 109	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 110	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 111};
 112
 113static const u32 spectre_rlc_save_restore_register_list[] =
 114{
 115	(0x0e00 << 16) | (0xc12c >> 2),
 116	0x00000000,
 117	(0x0e00 << 16) | (0xc140 >> 2),
 118	0x00000000,
 119	(0x0e00 << 16) | (0xc150 >> 2),
 120	0x00000000,
 121	(0x0e00 << 16) | (0xc15c >> 2),
 122	0x00000000,
 123	(0x0e00 << 16) | (0xc168 >> 2),
 124	0x00000000,
 125	(0x0e00 << 16) | (0xc170 >> 2),
 126	0x00000000,
 127	(0x0e00 << 16) | (0xc178 >> 2),
 128	0x00000000,
 129	(0x0e00 << 16) | (0xc204 >> 2),
 130	0x00000000,
 131	(0x0e00 << 16) | (0xc2b4 >> 2),
 132	0x00000000,
 133	(0x0e00 << 16) | (0xc2b8 >> 2),
 134	0x00000000,
 135	(0x0e00 << 16) | (0xc2bc >> 2),
 136	0x00000000,
 137	(0x0e00 << 16) | (0xc2c0 >> 2),
 138	0x00000000,
 139	(0x0e00 << 16) | (0x8228 >> 2),
 140	0x00000000,
 141	(0x0e00 << 16) | (0x829c >> 2),
 142	0x00000000,
 143	(0x0e00 << 16) | (0x869c >> 2),
 144	0x00000000,
 145	(0x0600 << 16) | (0x98f4 >> 2),
 146	0x00000000,
 147	(0x0e00 << 16) | (0x98f8 >> 2),
 148	0x00000000,
 149	(0x0e00 << 16) | (0x9900 >> 2),
 150	0x00000000,
 151	(0x0e00 << 16) | (0xc260 >> 2),
 152	0x00000000,
 153	(0x0e00 << 16) | (0x90e8 >> 2),
 154	0x00000000,
 155	(0x0e00 << 16) | (0x3c000 >> 2),
 156	0x00000000,
 157	(0x0e00 << 16) | (0x3c00c >> 2),
 158	0x00000000,
 159	(0x0e00 << 16) | (0x8c1c >> 2),
 160	0x00000000,
 161	(0x0e00 << 16) | (0x9700 >> 2),
 162	0x00000000,
 163	(0x0e00 << 16) | (0xcd20 >> 2),
 164	0x00000000,
 165	(0x4e00 << 16) | (0xcd20 >> 2),
 166	0x00000000,
 167	(0x5e00 << 16) | (0xcd20 >> 2),
 168	0x00000000,
 169	(0x6e00 << 16) | (0xcd20 >> 2),
 170	0x00000000,
 171	(0x7e00 << 16) | (0xcd20 >> 2),
 172	0x00000000,
 173	(0x8e00 << 16) | (0xcd20 >> 2),
 174	0x00000000,
 175	(0x9e00 << 16) | (0xcd20 >> 2),
 176	0x00000000,
 177	(0xae00 << 16) | (0xcd20 >> 2),
 178	0x00000000,
 179	(0xbe00 << 16) | (0xcd20 >> 2),
 180	0x00000000,
 181	(0x0e00 << 16) | (0x89bc >> 2),
 182	0x00000000,
 183	(0x0e00 << 16) | (0x8900 >> 2),
 184	0x00000000,
 185	0x3,
 186	(0x0e00 << 16) | (0xc130 >> 2),
 187	0x00000000,
 188	(0x0e00 << 16) | (0xc134 >> 2),
 189	0x00000000,
 190	(0x0e00 << 16) | (0xc1fc >> 2),
 191	0x00000000,
 192	(0x0e00 << 16) | (0xc208 >> 2),
 193	0x00000000,
 194	(0x0e00 << 16) | (0xc264 >> 2),
 195	0x00000000,
 196	(0x0e00 << 16) | (0xc268 >> 2),
 197	0x00000000,
 198	(0x0e00 << 16) | (0xc26c >> 2),
 199	0x00000000,
 200	(0x0e00 << 16) | (0xc270 >> 2),
 201	0x00000000,
 202	(0x0e00 << 16) | (0xc274 >> 2),
 203	0x00000000,
 204	(0x0e00 << 16) | (0xc278 >> 2),
 205	0x00000000,
 206	(0x0e00 << 16) | (0xc27c >> 2),
 207	0x00000000,
 208	(0x0e00 << 16) | (0xc280 >> 2),
 209	0x00000000,
 210	(0x0e00 << 16) | (0xc284 >> 2),
 211	0x00000000,
 212	(0x0e00 << 16) | (0xc288 >> 2),
 213	0x00000000,
 214	(0x0e00 << 16) | (0xc28c >> 2),
 215	0x00000000,
 216	(0x0e00 << 16) | (0xc290 >> 2),
 217	0x00000000,
 218	(0x0e00 << 16) | (0xc294 >> 2),
 219	0x00000000,
 220	(0x0e00 << 16) | (0xc298 >> 2),
 221	0x00000000,
 222	(0x0e00 << 16) | (0xc29c >> 2),
 223	0x00000000,
 224	(0x0e00 << 16) | (0xc2a0 >> 2),
 225	0x00000000,
 226	(0x0e00 << 16) | (0xc2a4 >> 2),
 227	0x00000000,
 228	(0x0e00 << 16) | (0xc2a8 >> 2),
 229	0x00000000,
 230	(0x0e00 << 16) | (0xc2ac  >> 2),
 231	0x00000000,
 232	(0x0e00 << 16) | (0xc2b0 >> 2),
 233	0x00000000,
 234	(0x0e00 << 16) | (0x301d0 >> 2),
 235	0x00000000,
 236	(0x0e00 << 16) | (0x30238 >> 2),
 237	0x00000000,
 238	(0x0e00 << 16) | (0x30250 >> 2),
 239	0x00000000,
 240	(0x0e00 << 16) | (0x30254 >> 2),
 241	0x00000000,
 242	(0x0e00 << 16) | (0x30258 >> 2),
 243	0x00000000,
 244	(0x0e00 << 16) | (0x3025c >> 2),
 245	0x00000000,
 246	(0x4e00 << 16) | (0xc900 >> 2),
 247	0x00000000,
 248	(0x5e00 << 16) | (0xc900 >> 2),
 249	0x00000000,
 250	(0x6e00 << 16) | (0xc900 >> 2),
 251	0x00000000,
 252	(0x7e00 << 16) | (0xc900 >> 2),
 253	0x00000000,
 254	(0x8e00 << 16) | (0xc900 >> 2),
 255	0x00000000,
 256	(0x9e00 << 16) | (0xc900 >> 2),
 257	0x00000000,
 258	(0xae00 << 16) | (0xc900 >> 2),
 259	0x00000000,
 260	(0xbe00 << 16) | (0xc900 >> 2),
 261	0x00000000,
 262	(0x4e00 << 16) | (0xc904 >> 2),
 263	0x00000000,
 264	(0x5e00 << 16) | (0xc904 >> 2),
 265	0x00000000,
 266	(0x6e00 << 16) | (0xc904 >> 2),
 267	0x00000000,
 268	(0x7e00 << 16) | (0xc904 >> 2),
 269	0x00000000,
 270	(0x8e00 << 16) | (0xc904 >> 2),
 271	0x00000000,
 272	(0x9e00 << 16) | (0xc904 >> 2),
 273	0x00000000,
 274	(0xae00 << 16) | (0xc904 >> 2),
 275	0x00000000,
 276	(0xbe00 << 16) | (0xc904 >> 2),
 277	0x00000000,
 278	(0x4e00 << 16) | (0xc908 >> 2),
 279	0x00000000,
 280	(0x5e00 << 16) | (0xc908 >> 2),
 281	0x00000000,
 282	(0x6e00 << 16) | (0xc908 >> 2),
 283	0x00000000,
 284	(0x7e00 << 16) | (0xc908 >> 2),
 285	0x00000000,
 286	(0x8e00 << 16) | (0xc908 >> 2),
 287	0x00000000,
 288	(0x9e00 << 16) | (0xc908 >> 2),
 289	0x00000000,
 290	(0xae00 << 16) | (0xc908 >> 2),
 291	0x00000000,
 292	(0xbe00 << 16) | (0xc908 >> 2),
 293	0x00000000,
 294	(0x4e00 << 16) | (0xc90c >> 2),
 295	0x00000000,
 296	(0x5e00 << 16) | (0xc90c >> 2),
 297	0x00000000,
 298	(0x6e00 << 16) | (0xc90c >> 2),
 299	0x00000000,
 300	(0x7e00 << 16) | (0xc90c >> 2),
 301	0x00000000,
 302	(0x8e00 << 16) | (0xc90c >> 2),
 303	0x00000000,
 304	(0x9e00 << 16) | (0xc90c >> 2),
 305	0x00000000,
 306	(0xae00 << 16) | (0xc90c >> 2),
 307	0x00000000,
 308	(0xbe00 << 16) | (0xc90c >> 2),
 309	0x00000000,
 310	(0x4e00 << 16) | (0xc910 >> 2),
 311	0x00000000,
 312	(0x5e00 << 16) | (0xc910 >> 2),
 313	0x00000000,
 314	(0x6e00 << 16) | (0xc910 >> 2),
 315	0x00000000,
 316	(0x7e00 << 16) | (0xc910 >> 2),
 317	0x00000000,
 318	(0x8e00 << 16) | (0xc910 >> 2),
 319	0x00000000,
 320	(0x9e00 << 16) | (0xc910 >> 2),
 321	0x00000000,
 322	(0xae00 << 16) | (0xc910 >> 2),
 323	0x00000000,
 324	(0xbe00 << 16) | (0xc910 >> 2),
 325	0x00000000,
 326	(0x0e00 << 16) | (0xc99c >> 2),
 327	0x00000000,
 328	(0x0e00 << 16) | (0x9834 >> 2),
 329	0x00000000,
 330	(0x0000 << 16) | (0x30f00 >> 2),
 331	0x00000000,
 332	(0x0001 << 16) | (0x30f00 >> 2),
 333	0x00000000,
 334	(0x0000 << 16) | (0x30f04 >> 2),
 335	0x00000000,
 336	(0x0001 << 16) | (0x30f04 >> 2),
 337	0x00000000,
 338	(0x0000 << 16) | (0x30f08 >> 2),
 339	0x00000000,
 340	(0x0001 << 16) | (0x30f08 >> 2),
 341	0x00000000,
 342	(0x0000 << 16) | (0x30f0c >> 2),
 343	0x00000000,
 344	(0x0001 << 16) | (0x30f0c >> 2),
 345	0x00000000,
 346	(0x0600 << 16) | (0x9b7c >> 2),
 347	0x00000000,
 348	(0x0e00 << 16) | (0x8a14 >> 2),
 349	0x00000000,
 350	(0x0e00 << 16) | (0x8a18 >> 2),
 351	0x00000000,
 352	(0x0600 << 16) | (0x30a00 >> 2),
 353	0x00000000,
 354	(0x0e00 << 16) | (0x8bf0 >> 2),
 355	0x00000000,
 356	(0x0e00 << 16) | (0x8bcc >> 2),
 357	0x00000000,
 358	(0x0e00 << 16) | (0x8b24 >> 2),
 359	0x00000000,
 360	(0x0e00 << 16) | (0x30a04 >> 2),
 361	0x00000000,
 362	(0x0600 << 16) | (0x30a10 >> 2),
 363	0x00000000,
 364	(0x0600 << 16) | (0x30a14 >> 2),
 365	0x00000000,
 366	(0x0600 << 16) | (0x30a18 >> 2),
 367	0x00000000,
 368	(0x0600 << 16) | (0x30a2c >> 2),
 369	0x00000000,
 370	(0x0e00 << 16) | (0xc700 >> 2),
 371	0x00000000,
 372	(0x0e00 << 16) | (0xc704 >> 2),
 373	0x00000000,
 374	(0x0e00 << 16) | (0xc708 >> 2),
 375	0x00000000,
 376	(0x0e00 << 16) | (0xc768 >> 2),
 377	0x00000000,
 378	(0x0400 << 16) | (0xc770 >> 2),
 379	0x00000000,
 380	(0x0400 << 16) | (0xc774 >> 2),
 381	0x00000000,
 382	(0x0400 << 16) | (0xc778 >> 2),
 383	0x00000000,
 384	(0x0400 << 16) | (0xc77c >> 2),
 385	0x00000000,
 386	(0x0400 << 16) | (0xc780 >> 2),
 387	0x00000000,
 388	(0x0400 << 16) | (0xc784 >> 2),
 389	0x00000000,
 390	(0x0400 << 16) | (0xc788 >> 2),
 391	0x00000000,
 392	(0x0400 << 16) | (0xc78c >> 2),
 393	0x00000000,
 394	(0x0400 << 16) | (0xc798 >> 2),
 395	0x00000000,
 396	(0x0400 << 16) | (0xc79c >> 2),
 397	0x00000000,
 398	(0x0400 << 16) | (0xc7a0 >> 2),
 399	0x00000000,
 400	(0x0400 << 16) | (0xc7a4 >> 2),
 401	0x00000000,
 402	(0x0400 << 16) | (0xc7a8 >> 2),
 403	0x00000000,
 404	(0x0400 << 16) | (0xc7ac >> 2),
 405	0x00000000,
 406	(0x0400 << 16) | (0xc7b0 >> 2),
 407	0x00000000,
 408	(0x0400 << 16) | (0xc7b4 >> 2),
 409	0x00000000,
 410	(0x0e00 << 16) | (0x9100 >> 2),
 411	0x00000000,
 412	(0x0e00 << 16) | (0x3c010 >> 2),
 413	0x00000000,
 414	(0x0e00 << 16) | (0x92a8 >> 2),
 415	0x00000000,
 416	(0x0e00 << 16) | (0x92ac >> 2),
 417	0x00000000,
 418	(0x0e00 << 16) | (0x92b4 >> 2),
 419	0x00000000,
 420	(0x0e00 << 16) | (0x92b8 >> 2),
 421	0x00000000,
 422	(0x0e00 << 16) | (0x92bc >> 2),
 423	0x00000000,
 424	(0x0e00 << 16) | (0x92c0 >> 2),
 425	0x00000000,
 426	(0x0e00 << 16) | (0x92c4 >> 2),
 427	0x00000000,
 428	(0x0e00 << 16) | (0x92c8 >> 2),
 429	0x00000000,
 430	(0x0e00 << 16) | (0x92cc >> 2),
 431	0x00000000,
 432	(0x0e00 << 16) | (0x92d0 >> 2),
 433	0x00000000,
 434	(0x0e00 << 16) | (0x8c00 >> 2),
 435	0x00000000,
 436	(0x0e00 << 16) | (0x8c04 >> 2),
 437	0x00000000,
 438	(0x0e00 << 16) | (0x8c20 >> 2),
 439	0x00000000,
 440	(0x0e00 << 16) | (0x8c38 >> 2),
 441	0x00000000,
 442	(0x0e00 << 16) | (0x8c3c >> 2),
 443	0x00000000,
 444	(0x0e00 << 16) | (0xae00 >> 2),
 445	0x00000000,
 446	(0x0e00 << 16) | (0x9604 >> 2),
 447	0x00000000,
 448	(0x0e00 << 16) | (0xac08 >> 2),
 449	0x00000000,
 450	(0x0e00 << 16) | (0xac0c >> 2),
 451	0x00000000,
 452	(0x0e00 << 16) | (0xac10 >> 2),
 453	0x00000000,
 454	(0x0e00 << 16) | (0xac14 >> 2),
 455	0x00000000,
 456	(0x0e00 << 16) | (0xac58 >> 2),
 457	0x00000000,
 458	(0x0e00 << 16) | (0xac68 >> 2),
 459	0x00000000,
 460	(0x0e00 << 16) | (0xac6c >> 2),
 461	0x00000000,
 462	(0x0e00 << 16) | (0xac70 >> 2),
 463	0x00000000,
 464	(0x0e00 << 16) | (0xac74 >> 2),
 465	0x00000000,
 466	(0x0e00 << 16) | (0xac78 >> 2),
 467	0x00000000,
 468	(0x0e00 << 16) | (0xac7c >> 2),
 469	0x00000000,
 470	(0x0e00 << 16) | (0xac80 >> 2),
 471	0x00000000,
 472	(0x0e00 << 16) | (0xac84 >> 2),
 473	0x00000000,
 474	(0x0e00 << 16) | (0xac88 >> 2),
 475	0x00000000,
 476	(0x0e00 << 16) | (0xac8c >> 2),
 477	0x00000000,
 478	(0x0e00 << 16) | (0x970c >> 2),
 479	0x00000000,
 480	(0x0e00 << 16) | (0x9714 >> 2),
 481	0x00000000,
 482	(0x0e00 << 16) | (0x9718 >> 2),
 483	0x00000000,
 484	(0x0e00 << 16) | (0x971c >> 2),
 485	0x00000000,
 486	(0x0e00 << 16) | (0x31068 >> 2),
 487	0x00000000,
 488	(0x4e00 << 16) | (0x31068 >> 2),
 489	0x00000000,
 490	(0x5e00 << 16) | (0x31068 >> 2),
 491	0x00000000,
 492	(0x6e00 << 16) | (0x31068 >> 2),
 493	0x00000000,
 494	(0x7e00 << 16) | (0x31068 >> 2),
 495	0x00000000,
 496	(0x8e00 << 16) | (0x31068 >> 2),
 497	0x00000000,
 498	(0x9e00 << 16) | (0x31068 >> 2),
 499	0x00000000,
 500	(0xae00 << 16) | (0x31068 >> 2),
 501	0x00000000,
 502	(0xbe00 << 16) | (0x31068 >> 2),
 503	0x00000000,
 504	(0x0e00 << 16) | (0xcd10 >> 2),
 505	0x00000000,
 506	(0x0e00 << 16) | (0xcd14 >> 2),
 507	0x00000000,
 508	(0x0e00 << 16) | (0x88b0 >> 2),
 509	0x00000000,
 510	(0x0e00 << 16) | (0x88b4 >> 2),
 511	0x00000000,
 512	(0x0e00 << 16) | (0x88b8 >> 2),
 513	0x00000000,
 514	(0x0e00 << 16) | (0x88bc >> 2),
 515	0x00000000,
 516	(0x0400 << 16) | (0x89c0 >> 2),
 517	0x00000000,
 518	(0x0e00 << 16) | (0x88c4 >> 2),
 519	0x00000000,
 520	(0x0e00 << 16) | (0x88c8 >> 2),
 521	0x00000000,
 522	(0x0e00 << 16) | (0x88d0 >> 2),
 523	0x00000000,
 524	(0x0e00 << 16) | (0x88d4 >> 2),
 525	0x00000000,
 526	(0x0e00 << 16) | (0x88d8 >> 2),
 527	0x00000000,
 528	(0x0e00 << 16) | (0x8980 >> 2),
 529	0x00000000,
 530	(0x0e00 << 16) | (0x30938 >> 2),
 531	0x00000000,
 532	(0x0e00 << 16) | (0x3093c >> 2),
 533	0x00000000,
 534	(0x0e00 << 16) | (0x30940 >> 2),
 535	0x00000000,
 536	(0x0e00 << 16) | (0x89a0 >> 2),
 537	0x00000000,
 538	(0x0e00 << 16) | (0x30900 >> 2),
 539	0x00000000,
 540	(0x0e00 << 16) | (0x30904 >> 2),
 541	0x00000000,
 542	(0x0e00 << 16) | (0x89b4 >> 2),
 543	0x00000000,
 544	(0x0e00 << 16) | (0x3c210 >> 2),
 545	0x00000000,
 546	(0x0e00 << 16) | (0x3c214 >> 2),
 547	0x00000000,
 548	(0x0e00 << 16) | (0x3c218 >> 2),
 549	0x00000000,
 550	(0x0e00 << 16) | (0x8904 >> 2),
 551	0x00000000,
 552	0x5,
 553	(0x0e00 << 16) | (0x8c28 >> 2),
 554	(0x0e00 << 16) | (0x8c2c >> 2),
 555	(0x0e00 << 16) | (0x8c30 >> 2),
 556	(0x0e00 << 16) | (0x8c34 >> 2),
 557	(0x0e00 << 16) | (0x9600 >> 2),
 558};
 559
 560static const u32 kalindi_rlc_save_restore_register_list[] =
 561{
 562	(0x0e00 << 16) | (0xc12c >> 2),
 563	0x00000000,
 564	(0x0e00 << 16) | (0xc140 >> 2),
 565	0x00000000,
 566	(0x0e00 << 16) | (0xc150 >> 2),
 567	0x00000000,
 568	(0x0e00 << 16) | (0xc15c >> 2),
 569	0x00000000,
 570	(0x0e00 << 16) | (0xc168 >> 2),
 571	0x00000000,
 572	(0x0e00 << 16) | (0xc170 >> 2),
 573	0x00000000,
 574	(0x0e00 << 16) | (0xc204 >> 2),
 575	0x00000000,
 576	(0x0e00 << 16) | (0xc2b4 >> 2),
 577	0x00000000,
 578	(0x0e00 << 16) | (0xc2b8 >> 2),
 579	0x00000000,
 580	(0x0e00 << 16) | (0xc2bc >> 2),
 581	0x00000000,
 582	(0x0e00 << 16) | (0xc2c0 >> 2),
 583	0x00000000,
 584	(0x0e00 << 16) | (0x8228 >> 2),
 585	0x00000000,
 586	(0x0e00 << 16) | (0x829c >> 2),
 587	0x00000000,
 588	(0x0e00 << 16) | (0x869c >> 2),
 589	0x00000000,
 590	(0x0600 << 16) | (0x98f4 >> 2),
 591	0x00000000,
 592	(0x0e00 << 16) | (0x98f8 >> 2),
 593	0x00000000,
 594	(0x0e00 << 16) | (0x9900 >> 2),
 595	0x00000000,
 596	(0x0e00 << 16) | (0xc260 >> 2),
 597	0x00000000,
 598	(0x0e00 << 16) | (0x90e8 >> 2),
 599	0x00000000,
 600	(0x0e00 << 16) | (0x3c000 >> 2),
 601	0x00000000,
 602	(0x0e00 << 16) | (0x3c00c >> 2),
 603	0x00000000,
 604	(0x0e00 << 16) | (0x8c1c >> 2),
 605	0x00000000,
 606	(0x0e00 << 16) | (0x9700 >> 2),
 607	0x00000000,
 608	(0x0e00 << 16) | (0xcd20 >> 2),
 609	0x00000000,
 610	(0x4e00 << 16) | (0xcd20 >> 2),
 611	0x00000000,
 612	(0x5e00 << 16) | (0xcd20 >> 2),
 613	0x00000000,
 614	(0x6e00 << 16) | (0xcd20 >> 2),
 615	0x00000000,
 616	(0x7e00 << 16) | (0xcd20 >> 2),
 617	0x00000000,
 618	(0x0e00 << 16) | (0x89bc >> 2),
 619	0x00000000,
 620	(0x0e00 << 16) | (0x8900 >> 2),
 621	0x00000000,
 622	0x3,
 623	(0x0e00 << 16) | (0xc130 >> 2),
 624	0x00000000,
 625	(0x0e00 << 16) | (0xc134 >> 2),
 626	0x00000000,
 627	(0x0e00 << 16) | (0xc1fc >> 2),
 628	0x00000000,
 629	(0x0e00 << 16) | (0xc208 >> 2),
 630	0x00000000,
 631	(0x0e00 << 16) | (0xc264 >> 2),
 632	0x00000000,
 633	(0x0e00 << 16) | (0xc268 >> 2),
 634	0x00000000,
 635	(0x0e00 << 16) | (0xc26c >> 2),
 636	0x00000000,
 637	(0x0e00 << 16) | (0xc270 >> 2),
 638	0x00000000,
 639	(0x0e00 << 16) | (0xc274 >> 2),
 640	0x00000000,
 641	(0x0e00 << 16) | (0xc28c >> 2),
 642	0x00000000,
 643	(0x0e00 << 16) | (0xc290 >> 2),
 644	0x00000000,
 645	(0x0e00 << 16) | (0xc294 >> 2),
 646	0x00000000,
 647	(0x0e00 << 16) | (0xc298 >> 2),
 648	0x00000000,
 649	(0x0e00 << 16) | (0xc2a0 >> 2),
 650	0x00000000,
 651	(0x0e00 << 16) | (0xc2a4 >> 2),
 652	0x00000000,
 653	(0x0e00 << 16) | (0xc2a8 >> 2),
 654	0x00000000,
 655	(0x0e00 << 16) | (0xc2ac >> 2),
 656	0x00000000,
 657	(0x0e00 << 16) | (0x301d0 >> 2),
 658	0x00000000,
 659	(0x0e00 << 16) | (0x30238 >> 2),
 660	0x00000000,
 661	(0x0e00 << 16) | (0x30250 >> 2),
 662	0x00000000,
 663	(0x0e00 << 16) | (0x30254 >> 2),
 664	0x00000000,
 665	(0x0e00 << 16) | (0x30258 >> 2),
 666	0x00000000,
 667	(0x0e00 << 16) | (0x3025c >> 2),
 668	0x00000000,
 669	(0x4e00 << 16) | (0xc900 >> 2),
 670	0x00000000,
 671	(0x5e00 << 16) | (0xc900 >> 2),
 672	0x00000000,
 673	(0x6e00 << 16) | (0xc900 >> 2),
 674	0x00000000,
 675	(0x7e00 << 16) | (0xc900 >> 2),
 676	0x00000000,
 677	(0x4e00 << 16) | (0xc904 >> 2),
 678	0x00000000,
 679	(0x5e00 << 16) | (0xc904 >> 2),
 680	0x00000000,
 681	(0x6e00 << 16) | (0xc904 >> 2),
 682	0x00000000,
 683	(0x7e00 << 16) | (0xc904 >> 2),
 684	0x00000000,
 685	(0x4e00 << 16) | (0xc908 >> 2),
 686	0x00000000,
 687	(0x5e00 << 16) | (0xc908 >> 2),
 688	0x00000000,
 689	(0x6e00 << 16) | (0xc908 >> 2),
 690	0x00000000,
 691	(0x7e00 << 16) | (0xc908 >> 2),
 692	0x00000000,
 693	(0x4e00 << 16) | (0xc90c >> 2),
 694	0x00000000,
 695	(0x5e00 << 16) | (0xc90c >> 2),
 696	0x00000000,
 697	(0x6e00 << 16) | (0xc90c >> 2),
 698	0x00000000,
 699	(0x7e00 << 16) | (0xc90c >> 2),
 700	0x00000000,
 701	(0x4e00 << 16) | (0xc910 >> 2),
 702	0x00000000,
 703	(0x5e00 << 16) | (0xc910 >> 2),
 704	0x00000000,
 705	(0x6e00 << 16) | (0xc910 >> 2),
 706	0x00000000,
 707	(0x7e00 << 16) | (0xc910 >> 2),
 708	0x00000000,
 709	(0x0e00 << 16) | (0xc99c >> 2),
 710	0x00000000,
 711	(0x0e00 << 16) | (0x9834 >> 2),
 712	0x00000000,
 713	(0x0000 << 16) | (0x30f00 >> 2),
 714	0x00000000,
 715	(0x0000 << 16) | (0x30f04 >> 2),
 716	0x00000000,
 717	(0x0000 << 16) | (0x30f08 >> 2),
 718	0x00000000,
 719	(0x0000 << 16) | (0x30f0c >> 2),
 720	0x00000000,
 721	(0x0600 << 16) | (0x9b7c >> 2),
 722	0x00000000,
 723	(0x0e00 << 16) | (0x8a14 >> 2),
 724	0x00000000,
 725	(0x0e00 << 16) | (0x8a18 >> 2),
 726	0x00000000,
 727	(0x0600 << 16) | (0x30a00 >> 2),
 728	0x00000000,
 729	(0x0e00 << 16) | (0x8bf0 >> 2),
 730	0x00000000,
 731	(0x0e00 << 16) | (0x8bcc >> 2),
 732	0x00000000,
 733	(0x0e00 << 16) | (0x8b24 >> 2),
 734	0x00000000,
 735	(0x0e00 << 16) | (0x30a04 >> 2),
 736	0x00000000,
 737	(0x0600 << 16) | (0x30a10 >> 2),
 738	0x00000000,
 739	(0x0600 << 16) | (0x30a14 >> 2),
 740	0x00000000,
 741	(0x0600 << 16) | (0x30a18 >> 2),
 742	0x00000000,
 743	(0x0600 << 16) | (0x30a2c >> 2),
 744	0x00000000,
 745	(0x0e00 << 16) | (0xc700 >> 2),
 746	0x00000000,
 747	(0x0e00 << 16) | (0xc704 >> 2),
 748	0x00000000,
 749	(0x0e00 << 16) | (0xc708 >> 2),
 750	0x00000000,
 751	(0x0e00 << 16) | (0xc768 >> 2),
 752	0x00000000,
 753	(0x0400 << 16) | (0xc770 >> 2),
 754	0x00000000,
 755	(0x0400 << 16) | (0xc774 >> 2),
 756	0x00000000,
 757	(0x0400 << 16) | (0xc798 >> 2),
 758	0x00000000,
 759	(0x0400 << 16) | (0xc79c >> 2),
 760	0x00000000,
 761	(0x0e00 << 16) | (0x9100 >> 2),
 762	0x00000000,
 763	(0x0e00 << 16) | (0x3c010 >> 2),
 764	0x00000000,
 765	(0x0e00 << 16) | (0x8c00 >> 2),
 766	0x00000000,
 767	(0x0e00 << 16) | (0x8c04 >> 2),
 768	0x00000000,
 769	(0x0e00 << 16) | (0x8c20 >> 2),
 770	0x00000000,
 771	(0x0e00 << 16) | (0x8c38 >> 2),
 772	0x00000000,
 773	(0x0e00 << 16) | (0x8c3c >> 2),
 774	0x00000000,
 775	(0x0e00 << 16) | (0xae00 >> 2),
 776	0x00000000,
 777	(0x0e00 << 16) | (0x9604 >> 2),
 778	0x00000000,
 779	(0x0e00 << 16) | (0xac08 >> 2),
 780	0x00000000,
 781	(0x0e00 << 16) | (0xac0c >> 2),
 782	0x00000000,
 783	(0x0e00 << 16) | (0xac10 >> 2),
 784	0x00000000,
 785	(0x0e00 << 16) | (0xac14 >> 2),
 786	0x00000000,
 787	(0x0e00 << 16) | (0xac58 >> 2),
 788	0x00000000,
 789	(0x0e00 << 16) | (0xac68 >> 2),
 790	0x00000000,
 791	(0x0e00 << 16) | (0xac6c >> 2),
 792	0x00000000,
 793	(0x0e00 << 16) | (0xac70 >> 2),
 794	0x00000000,
 795	(0x0e00 << 16) | (0xac74 >> 2),
 796	0x00000000,
 797	(0x0e00 << 16) | (0xac78 >> 2),
 798	0x00000000,
 799	(0x0e00 << 16) | (0xac7c >> 2),
 800	0x00000000,
 801	(0x0e00 << 16) | (0xac80 >> 2),
 802	0x00000000,
 803	(0x0e00 << 16) | (0xac84 >> 2),
 804	0x00000000,
 805	(0x0e00 << 16) | (0xac88 >> 2),
 806	0x00000000,
 807	(0x0e00 << 16) | (0xac8c >> 2),
 808	0x00000000,
 809	(0x0e00 << 16) | (0x970c >> 2),
 810	0x00000000,
 811	(0x0e00 << 16) | (0x9714 >> 2),
 812	0x00000000,
 813	(0x0e00 << 16) | (0x9718 >> 2),
 814	0x00000000,
 815	(0x0e00 << 16) | (0x971c >> 2),
 816	0x00000000,
 817	(0x0e00 << 16) | (0x31068 >> 2),
 818	0x00000000,
 819	(0x4e00 << 16) | (0x31068 >> 2),
 820	0x00000000,
 821	(0x5e00 << 16) | (0x31068 >> 2),
 822	0x00000000,
 823	(0x6e00 << 16) | (0x31068 >> 2),
 824	0x00000000,
 825	(0x7e00 << 16) | (0x31068 >> 2),
 826	0x00000000,
 827	(0x0e00 << 16) | (0xcd10 >> 2),
 828	0x00000000,
 829	(0x0e00 << 16) | (0xcd14 >> 2),
 830	0x00000000,
 831	(0x0e00 << 16) | (0x88b0 >> 2),
 832	0x00000000,
 833	(0x0e00 << 16) | (0x88b4 >> 2),
 834	0x00000000,
 835	(0x0e00 << 16) | (0x88b8 >> 2),
 836	0x00000000,
 837	(0x0e00 << 16) | (0x88bc >> 2),
 838	0x00000000,
 839	(0x0400 << 16) | (0x89c0 >> 2),
 840	0x00000000,
 841	(0x0e00 << 16) | (0x88c4 >> 2),
 842	0x00000000,
 843	(0x0e00 << 16) | (0x88c8 >> 2),
 844	0x00000000,
 845	(0x0e00 << 16) | (0x88d0 >> 2),
 846	0x00000000,
 847	(0x0e00 << 16) | (0x88d4 >> 2),
 848	0x00000000,
 849	(0x0e00 << 16) | (0x88d8 >> 2),
 850	0x00000000,
 851	(0x0e00 << 16) | (0x8980 >> 2),
 852	0x00000000,
 853	(0x0e00 << 16) | (0x30938 >> 2),
 854	0x00000000,
 855	(0x0e00 << 16) | (0x3093c >> 2),
 856	0x00000000,
 857	(0x0e00 << 16) | (0x30940 >> 2),
 858	0x00000000,
 859	(0x0e00 << 16) | (0x89a0 >> 2),
 860	0x00000000,
 861	(0x0e00 << 16) | (0x30900 >> 2),
 862	0x00000000,
 863	(0x0e00 << 16) | (0x30904 >> 2),
 864	0x00000000,
 865	(0x0e00 << 16) | (0x89b4 >> 2),
 866	0x00000000,
 867	(0x0e00 << 16) | (0x3e1fc >> 2),
 868	0x00000000,
 869	(0x0e00 << 16) | (0x3c210 >> 2),
 870	0x00000000,
 871	(0x0e00 << 16) | (0x3c214 >> 2),
 872	0x00000000,
 873	(0x0e00 << 16) | (0x3c218 >> 2),
 874	0x00000000,
 875	(0x0e00 << 16) | (0x8904 >> 2),
 876	0x00000000,
 877	0x5,
 878	(0x0e00 << 16) | (0x8c28 >> 2),
 879	(0x0e00 << 16) | (0x8c2c >> 2),
 880	(0x0e00 << 16) | (0x8c30 >> 2),
 881	(0x0e00 << 16) | (0x8c34 >> 2),
 882	(0x0e00 << 16) | (0x9600 >> 2),
 883};
 884
 885static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 886static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
 887static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 888static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 889
 890/*
 891 * Core functions
 892 */
 893/**
 894 * gfx_v7_0_init_microcode - load ucode images from disk
 895 *
 896 * @adev: amdgpu_device pointer
 897 *
 898 * Use the firmware interface to load the ucode images into
 899 * the driver (not loaded into hw).
 900 * Returns 0 on success, error on failure.
 901 */
 902static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 903{
 904	const char *chip_name;
 905	char fw_name[30];
 906	int err;
 907
 908	DRM_DEBUG("\n");
 909
 910	switch (adev->asic_type) {
 911	case CHIP_BONAIRE:
 912		chip_name = "bonaire";
 913		break;
 914	case CHIP_HAWAII:
 915		chip_name = "hawaii";
 916		break;
 917	case CHIP_KAVERI:
 918		chip_name = "kaveri";
 919		break;
 920	case CHIP_KABINI:
 921		chip_name = "kabini";
 922		break;
 923	case CHIP_MULLINS:
 924		chip_name = "mullins";
 925		break;
 926	default: BUG();
 927	}
 928
 929	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
 930	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 931	if (err)
 932		goto out;
 933	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
 934	if (err)
 935		goto out;
 936
 937	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
 938	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
 939	if (err)
 940		goto out;
 941	err = amdgpu_ucode_validate(adev->gfx.me_fw);
 942	if (err)
 943		goto out;
 944
 945	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
 946	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
 947	if (err)
 948		goto out;
 949	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
 950	if (err)
 951		goto out;
 952
 953	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 954	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 955	if (err)
 956		goto out;
 957	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
 958	if (err)
 959		goto out;
 960
 961	if (adev->asic_type == CHIP_KAVERI) {
 962		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
 963		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
 964		if (err)
 965			goto out;
 966		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
 967		if (err)
 968			goto out;
 969	}
 970
 971	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
 972	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 973	if (err)
 974		goto out;
 975	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
 976
 977out:
 978	if (err) {
 979		pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
 980		release_firmware(adev->gfx.pfp_fw);
 981		adev->gfx.pfp_fw = NULL;
 982		release_firmware(adev->gfx.me_fw);
 983		adev->gfx.me_fw = NULL;
 984		release_firmware(adev->gfx.ce_fw);
 985		adev->gfx.ce_fw = NULL;
 986		release_firmware(adev->gfx.mec_fw);
 987		adev->gfx.mec_fw = NULL;
 988		release_firmware(adev->gfx.mec2_fw);
 989		adev->gfx.mec2_fw = NULL;
 990		release_firmware(adev->gfx.rlc_fw);
 991		adev->gfx.rlc_fw = NULL;
 992	}
 993	return err;
 994}
 995
 996static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
 997{
 998	release_firmware(adev->gfx.pfp_fw);
 999	adev->gfx.pfp_fw = NULL;
1000	release_firmware(adev->gfx.me_fw);
1001	adev->gfx.me_fw = NULL;
1002	release_firmware(adev->gfx.ce_fw);
1003	adev->gfx.ce_fw = NULL;
1004	release_firmware(adev->gfx.mec_fw);
1005	adev->gfx.mec_fw = NULL;
1006	release_firmware(adev->gfx.mec2_fw);
1007	adev->gfx.mec2_fw = NULL;
1008	release_firmware(adev->gfx.rlc_fw);
1009	adev->gfx.rlc_fw = NULL;
1010}
1011
1012/**
1013 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
1014 *
1015 * @adev: amdgpu_device pointer
1016 *
1017 * Starting with SI, the tiling setup is done globally in a
1018 * set of 32 tiling modes.  Rather than selecting each set of
1019 * parameters per surface as on older asics, we just select
1020 * which index in the tiling table we want to use, and the
1021 * surface uses those parameters (CIK).
1022 */
1023static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
1024{
1025	const u32 num_tile_mode_states =
1026			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1027	const u32 num_secondary_tile_mode_states =
1028			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1029	u32 reg_offset, split_equal_to_row_size;
1030	uint32_t *tile, *macrotile;
1031
1032	tile = adev->gfx.config.tile_mode_array;
1033	macrotile = adev->gfx.config.macrotile_mode_array;
1034
1035	switch (adev->gfx.config.mem_row_size_in_kb) {
1036	case 1:
1037		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
1038		break;
1039	case 2:
1040	default:
1041		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
1042		break;
1043	case 4:
1044		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1045		break;
1046	}
1047
1048	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1049		tile[reg_offset] = 0;
1050	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1051		macrotile[reg_offset] = 0;
1052
1053	switch (adev->asic_type) {
1054	case CHIP_BONAIRE:
1055		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1056			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1057			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1058			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1059		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1060			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1061			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1062			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1063		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1064			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1065			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1066			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1067		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1068			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1069			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1070			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1071		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1072			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1073			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1074			   TILE_SPLIT(split_equal_to_row_size));
1075		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1076			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1077			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1078		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1079			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1080			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1081			   TILE_SPLIT(split_equal_to_row_size));
1082		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1083		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1084			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
1085		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1086			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1087			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1088		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1089			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1090			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1091			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1092		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1093			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1094			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1095			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1096		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1097		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1098			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1099			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1100		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1101			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1102			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1103			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1104		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1105			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1106			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1107			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1108		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1109			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1110			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1111			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1112		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1113		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1114			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1115			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1116			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1117		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1118			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1119			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1120		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1121			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1122			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1123			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1124		tile[21] =  (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1125			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1126			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1127			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1128		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1129			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1130			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1131			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1132		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1133		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1134			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1135			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1136			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1137		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1138			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1139			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1140			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1141		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1142			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1143			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1144			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1145		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1146			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1147			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1148		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1149			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1150			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1151			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1152		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1153			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1154			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1155			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1156		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1157
1158		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1159				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1160				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1161				NUM_BANKS(ADDR_SURF_16_BANK));
1162		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1163				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1164				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1165				NUM_BANKS(ADDR_SURF_16_BANK));
1166		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1167				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1168				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1169				NUM_BANKS(ADDR_SURF_16_BANK));
1170		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1171				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1172				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1173				NUM_BANKS(ADDR_SURF_16_BANK));
1174		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1175				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1176				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1177				NUM_BANKS(ADDR_SURF_16_BANK));
1178		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1179				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1180				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1181				NUM_BANKS(ADDR_SURF_8_BANK));
1182		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1183				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1184				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1185				NUM_BANKS(ADDR_SURF_4_BANK));
1186		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1187				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1188				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1189				NUM_BANKS(ADDR_SURF_16_BANK));
1190		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1191				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1192				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1193				NUM_BANKS(ADDR_SURF_16_BANK));
1194		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1195				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1196				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1197				NUM_BANKS(ADDR_SURF_16_BANK));
1198		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1199				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1200				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1201				NUM_BANKS(ADDR_SURF_16_BANK));
1202		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1203				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1204				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1205				NUM_BANKS(ADDR_SURF_16_BANK));
1206		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1207				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1208				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1209				NUM_BANKS(ADDR_SURF_8_BANK));
1210		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1211				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1212				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1213				NUM_BANKS(ADDR_SURF_4_BANK));
1214
1215		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1216			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1217		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1218			if (reg_offset != 7)
1219				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1220		break;
1221	case CHIP_HAWAII:
1222		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1223			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1224			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1225			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1226		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1227			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1228			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1229			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1230		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1231			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1232			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1233			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1234		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1235			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1236			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1237			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1238		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1239			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1240			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1241			   TILE_SPLIT(split_equal_to_row_size));
1242		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1243			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1244			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1245			   TILE_SPLIT(split_equal_to_row_size));
1246		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1247			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1248			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1249			   TILE_SPLIT(split_equal_to_row_size));
1250		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1251			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1252			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1253			   TILE_SPLIT(split_equal_to_row_size));
1254		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1255			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1256		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1257			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1258			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1259		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1260			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1261			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1262			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1263		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1264			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1265			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1266			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1267		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1268			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1269			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1270			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1271		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1272			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1273			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1274		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1275			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1276			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1277			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1278		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1279			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1280			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1281			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1282		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1283			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1284			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1285			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1286		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1287			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1288			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1289			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1290		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1291			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1292			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1293			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1294		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1295			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1296			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1297		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1298			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1299			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1300			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1301		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1302			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1303			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1304			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1305		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1306			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1307			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1308			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1309		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1310			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1311			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1312			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1313		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1314			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1315			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1316			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1317		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1318			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1319			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1320			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1321		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1322			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1323			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1324			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1325		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1326			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1327			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1328		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1329			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1330			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1331			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1332		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1333			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1334			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1335			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1336		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1337			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1338			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1339			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1340
1341		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1342				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1343				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1344				NUM_BANKS(ADDR_SURF_16_BANK));
1345		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1346				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1347				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1348				NUM_BANKS(ADDR_SURF_16_BANK));
1349		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1350				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1351				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1352				NUM_BANKS(ADDR_SURF_16_BANK));
1353		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1354				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1355				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1356				NUM_BANKS(ADDR_SURF_16_BANK));
1357		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1358				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1359				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1360				NUM_BANKS(ADDR_SURF_8_BANK));
1361		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1362				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1363				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1364				NUM_BANKS(ADDR_SURF_4_BANK));
1365		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1366				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1367				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1368				NUM_BANKS(ADDR_SURF_4_BANK));
1369		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1370				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1371				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1372				NUM_BANKS(ADDR_SURF_16_BANK));
1373		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1374				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1375				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1376				NUM_BANKS(ADDR_SURF_16_BANK));
1377		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1378				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1379				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1380				NUM_BANKS(ADDR_SURF_16_BANK));
1381		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1382				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1383				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1384				NUM_BANKS(ADDR_SURF_8_BANK));
1385		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1386				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1387				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1388				NUM_BANKS(ADDR_SURF_16_BANK));
1389		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1390				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1391				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1392				NUM_BANKS(ADDR_SURF_8_BANK));
1393		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1394				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1395				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1396				NUM_BANKS(ADDR_SURF_4_BANK));
1397
1398		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1399			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1400		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1401			if (reg_offset != 7)
1402				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1403		break;
1404	case CHIP_KABINI:
1405	case CHIP_KAVERI:
1406	case CHIP_MULLINS:
1407	default:
1408		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1409			   PIPE_CONFIG(ADDR_SURF_P2) |
1410			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1411			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1412		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1413			   PIPE_CONFIG(ADDR_SURF_P2) |
1414			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1415			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1416		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1417			   PIPE_CONFIG(ADDR_SURF_P2) |
1418			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1419			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1420		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1421			   PIPE_CONFIG(ADDR_SURF_P2) |
1422			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1423			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1424		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1425			   PIPE_CONFIG(ADDR_SURF_P2) |
1426			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1427			   TILE_SPLIT(split_equal_to_row_size));
1428		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1429			   PIPE_CONFIG(ADDR_SURF_P2) |
1430			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1431		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1432			   PIPE_CONFIG(ADDR_SURF_P2) |
1433			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1434			   TILE_SPLIT(split_equal_to_row_size));
1435		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1436		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1437			   PIPE_CONFIG(ADDR_SURF_P2));
1438		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1439			   PIPE_CONFIG(ADDR_SURF_P2) |
1440			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1441		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1442			    PIPE_CONFIG(ADDR_SURF_P2) |
1443			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1444			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1445		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1446			    PIPE_CONFIG(ADDR_SURF_P2) |
1447			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1448			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1449		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1450		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1451			    PIPE_CONFIG(ADDR_SURF_P2) |
1452			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1453		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1454			    PIPE_CONFIG(ADDR_SURF_P2) |
1455			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1456			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1457		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1458			    PIPE_CONFIG(ADDR_SURF_P2) |
1459			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1460			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1461		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1462			    PIPE_CONFIG(ADDR_SURF_P2) |
1463			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1464			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1465		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1466		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1467			    PIPE_CONFIG(ADDR_SURF_P2) |
1468			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1469			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1470		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1471			    PIPE_CONFIG(ADDR_SURF_P2) |
1472			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1473		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1474			    PIPE_CONFIG(ADDR_SURF_P2) |
1475			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1476			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1477		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1478			    PIPE_CONFIG(ADDR_SURF_P2) |
1479			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1480			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1481		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1482			    PIPE_CONFIG(ADDR_SURF_P2) |
1483			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1484			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1485		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1486		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1487			    PIPE_CONFIG(ADDR_SURF_P2) |
1488			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1489			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1490		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1491			    PIPE_CONFIG(ADDR_SURF_P2) |
1492			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1493			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1494		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1495			    PIPE_CONFIG(ADDR_SURF_P2) |
1496			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1497			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1498		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1499			    PIPE_CONFIG(ADDR_SURF_P2) |
1500			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1501		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1502			    PIPE_CONFIG(ADDR_SURF_P2) |
1503			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1504			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1505		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1506			    PIPE_CONFIG(ADDR_SURF_P2) |
1507			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1508			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1509		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1510
1511		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1512				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1513				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1514				NUM_BANKS(ADDR_SURF_8_BANK));
1515		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1516				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1517				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1518				NUM_BANKS(ADDR_SURF_8_BANK));
1519		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1520				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1521				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1522				NUM_BANKS(ADDR_SURF_8_BANK));
1523		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1524				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1525				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1526				NUM_BANKS(ADDR_SURF_8_BANK));
1527		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1528				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1529				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1530				NUM_BANKS(ADDR_SURF_8_BANK));
1531		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1532				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1533				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1534				NUM_BANKS(ADDR_SURF_8_BANK));
1535		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1536				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1537				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1538				NUM_BANKS(ADDR_SURF_8_BANK));
1539		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1540				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1541				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1542				NUM_BANKS(ADDR_SURF_16_BANK));
1543		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1544				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1545				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1546				NUM_BANKS(ADDR_SURF_16_BANK));
1547		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1548				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1549				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1550				NUM_BANKS(ADDR_SURF_16_BANK));
1551		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1552				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1553				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1554				NUM_BANKS(ADDR_SURF_16_BANK));
1555		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1556				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1557				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1558				NUM_BANKS(ADDR_SURF_16_BANK));
1559		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1560				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1561				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1562				NUM_BANKS(ADDR_SURF_16_BANK));
1563		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1564				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1565				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1566				NUM_BANKS(ADDR_SURF_8_BANK));
1567
1568		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1569			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1570		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1571			if (reg_offset != 7)
1572				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1573		break;
1574	}
1575}
1576
1577/**
1578 * gfx_v7_0_select_se_sh - select which SE, SH to address
1579 *
1580 * @adev: amdgpu_device pointer
1581 * @se_num: shader engine to address
1582 * @sh_num: sh block to address
1583 * @instance: instance to address (0xffffffff for broadcast)
 *
1584 * Select which SE, SH combinations to address. Certain
1585 * registers are instanced per SE or SH.  0xffffffff means
1586 * broadcast to all SEs or SHs (CIK).
1587 */
1588static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1589				  u32 se_num, u32 sh_num, u32 instance)
1590{
1591	u32 data;
1592
1593	if (instance == 0xffffffff)
1594		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1595	else
1596		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1597
1598	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1599		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1600			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
1601	else if (se_num == 0xffffffff)
1602		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
1603			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
1604	else if (sh_num == 0xffffffff)
1605		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1606			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1607	else
1608		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
1609			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1610	WREG32(mmGRBM_GFX_INDEX, data);
1611}
1612
1613/**
1614 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1615 *
1616 * @adev: amdgpu_device pointer
1617 *
1618 * Calculates the bitmask of enabled RBs (CIK).
1619 * Returns the enabled RB bitmask.
1620 */
1621static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1622{
1623	u32 data, mask;
1624
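	/*
	 * Bits set in either register mark RBs that are unusable, either
	 * disabled in hardware (CC_*) or by software (GC_USER_*).
	 */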
1625	data = RREG32(mmCC_RB_BACKEND_DISABLE);
1626	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1627
1628	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1629	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1630
1631	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1632					 adev->gfx.config.max_sh_per_se);
1633
1634	return (~data) & mask;
1635}
1636
1637static void
1638gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1639{
1640	switch (adev->asic_type) {
1641	case CHIP_BONAIRE:
1642		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1643			  SE_XSEL(1) | SE_YSEL(1);
1644		*rconf1 |= 0x0;
1645		break;
1646	case CHIP_HAWAII:
1647		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1648			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1649			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1650			  SE_YSEL(3);
1651		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1652			   SE_PAIR_YSEL(2);
1653		break;
1654	case CHIP_KAVERI:
1655		*rconf |= RB_MAP_PKR0(2);
1656		*rconf1 |= 0x0;
1657		break;
1658	case CHIP_KABINI:
1659	case CHIP_MULLINS:
1660		*rconf |= 0x0;
1661		*rconf1 |= 0x0;
1662		break;
1663	default:
1664		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1665		break;
1666	}
1667}
1668
1669static void
1670gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1671					u32 raster_config, u32 raster_config_1,
1672					unsigned rb_mask, unsigned num_rb)
1673{
1674	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1675	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1676	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1677	unsigned rb_per_se = num_rb / num_se;
1678	unsigned se_mask[4];
1679	unsigned se;
1680
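	/* carve the global RB mask into one slice per shader engine */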
1681	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1682	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1683	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1684	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1685
1686	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1687	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1688	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1689
1690	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1691			     (!se_mask[2] && !se_mask[3]))) {
1692		raster_config_1 &= ~SE_PAIR_MAP_MASK;
1693
1694		if (!se_mask[0] && !se_mask[1]) {
1695			raster_config_1 |=
1696				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1697		} else {
1698			raster_config_1 |=
1699				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1700		}
1701	}
1702
1703	for (se = 0; se < num_se; se++) {
1704		unsigned raster_config_se = raster_config;
1705		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1706		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1707		int idx = (se / 2) * 2;
1708
1709		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1710			raster_config_se &= ~SE_MAP_MASK;
1711
1712			if (!se_mask[idx]) {
1713				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1714			} else {
1715				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1716			}
1717		}
1718
1719		pkr0_mask &= rb_mask;
1720		pkr1_mask &= rb_mask;
1721		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1722			raster_config_se &= ~PKR_MAP_MASK;
1723
1724			if (!pkr0_mask) {
1725				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1726			} else {
1727				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1728			}
1729		}
1730
1731		if (rb_per_se >= 2) {
1732			unsigned rb0_mask = 1 << (se * rb_per_se);
1733			unsigned rb1_mask = rb0_mask << 1;
1734
1735			rb0_mask &= rb_mask;
1736			rb1_mask &= rb_mask;
1737			if (!rb0_mask || !rb1_mask) {
1738				raster_config_se &= ~RB_MAP_PKR0_MASK;
1739
1740				if (!rb0_mask) {
1741					raster_config_se |=
1742						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1743				} else {
1744					raster_config_se |=
1745						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1746				}
1747			}
1748
1749			if (rb_per_se > 2) {
1750				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1751				rb1_mask = rb0_mask << 1;
1752				rb0_mask &= rb_mask;
1753				rb1_mask &= rb_mask;
1754				if (!rb0_mask || !rb1_mask) {
1755					raster_config_se &= ~RB_MAP_PKR1_MASK;
1756
1757					if (!rb0_mask) {
1758						raster_config_se |=
1759							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1760					} else {
1761						raster_config_se |=
1762							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1763					}
1764				}
1765			}
1766		}
1767
1768		/* GRBM_GFX_INDEX has a different offset on CI+ */
1769		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1770		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1771		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1772	}
1773
1774	/* GRBM_GFX_INDEX has a different offset on CI+ */
1775	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1776}
1777
1778/**
1779 * gfx_v7_0_setup_rb - setup the RBs on the asic
1780 *
1781 * @adev: amdgpu_device pointer
1784 *
1785 * Configures per-SE/SH RB registers (CIK).
1786 */
1787static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1788{
1789	int i, j;
1790	u32 data;
1791	u32 raster_config = 0, raster_config_1 = 0;
1792	u32 active_rbs = 0;
1793	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1794					adev->gfx.config.max_sh_per_se;
1795	unsigned num_rb_pipes;
1796
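	/* read each SE/SH instance's RB bitmap and pack them into one global mask */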
1797	mutex_lock(&adev->grbm_idx_mutex);
1798	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1799		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1800			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1801			data = gfx_v7_0_get_rb_active_bitmap(adev);
1802			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1803					       rb_bitmap_width_per_sh);
1804		}
1805	}
1806	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1807
1808	adev->gfx.config.backend_enable_mask = active_rbs;
1809	adev->gfx.config.num_rbs = hweight32(active_rbs);
1810
1811	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1812			     adev->gfx.config.max_shader_engines, 16);
1813
1814	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1815
1816	if (!adev->gfx.config.backend_enable_mask ||
1817			adev->gfx.config.num_rbs >= num_rb_pipes) {
1818		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1819		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1820	} else {
1821		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1822							adev->gfx.config.backend_enable_mask,
1823							num_rb_pipes);
1824	}
1825
1826	/* cache the values for userspace */
1827	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1828		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1829			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1830			adev->gfx.config.rb_config[i][j].rb_backend_disable =
1831				RREG32(mmCC_RB_BACKEND_DISABLE);
1832			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
1833				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1834			adev->gfx.config.rb_config[i][j].raster_config =
1835				RREG32(mmPA_SC_RASTER_CONFIG);
1836			adev->gfx.config.rb_config[i][j].raster_config_1 =
1837				RREG32(mmPA_SC_RASTER_CONFIG_1);
1838		}
1839	}
1840	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1841	mutex_unlock(&adev->grbm_idx_mutex);
1842}
1843
1844/**
1845 * gfx_v7_0_init_compute_vmid - init the compute VMID SH_MEM registers
1846 *
1847 * @adev: amdgpu_device pointer
1848 *
1849 * Initialize compute vmid sh_mem registers
1850 *
1851 */
1852#define DEFAULT_SH_MEM_BASES	(0x6000)
1853static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1854{
1855	int i;
1856	uint32_t sh_mem_config;
1857	uint32_t sh_mem_bases;
1858
1859	/*
1860	 * Configure apertures:
1861	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1862	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1863	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1864	*/
1865	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1866	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1867			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1868	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
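	/* program the same config and aperture bases into every KFD-managed VMID */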
1869	mutex_lock(&adev->srbm_mutex);
1870	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1871		cik_srbm_select(adev, 0, 0, 0, i);
1872		/* CP and shaders */
1873		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
1874		WREG32(mmSH_MEM_APE1_BASE, 1);
1875		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1876		WREG32(mmSH_MEM_BASES, sh_mem_bases);
1877	}
1878	cik_srbm_select(adev, 0, 0, 0, 0);
1879	mutex_unlock(&adev->srbm_mutex);
1880
1881	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1882	   access. These should be enabled by FW for target VMIDs. */
1883	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1884		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
1885		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
1886		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
1887		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
1888	}
1889}
1890
1891static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
1892{
1893	int vmid;
1894
1895	/*
1896	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1897	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1898	 * the driver can enable them for graphics. VMID0 should maintain
1899	 * access so that HWS firmware can save/restore entries.
1900	 */
1901	for (vmid = 1; vmid < 16; vmid++) {
1902		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
1903		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
1904		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
1905		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
1906	}
1907}
1908
1909static void gfx_v7_0_config_init(struct amdgpu_device *adev)
1910{
1911	adev->gfx.config.double_offchip_lds_buf = 1;
1912}
1913
1914/**
1915 * gfx_v7_0_constants_init - setup the 3D engine
1916 *
1917 * @adev: amdgpu_device pointer
1918 *
1919 * init the gfx constants such as the 3D engine, tiling configuration
1920 * registers, maximum number of quad pipes, render backends...
1921 */
1922static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
1923{
1924	u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
1925	u32 tmp;
1926	int i;
1927
1928	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1929
1930	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1931	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1932	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1933
1934	gfx_v7_0_tiling_mode_table_init(adev);
1935
1936	gfx_v7_0_setup_rb(adev);
1937	gfx_v7_0_get_cu_info(adev);
1938	gfx_v7_0_config_init(adev);
1939
1940	/* set HW defaults for 3D engine */
1941	WREG32(mmCP_MEQ_THRESHOLDS,
1942	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1943	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1944
1945	mutex_lock(&adev->grbm_idx_mutex);
1946	/*
1947	 * make sure that the following register writes will be broadcast
1948	 * to all the shaders
1949	 */
1950	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1951
1952	/* XXX SH_MEM regs */
1953	/* where to put LDS, scratch, GPUVM in FSA64 space */
1954	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1955				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1956	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
1957				   MTYPE_NC);
1958	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
1959				   MTYPE_UC);
1960	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
1961
1962	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
1963				   SWIZZLE_ENABLE, 1);
1964	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1965				   ELEMENT_SIZE, 1);
1966	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1967				   INDEX_STRIDE, 3);
1968	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
1969
1970	mutex_lock(&adev->srbm_mutex);
1971	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
1972		if (i == 0)
1973			sh_mem_base = 0;
1974		else
1975			sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1976		cik_srbm_select(adev, 0, 0, 0, i);
1977		/* CP and shaders */
1978		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1979		WREG32(mmSH_MEM_APE1_BASE, 1);
1980		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1981		WREG32(mmSH_MEM_BASES, sh_mem_base);
1982	}
1983	cik_srbm_select(adev, 0, 0, 0, 0);
1984	mutex_unlock(&adev->srbm_mutex);
1985
1986	gfx_v7_0_init_compute_vmid(adev);
1987	gfx_v7_0_init_gds_vmid(adev);
1988
1989	WREG32(mmSX_DEBUG_1, 0x20);
1990
1991	WREG32(mmTA_CNTL_AUX, 0x00010000);
1992
1993	tmp = RREG32(mmSPI_CONFIG_CNTL);
1994	tmp |= 0x03000000;
1995	WREG32(mmSPI_CONFIG_CNTL, tmp);
1996
1997	WREG32(mmSQ_CONFIG, 1);
1998
1999	WREG32(mmDB_DEBUG, 0);
2000
2001	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
2002	tmp |= 0x00000400;
2003	WREG32(mmDB_DEBUG2, tmp);
2004
2005	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
2006	tmp |= 0x00020200;
2007	WREG32(mmDB_DEBUG3, tmp);
2008
2009	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
2010	tmp |= 0x00018208;
2011	WREG32(mmCB_HW_CONTROL, tmp);
2012
2013	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
2014
2015	WREG32(mmPA_SC_FIFO_SIZE,
2016		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
2017		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
2018		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
2019		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
2020
2021	WREG32(mmVGT_NUM_INSTANCES, 1);
2022
2023	WREG32(mmCP_PERFMON_CNTL, 0);
2024
2025	WREG32(mmSQ_CONFIG, 0);
2026
2027	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
2028		((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
2029		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
2030
2031	WREG32(mmVGT_CACHE_INVALIDATION,
2032		(VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
2033		(ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
2034
2035	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
2036	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
2037
2038	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
2039			(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
2040	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
2041
2042	tmp = RREG32(mmSPI_ARB_PRIORITY);
2043	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
2044	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
2045	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
2046	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
2047	WREG32(mmSPI_ARB_PRIORITY, tmp);
2048
2049	mutex_unlock(&adev->grbm_idx_mutex);
2050
2051	udelay(50);
2052}
2053
2054/*
2055 * GPU scratch registers helpers function.
2056 */
2057/**
2058 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
2059 *
2060 * @adev: amdgpu_device pointer
2061 *
2062 * Set up the number and offset of the CP scratch registers.
2063 * NOTE: use of CP scratch registers is a legacy interface and
2064 * is not used by default on newer asics (r6xx+).  On newer asics,
2065 * memory buffers are used for fences rather than scratch regs.
2066 */
2067static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
2068{
2069	adev->gfx.scratch.num_reg = 8;
2070	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
2071	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
2072}
2073
2074/**
2075 * gfx_v7_0_ring_test_ring - basic gfx ring test
2076 *
2078 * @ring: amdgpu_ring structure holding ring information
2079 *
2080 * Allocate a scratch register and write to it using the gfx ring (CIK).
2081 * Provides a basic gfx ring test to verify that the ring is working.
2082 * Used by gfx_v7_0_cp_gfx_resume();
2083 * Returns 0 on success, error on failure.
2084 */
2085static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2086{
2087	struct amdgpu_device *adev = ring->adev;
2088	uint32_t scratch;
2089	uint32_t tmp = 0;
2090	unsigned i;
2091	int r;
2092
2093	r = amdgpu_gfx_scratch_get(adev, &scratch);
2094	if (r)
2095		return r;
2096
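	/* seed the scratch register, then ask the CP to overwrite it via the ring */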
2097	WREG32(scratch, 0xCAFEDEAD);
2098	r = amdgpu_ring_alloc(ring, 3);
2099	if (r)
2100		goto error_free_scratch;
2101
2102	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2103	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2104	amdgpu_ring_write(ring, 0xDEADBEEF);
2105	amdgpu_ring_commit(ring);
2106
2107	for (i = 0; i < adev->usec_timeout; i++) {
2108		tmp = RREG32(scratch);
2109		if (tmp == 0xDEADBEEF)
2110			break;
2111		udelay(1);
2112	}
2113	if (i >= adev->usec_timeout)
2114		r = -ETIMEDOUT;
2115
2116error_free_scratch:
2117	amdgpu_gfx_scratch_free(adev, scratch);
2118	return r;
2119}
2120
2121/**
2122 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2123 *
2124 * @ring: amdgpu_ring structure holding ring information
2126 *
2127 * Emits an hdp flush on the cp.
2128 */
2129static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2130{
2131	u32 ref_and_mask;
2132	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2133
2134	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2135		switch (ring->me) {
2136		case 1:
2137			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2138			break;
2139		case 2:
2140			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2141			break;
2142		default:
2143			return;
2144		}
2145	} else {
2146		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2147	}
2148
2149	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2150	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2151				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
2152				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
2153	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2154	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2155	amdgpu_ring_write(ring, ref_and_mask);
2156	amdgpu_ring_write(ring, ref_and_mask);
2157	amdgpu_ring_write(ring, 0x20); /* poll interval */
2158}
2159
2160static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2161{
2162	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2163	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2164		EVENT_INDEX(4));
2165
2166	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2167	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2168		EVENT_INDEX(0));
2169}
2170
2171/**
2172 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2173 *
2174 * @ring: amdgpu_ring structure holding ring information
2175 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* control flags
2176 *
2177 * Emits a fence sequence number on the gfx ring and flushes
2178 * GPU caches.
2179 */
2180static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2181					 u64 seq, unsigned flags)
2182{
2183	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2184	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2185	/* Workaround for cache flush problems. First send a dummy EOP
2186	 * event down the pipe with seq one below.
2187	 */
2188	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2189	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2190				 EOP_TC_ACTION_EN |
2191				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2192				 EVENT_INDEX(5)));
2193	amdgpu_ring_write(ring, addr & 0xfffffffc);
2194	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2195				DATA_SEL(1) | INT_SEL(0));
2196	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2197	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2198
2199	/* Then send the real EOP event down the pipe. */
2200	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2201	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2202				 EOP_TC_ACTION_EN |
2203				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2204				 EVENT_INDEX(5)));
2205	amdgpu_ring_write(ring, addr & 0xfffffffc);
2206	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2207				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2208	amdgpu_ring_write(ring, lower_32_bits(seq));
2209	amdgpu_ring_write(ring, upper_32_bits(seq));
2210}
2211
2212/**
2213 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2214 *
2215 * @ring: amdgpu_ring structure holding ring information
2216 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* control flags
2217 *
2218 * Emits a fence sequence number on the compute ring and flushes
2219 * GPU caches.
2220 */
2221static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2222					     u64 addr, u64 seq,
2223					     unsigned flags)
2224{
2225	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2226	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2227
2228	/* RELEASE_MEM - flush caches, send int */
2229	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2230	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2231				 EOP_TC_ACTION_EN |
2232				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2233				 EVENT_INDEX(5)));
2234	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2235	amdgpu_ring_write(ring, addr & 0xfffffffc);
2236	amdgpu_ring_write(ring, upper_32_bits(addr));
2237	amdgpu_ring_write(ring, lower_32_bits(seq));
2238	amdgpu_ring_write(ring, upper_32_bits(seq));
2239}
2240
2241/*
2242 * IB stuff
2243 */
2244/**
2245 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the gfx ring
2246 *
2247 * @ring: amdgpu_ring structure holding ring information
2248 * @ib: amdgpu indirect buffer object
2249 *
2250 * Emits a DE (drawing engine) or CE (constant engine) IB
2251 * on the gfx ring.  IBs are usually generated by userspace
2252 * acceleration drivers and submitted to the kernel for
2253 * sheduling on the ring.  This function schedules the IB
2254 * on the gfx ring for execution by the GPU.
2255 */
2256static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2257					struct amdgpu_job *job,
2258					struct amdgpu_ib *ib,
2259					uint32_t flags)
2260{
2261	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2262	u32 header, control = 0;
2263
2264	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
2265	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2266		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2267		amdgpu_ring_write(ring, 0);
2268	}
2269
2270	if (ib->flags & AMDGPU_IB_FLAG_CE)
2271		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2272	else
2273		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2274
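	/* control word: IB size in dwords in the low bits, VMID in bits 31:24 */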
2275	control |= ib->length_dw | (vmid << 24);
2276
2277	amdgpu_ring_write(ring, header);
2278	amdgpu_ring_write(ring,
2279#ifdef __BIG_ENDIAN
2280			  (2 << 0) |
2281#endif
2282			  (ib->gpu_addr & 0xFFFFFFFC));
2283	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2284	amdgpu_ring_write(ring, control);
2285}
2286
2287static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2288					  struct amdgpu_job *job,
2289					  struct amdgpu_ib *ib,
2290					  uint32_t flags)
2291{
2292	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2293	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2294
2295	/* Currently, there is a high possibility to get wave ID mismatch
2296	 * between ME and GDS, leading to a hw deadlock, because ME generates
2297	 * different wave IDs than the GDS expects. This situation happens
2298	 * randomly when at least 5 compute pipes use GDS ordered append.
2299	 * The wave IDs generated by ME are also wrong after suspend/resume.
2300	 * Those are probably bugs somewhere else in the kernel driver.
2301	 *
2302	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2303	 * GDS to 0 for this ring (me/pipe).
2304	 */
2305	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2306		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2307		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
2308		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2309	}
2310
2311	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2312	amdgpu_ring_write(ring,
2313#ifdef __BIG_ENDIAN
2314					  (2 << 0) |
2315#endif
2316					  (ib->gpu_addr & 0xFFFFFFFC));
2317	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2318	amdgpu_ring_write(ring, control);
2319}
2320
2321static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2322{
2323	uint32_t dw2 = 0;
2324
2325	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
2326	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2327		gfx_v7_0_ring_emit_vgt_flush(ring);
2328		/* set load_global_config & load_global_uconfig */
2329		dw2 |= 0x8001;
2330		/* set load_cs_sh_regs */
2331		dw2 |= 0x01000000;
2332		/* set load_per_context_state & load_gfx_sh_regs */
2333		dw2 |= 0x10002;
2334	}
2335
2336	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2337	amdgpu_ring_write(ring, dw2);
2338	amdgpu_ring_write(ring, 0);
2339}
2340
2341/**
2342 * gfx_v7_0_ring_test_ib - basic ring IB test
2343 *
2344 * @ring: amdgpu_ring structure holding ring information
2345 *
2346 * Allocate an IB and execute it on the gfx ring (CIK).
2347 * Provides a basic gfx ring test to verify that IBs are working.
2348 * Returns 0 on success, error on failure.
2349 */
2350static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2351{
2352	struct amdgpu_device *adev = ring->adev;
2353	struct amdgpu_ib ib;
2354	struct dma_fence *f = NULL;
2355	uint32_t scratch;
2356	uint32_t tmp = 0;
2357	long r;
2358
2359	r = amdgpu_gfx_scratch_get(adev, &scratch);
2360	if (r)
2361		return r;
2362
2363	WREG32(scratch, 0xCAFEDEAD);
2364	memset(&ib, 0, sizeof(ib));
2365	r = amdgpu_ib_get(adev, NULL, 256,
2366					AMDGPU_IB_POOL_DIRECT, &ib);
2367	if (r)
2368		goto err1;
2369
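	/* build a 3-dword IB that writes 0xDEADBEEF into the scratch register */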
2370	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2371	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2372	ib.ptr[2] = 0xDEADBEEF;
2373	ib.length_dw = 3;
2374
2375	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
2376	if (r)
2377		goto err2;
2378
2379	r = dma_fence_wait_timeout(f, false, timeout);
2380	if (r == 0) {
2381		r = -ETIMEDOUT;
2382		goto err2;
2383	} else if (r < 0) {
2384		goto err2;
2385	}
2386	tmp = RREG32(scratch);
2387	if (tmp == 0xDEADBEEF)
2388		r = 0;
2389	else
2390		r = -EINVAL;
2391
2392err2:
2393	amdgpu_ib_free(adev, &ib, NULL);
2394	dma_fence_put(f);
2395err1:
2396	amdgpu_gfx_scratch_free(adev, scratch);
2397	return r;
2398}
2399
2400/*
2401 * CP.
2402 * On CIK, gfx and compute now have independent command processors.
2403 *
2404 * GFX
2405 * Gfx consists of a single ring and can process both gfx jobs and
2406 * compute jobs.  The gfx CP consists of three microengines (ME):
2407 * PFP - Pre-Fetch Parser
2408 * ME - Micro Engine
2409 * CE - Constant Engine
2410 * The PFP and ME make up what is considered the Drawing Engine (DE).
2411 * The CE is an asynchronous engine used for updating buffer descriptors
2412 * used by the DE so that they can be loaded into cache in parallel
2413 * while the DE is processing state update packets.
2414 *
2415 * Compute
2416 * The compute CP consists of two microengines (ME):
2417 * MEC1 - Compute MicroEngine 1
2418 * MEC2 - Compute MicroEngine 2
2419 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2420 * The queues are exposed to userspace and are programmed directly
2421 * by the compute runtime.
2422 */
2423/**
2424 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2425 *
2426 * @adev: amdgpu_device pointer
2427 * @enable: enable or disable the MEs
2428 *
2429 * Halts or unhalts the gfx MEs.
2430 */
2431static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2432{
2433	if (enable)
2434		WREG32(mmCP_ME_CNTL, 0);
2435	else
2436		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
2437				      CP_ME_CNTL__PFP_HALT_MASK |
2438				      CP_ME_CNTL__CE_HALT_MASK));
2439	udelay(50);
2440}
2441
2442/**
2443 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2444 *
2445 * @adev: amdgpu_device pointer
2446 *
2447 * Loads the gfx PFP, ME, and CE ucode.
2448 * Returns 0 for success, -EINVAL if the ucode is not available.
2449 */
2450static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2451{
2452	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2453	const struct gfx_firmware_header_v1_0 *ce_hdr;
2454	const struct gfx_firmware_header_v1_0 *me_hdr;
2455	const __le32 *fw_data;
2456	unsigned i, fw_size;
2457
2458	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2459		return -EINVAL;
2460
2461	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2462	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2463	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2464
2465	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2466	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2467	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2468	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2469	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2470	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2471	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2472	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2473	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2474
2475	gfx_v7_0_cp_gfx_enable(adev, false);
2476
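	/* with the MEs halted, stream each ucode image into its UCODE_DATA port */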
2477	/* PFP */
2478	fw_data = (const __le32 *)
2479		(adev->gfx.pfp_fw->data +
2480		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2481	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2482	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2483	for (i = 0; i < fw_size; i++)
2484		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2485	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2486
2487	/* CE */
2488	fw_data = (const __le32 *)
2489		(adev->gfx.ce_fw->data +
2490		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2491	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2492	WREG32(mmCP_CE_UCODE_ADDR, 0);
2493	for (i = 0; i < fw_size; i++)
2494		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2495	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2496
2497	/* ME */
2498	fw_data = (const __le32 *)
2499		(adev->gfx.me_fw->data +
2500		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2501	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2502	WREG32(mmCP_ME_RAM_WADDR, 0);
2503	for (i = 0; i < fw_size; i++)
2504		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2505	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2506
2507	return 0;
2508}
2509
2510/**
2511 * gfx_v7_0_cp_gfx_start - start the gfx ring
2512 *
2513 * @adev: amdgpu_device pointer
2514 *
2515 * Enables the ring and loads the clear state context and other
2516 * packets required to init the ring.
2517 * Returns 0 for success, error for failure.
2518 */
2519static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2520{
2521	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2522	const struct cs_section_def *sect = NULL;
2523	const struct cs_extent_def *ext = NULL;
2524	int r, i;
2525
2526	/* init the CP */
2527	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2528	WREG32(mmCP_ENDIAN_SWAP, 0);
2529	WREG32(mmCP_DEVICE_ID, 1);
2530
2531	gfx_v7_0_cp_gfx_enable(adev, true);
2532
2533	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2534	if (r) {
2535		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2536		return r;
2537	}
2538
2539	/* init the CE partitions.  CE only used for gfx on CIK */
2540	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2541	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2542	amdgpu_ring_write(ring, 0x8000);
2543	amdgpu_ring_write(ring, 0x8000);
2544
2545	/* clear state buffer */
2546	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2547	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2548
2549	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2550	amdgpu_ring_write(ring, 0x80000000);
2551	amdgpu_ring_write(ring, 0x80000000);
2552
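	/* stream the golden context (clear state) register values from the RLC cs_data tables */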
2553	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2554		for (ext = sect->section; ext->extent != NULL; ++ext) {
2555			if (sect->id == SECT_CONTEXT) {
2556				amdgpu_ring_write(ring,
2557						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2558				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2559				for (i = 0; i < ext->reg_count; i++)
2560					amdgpu_ring_write(ring, ext->extent[i]);
2561			}
2562		}
2563	}
2564
2565	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2566	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2567	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
2568	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
2569
2570	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2571	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2572
2573	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2574	amdgpu_ring_write(ring, 0);
2575
2576	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2577	amdgpu_ring_write(ring, 0x00000316);
2578	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2579	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2580
2581	amdgpu_ring_commit(ring);
2582
2583	return 0;
2584}
2585
2586/**
2587 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2588 *
2589 * @adev: amdgpu_device pointer
2590 *
2591 * Program the location and size of the gfx ring buffer
2592 * and test it to make sure it's working.
2593 * Returns 0 for success, error for failure.
2594 */
2595static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2596{
2597	struct amdgpu_ring *ring;
2598	u32 tmp;
2599	u32 rb_bufsz;
2600	u64 rb_addr, rptr_addr;
2601	int r;
2602
2603	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2604	if (adev->asic_type != CHIP_HAWAII)
2605		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2606
2607	/* Set the write pointer delay */
2608	WREG32(mmCP_RB_WPTR_DELAY, 0);
2609
2610	/* set the RB to use vmid 0 */
2611	WREG32(mmCP_RB_VMID, 0);
2612
2613	WREG32(mmSCRATCH_ADDR, 0);
2614
2615	/* ring 0 - compute and gfx */
2616	/* Set ring buffer size */
2617	ring = &adev->gfx.gfx_ring[0];
2618	rb_bufsz = order_base_2(ring->ring_size / 8);
2619	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2620#ifdef __BIG_ENDIAN
2621	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2622#endif
2623	WREG32(mmCP_RB0_CNTL, tmp);
2624
2625	/* Initialize the ring buffer's read and write pointers */
2626	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2627	ring->wptr = 0;
2628	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2629
2630	/* set the wb address whether it's enabled or not */
2631	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2632	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2633	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2634
2635	/* scratch register shadowing is no longer supported */
2636	WREG32(mmSCRATCH_UMSK, 0);
2637
2638	mdelay(1);
2639	WREG32(mmCP_RB0_CNTL, tmp);
2640
2641	rb_addr = ring->gpu_addr >> 8;
2642	WREG32(mmCP_RB0_BASE, rb_addr);
2643	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2644
2645	/* start the ring */
2646	gfx_v7_0_cp_gfx_start(adev);
2647	r = amdgpu_ring_test_helper(ring);
2648	if (r)
2649		return r;
2650
2651	return 0;
2652}
2653
2654static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2655{
2656	return ring->adev->wb.wb[ring->rptr_offs];
2657}
2658
2659static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2660{
2661	struct amdgpu_device *adev = ring->adev;
2662
2663	return RREG32(mmCP_RB0_WPTR);
2664}
2665
2666static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2667{
2668	struct amdgpu_device *adev = ring->adev;
2669
2670	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2671	(void)RREG32(mmCP_RB0_WPTR);
2672}
2673
2674static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2675{
2676	/* XXX check if swapping is necessary on BE */
2677	return ring->adev->wb.wb[ring->wptr_offs];
2678}
2679
2680static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2681{
2682	struct amdgpu_device *adev = ring->adev;
2683
2684	/* XXX check if swapping is necessary on BE */
2685	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
2686	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2687}
2688
2689/**
2690 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2691 *
2692 * @adev: amdgpu_device pointer
2693 * @enable: enable or disable the MEs
2694 *
2695 * Halts or unhalts the compute MEs.
2696 */
2697static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2698{
2699	if (enable)
2700		WREG32(mmCP_MEC_CNTL, 0);
2701	else
2702		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2703				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2704	udelay(50);
2705}
2706
2707/**
2708 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2709 *
2710 * @adev: amdgpu_device pointer
2711 *
2712 * Loads the compute MEC1&2 ucode.
2713 * Returns 0 for success, -EINVAL if the ucode is not available.
2714 */
2715static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2716{
2717	const struct gfx_firmware_header_v1_0 *mec_hdr;
2718	const __le32 *fw_data;
2719	unsigned i, fw_size;
2720
2721	if (!adev->gfx.mec_fw)
2722		return -EINVAL;
2723
2724	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2725	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2726	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2727	adev->gfx.mec_feature_version = le32_to_cpu(
2728					mec_hdr->ucode_feature_version);
2729
2730	gfx_v7_0_cp_compute_enable(adev, false);
2731
2732	/* MEC1 */
2733	fw_data = (const __le32 *)
2734		(adev->gfx.mec_fw->data +
2735		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2736	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2737	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2738	for (i = 0; i < fw_size; i++)
2739		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2740	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2741
2742	if (adev->asic_type == CHIP_KAVERI) {
2743		const struct gfx_firmware_header_v1_0 *mec2_hdr;
2744
2745		if (!adev->gfx.mec2_fw)
2746			return -EINVAL;
2747
2748		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2749		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2750		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2751		adev->gfx.mec2_feature_version = le32_to_cpu(
2752				mec2_hdr->ucode_feature_version);
2753
2754		/* MEC2 */
2755		fw_data = (const __le32 *)
2756			(adev->gfx.mec2_fw->data +
2757			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2758		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2759		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2760		for (i = 0; i < fw_size; i++)
2761			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2762		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2763	}
2764
2765	return 0;
2766}
2767
2768/**
2769 * gfx_v7_0_cp_compute_fini - stop the compute queues
2770 *
2771 * @adev: amdgpu_device pointer
2772 *
2773 * Stop the compute queues and tear down the driver queue
2774 * info.
2775 */
2776static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2777{
2778	int i;
2779
2780	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2781		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2782
2783		amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
2784	}
2785}
2786
2787static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2788{
2789	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
2790}
2791
2792static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2793{
2794	int r;
2795	u32 *hpd;
2796	size_t mec_hpd_size;
2797
2798	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2799
2800	/* take ownership of the relevant compute queues */
2801	amdgpu_gfx_compute_queue_acquire(adev);
2802
2803	/* allocate space for ALL pipes (even the ones we don't own) */
2804	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2805		* GFX7_MEC_HPD_SIZE * 2;
2806
2807	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2808				      AMDGPU_GEM_DOMAIN_VRAM,
2809				      &adev->gfx.mec.hpd_eop_obj,
2810				      &adev->gfx.mec.hpd_eop_gpu_addr,
2811				      (void **)&hpd);
2812	if (r) {
2813		dev_warn(adev->dev, "(%d) create, pin or map of HPD EOP bo failed\n", r);
2814		gfx_v7_0_mec_fini(adev);
2815		return r;
2816	}
2817
2818	/* clear memory.  Not sure if this is required or not */
2819	memset(hpd, 0, mec_hpd_size);
2820
2821	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2822	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2823
2824	return 0;
2825}
2826
2827struct hqd_registers
2828{
2829	u32 cp_mqd_base_addr;
2830	u32 cp_mqd_base_addr_hi;
2831	u32 cp_hqd_active;
2832	u32 cp_hqd_vmid;
2833	u32 cp_hqd_persistent_state;
2834	u32 cp_hqd_pipe_priority;
2835	u32 cp_hqd_queue_priority;
2836	u32 cp_hqd_quantum;
2837	u32 cp_hqd_pq_base;
2838	u32 cp_hqd_pq_base_hi;
2839	u32 cp_hqd_pq_rptr;
2840	u32 cp_hqd_pq_rptr_report_addr;
2841	u32 cp_hqd_pq_rptr_report_addr_hi;
2842	u32 cp_hqd_pq_wptr_poll_addr;
2843	u32 cp_hqd_pq_wptr_poll_addr_hi;
2844	u32 cp_hqd_pq_doorbell_control;
2845	u32 cp_hqd_pq_wptr;
2846	u32 cp_hqd_pq_control;
2847	u32 cp_hqd_ib_base_addr;
2848	u32 cp_hqd_ib_base_addr_hi;
2849	u32 cp_hqd_ib_rptr;
2850	u32 cp_hqd_ib_control;
2851	u32 cp_hqd_iq_timer;
2852	u32 cp_hqd_iq_rptr;
2853	u32 cp_hqd_dequeue_request;
2854	u32 cp_hqd_dma_offload;
2855	u32 cp_hqd_sema_cmd;
2856	u32 cp_hqd_msg_type;
2857	u32 cp_hqd_atomic0_preop_lo;
2858	u32 cp_hqd_atomic0_preop_hi;
2859	u32 cp_hqd_atomic1_preop_lo;
2860	u32 cp_hqd_atomic1_preop_hi;
2861	u32 cp_hqd_hq_scheduler0;
2862	u32 cp_hqd_hq_scheduler1;
2863	u32 cp_mqd_control;
2864};
2865
2866static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
2867				       int mec, int pipe)
2868{
2869	u64 eop_gpu_addr;
2870	u32 tmp;
2871	size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
2872			    * GFX7_MEC_HPD_SIZE * 2;
2873
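	/* each pipe owns a GFX7_MEC_HPD_SIZE * 2 byte slice of the EOP buffer set up in gfx_v7_0_mec_init() */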
2874	mutex_lock(&adev->srbm_mutex);
2875	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
2876
2877	cik_srbm_select(adev, mec + 1, pipe, 0, 0);
2878
2879	/* write the EOP addr */
2880	WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2881	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2882
2883	/* set the VMID assigned */
2884	WREG32(mmCP_HPD_EOP_VMID, 0);
2885
2886	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2887	tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2888	tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2889	tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
2890	WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2891
2892	cik_srbm_select(adev, 0, 0, 0, 0);
2893	mutex_unlock(&adev->srbm_mutex);
2894}
2895
2896static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
2897{
2898	int i;
2899
2900	/* disable the queue if it's active */
2901	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
2902		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
2903		for (i = 0; i < adev->usec_timeout; i++) {
2904			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
2905				break;
2906			udelay(1);
2907		}
2908
2909		if (i == adev->usec_timeout)
2910			return -ETIMEDOUT;
2911
2912		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
2913		WREG32(mmCP_HQD_PQ_RPTR, 0);
2914		WREG32(mmCP_HQD_PQ_WPTR, 0);
2915	}
2916
2917	return 0;
2918}
2919
2920static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
2921			     struct cik_mqd *mqd,
2922			     uint64_t mqd_gpu_addr,
2923			     struct amdgpu_ring *ring)
2924{
2925	u64 hqd_gpu_addr;
2926	u64 wb_gpu_addr;
2927
2928	/* init the mqd struct */
2929	memset(mqd, 0, sizeof(struct cik_mqd));
2930
2931	mqd->header = 0xC0310800;
2932	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2933	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2934	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2935	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2936
2937	/* enable doorbell? */
2938	mqd->cp_hqd_pq_doorbell_control =
2939		RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2940	if (ring->use_doorbell)
2941		mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2942	else
2943		mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2944
2945	/* set the pointer to the MQD */
2946	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
2947	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
2948
2949	/* set MQD vmid to 0 */
2950	mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
2951	mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
2952
2953	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2954	hqd_gpu_addr = ring->gpu_addr >> 8;
2955	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2956	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2957
2958	/* set up the HQD, this is similar to CP_RB0_CNTL */
2959	mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
2960	mqd->cp_hqd_pq_control &=
2961		~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
2962				CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
2963
2964	mqd->cp_hqd_pq_control |=
2965		order_base_2(ring->ring_size / 8);
2966	mqd->cp_hqd_pq_control |=
2967		(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
2968#ifdef __BIG_ENDIAN
2969	mqd->cp_hqd_pq_control |=
2970		2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
2971#endif
2972	mqd->cp_hqd_pq_control &=
2973		~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
2974				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
2975				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
2976	mqd->cp_hqd_pq_control |=
2977		CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
2978		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
2979
2980	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2981	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2982	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2983	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2984
2985	/* set the wb address whether it's enabled or not */
2986	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2987	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2988	mqd->cp_hqd_pq_rptr_report_addr_hi =
2989		upper_32_bits(wb_gpu_addr) & 0xffff;
2990
2991	/* enable the doorbell if requested */
2992	if (ring->use_doorbell) {
2993		mqd->cp_hqd_pq_doorbell_control =
2994			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2995		mqd->cp_hqd_pq_doorbell_control &=
2996			~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
2997		mqd->cp_hqd_pq_doorbell_control |=
2998			(ring->doorbell_index <<
2999			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
3000		mqd->cp_hqd_pq_doorbell_control |=
3001			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3002		mqd->cp_hqd_pq_doorbell_control &=
3003			~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
3004					CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
3005
3006	} else {
3007		mqd->cp_hqd_pq_doorbell_control = 0;
3008	}
3009
3010	/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3011	ring->wptr = 0;
3012	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
3013	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3014
3015	/* set the vmid for the queue */
3016	mqd->cp_hqd_vmid = 0;
3017
3018	/* defaults */
3019	mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
3020	mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
3021	mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
3022	mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
3023	mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
3024	mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
3025	mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
3026	mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
3027	mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
3028	mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
3029	mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
3030	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3031	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3032	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
3033	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
3034	mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);
3035
3036	/* activate the queue */
3037	mqd->cp_hqd_active = 1;
3038}
3039
3040static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
3041{
3042	uint32_t tmp;
3043	uint32_t mqd_reg;
3044	uint32_t *mqd_data;
3045
3046	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
3047	mqd_data = &mqd->cp_mqd_base_addr_lo;
3048
3049	/* disable wptr polling */
3050	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3051	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3052	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3053
3054	/* program all HQD registers */
3055	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
3056		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
3057
3058	/* activate the HQD */
3059	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
3060		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
3061
3062	return 0;
3063}
3064
3065static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
3066{
3067	int r;
3068	u64 mqd_gpu_addr;
3069	struct cik_mqd *mqd;
3070	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
3071
3072	r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
3073				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
3074				      &mqd_gpu_addr, (void **)&mqd);
3075	if (r) {
3076		dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3077		return r;
3078	}
3079
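	/* select this ring's me/pipe/queue, build the MQD, quiesce any active HQD, then commit it */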
3080	mutex_lock(&adev->srbm_mutex);
3081	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3082
3083	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
3084	gfx_v7_0_mqd_deactivate(adev);
3085	gfx_v7_0_mqd_commit(adev, mqd);
3086
3087	cik_srbm_select(adev, 0, 0, 0, 0);
3088	mutex_unlock(&adev->srbm_mutex);
3089
3090	amdgpu_bo_kunmap(ring->mqd_obj);
3091	amdgpu_bo_unreserve(ring->mqd_obj);
3092	return 0;
3093}
3094
3095/**
3096 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
3097 *
3098 * @adev: amdgpu_device pointer
3099 *
3100 * Program the compute queues and test them to make sure they
3101 * are working.
3102 * Returns 0 for success, error for failure.
3103 */
3104static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3105{
3106	int r, i, j;
3107	u32 tmp;
3108	struct amdgpu_ring *ring;
3109
3110	/* fix up chicken bits */
3111	tmp = RREG32(mmCP_CPF_DEBUG);
3112	tmp |= (1 << 23);
3113	WREG32(mmCP_CPF_DEBUG, tmp);
3114
3115	/* init all pipes (even the ones we don't own) */
3116	for (i = 0; i < adev->gfx.mec.num_mec; i++)
3117		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
3118			gfx_v7_0_compute_pipe_init(adev, i, j);
3119
3120	/* init the queues */
3121	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3122		r = gfx_v7_0_compute_queue_init(adev, i);
3123		if (r) {
3124			gfx_v7_0_cp_compute_fini(adev);
3125			return r;
3126		}
3127	}
3128
3129	gfx_v7_0_cp_compute_enable(adev, true);
3130
3131	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3132		ring = &adev->gfx.compute_ring[i];
3133		amdgpu_ring_test_helper(ring);
3134	}
3135
3136	return 0;
3137}
3138
3139static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3140{
3141	gfx_v7_0_cp_gfx_enable(adev, enable);
3142	gfx_v7_0_cp_compute_enable(adev, enable);
3143}
3144
3145static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3146{
3147	int r;
3148
3149	r = gfx_v7_0_cp_gfx_load_microcode(adev);
3150	if (r)
3151		return r;
3152	r = gfx_v7_0_cp_compute_load_microcode(adev);
3153	if (r)
3154		return r;
3155
3156	return 0;
3157}
3158
3159static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3160					       bool enable)
3161{
3162	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3163
3164	if (enable)
3165		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3166				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3167	else
3168		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3169				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3170	WREG32(mmCP_INT_CNTL_RING0, tmp);
3171}
3172
3173static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3174{
3175	int r;
3176
3177	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3178
3179	r = gfx_v7_0_cp_load_microcode(adev);
3180	if (r)
3181		return r;
3182
3183	r = gfx_v7_0_cp_gfx_resume(adev);
3184	if (r)
3185		return r;
3186	r = gfx_v7_0_cp_compute_resume(adev);
3187	if (r)
3188		return r;
3189
3190	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3191
3192	return 0;
3193}
3194
3195/**
3196 * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
3197 *
3198 * @ring: the ring to emit the commands to
3199 *
3200 * Sync the command pipeline with the PFP, i.e. wait for everything
3201 * to be completed.
3202 */
3203static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3204{
3205	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3206	uint32_t seq = ring->fence_drv.sync_seq;
3207	uint64_t addr = ring->fence_drv.gpu_addr;
3208
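	/* wait on the fence address in memory until it reports the last synced sequence number */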
3209	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3210	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3211				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3212				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
3213	amdgpu_ring_write(ring, addr & 0xfffffffc);
3214	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3215	amdgpu_ring_write(ring, seq);
3216	amdgpu_ring_write(ring, 0xffffffff);
3217	amdgpu_ring_write(ring, 4); /* poll interval */
3218
3219	if (usepfp) {
3220		/* sync CE with ME to prevent CE fetching the CEIB before the context switch is done */
3221		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3222		amdgpu_ring_write(ring, 0);
3223		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3224		amdgpu_ring_write(ring, 0);
3225	}
3226}
3227
3228/*
3229 * vm
3230 * VMID 0 is the physical GPU address space as used by the kernel.
3231 * VMIDs 1-15 are used for userspace clients and are handled
3232 * by the amdgpu vm/hsa code.
3233 */
3234/**
3235 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3236 *
3237 * @ring: the ring to emit the commands to
3238 * @vmid: the VM ID to flush
3239 * @pd_addr: new page directory base address
3240 *
 * Update the page table base for @vmid and flush the VM TLB using the CP (CIK).
3241 */
3242static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3243					unsigned vmid, uint64_t pd_addr)
3244{
3245	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3246
3247	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3248
3249	/* wait for the invalidate to complete */
3250	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3251	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3252				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
3253				 WAIT_REG_MEM_ENGINE(0))); /* me */
3254	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3255	amdgpu_ring_write(ring, 0);
3256	amdgpu_ring_write(ring, 0); /* ref */
3257	amdgpu_ring_write(ring, 0); /* mask */
3258	amdgpu_ring_write(ring, 0x20); /* poll interval */
3259
3260	/* compute doesn't have PFP */
3261	if (usepfp) {
3262		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3263		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3264		amdgpu_ring_write(ring, 0x0);
3265
3266		/* sync CE with ME to prevent CE fetching the CEIB before the context switch is done */
3267		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3268		amdgpu_ring_write(ring, 0);
3269		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3270		amdgpu_ring_write(ring, 0);
3271	}
3272}
3273
3274static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
3275				    uint32_t reg, uint32_t val)
3276{
3277	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3278
3279	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3280	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3281				 WRITE_DATA_DST_SEL(0)));
3282	amdgpu_ring_write(ring, reg);
3283	amdgpu_ring_write(ring, 0);
3284	amdgpu_ring_write(ring, val);
3285}
3286
3287/*
3288 * RLC
3289 * The RLC is a multi-purpose microengine that handles a
3290 * variety of functions.
3291 */
3292static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3293{
3294	const u32 *src_ptr;
3295	u32 dws;
3296	const struct cs_section_def *cs_data;
3297	int r;
3298
3299	/* allocate rlc buffers */
3300	if (adev->flags & AMD_IS_APU) {
3301		if (adev->asic_type == CHIP_KAVERI) {
3302			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3303			adev->gfx.rlc.reg_list_size =
3304				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3305		} else {
3306			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3307			adev->gfx.rlc.reg_list_size =
3308				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3309		}
3310	}
3311	adev->gfx.rlc.cs_data = ci_cs_data;
3312	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3313	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3314
3315	src_ptr = adev->gfx.rlc.reg_list;
3316	dws = adev->gfx.rlc.reg_list_size;
3317	dws += (5 * 16) + 48 + 48 + 64;
3318
3319	cs_data = adev->gfx.rlc.cs_data;
3320
3321	if (src_ptr) {
3322		/* init save restore block */
3323		r = amdgpu_gfx_rlc_init_sr(adev, dws);
3324		if (r)
3325			return r;
3326	}
3327
3328	if (cs_data) {
3329		/* init clear state block */
3330		r = amdgpu_gfx_rlc_init_csb(adev);
3331		if (r)
3332			return r;
3333	}
3334
3335	if (adev->gfx.rlc.cp_table_size) {
3336		r = amdgpu_gfx_rlc_init_cpt(adev);
3337		if (r)
3338			return r;
3339	}
3340
3341	/* init spm vmid with 0xf */
3342	if (adev->gfx.rlc.funcs->update_spm_vmid)
3343		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
3344
3345	return 0;
3346}
3347
3348static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3349{
3350	u32 tmp;
3351
3352	tmp = RREG32(mmRLC_LB_CNTL);
3353	if (enable)
3354		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3355	else
3356		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3357	WREG32(mmRLC_LB_CNTL, tmp);
3358}
3359
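/* Wait for the per-CU and non-CU RLC serdes masters to report idle across all shader engines. */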
3360static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3361{
3362	u32 i, j, k;
3363	u32 mask;
3364
3365	mutex_lock(&adev->grbm_idx_mutex);
3366	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3367		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3368			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3369			for (k = 0; k < adev->usec_timeout; k++) {
3370				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3371					break;
3372				udelay(1);
3373			}
3374		}
3375	}
3376	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3377	mutex_unlock(&adev->grbm_idx_mutex);
3378
3379	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3380		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3381		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3382		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3383	for (k = 0; k < adev->usec_timeout; k++) {
3384		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3385			break;
3386		udelay(1);
3387	}
3388}
3389
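/* Write @rlc to RLC_CNTL if it differs from the current value (used to restore the state saved by gfx_v7_0_halt_rlc()). */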
3390static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3391{
3392	u32 tmp;
3393
3394	tmp = RREG32(mmRLC_CNTL);
3395	if (tmp != rlc)
3396		WREG32(mmRLC_CNTL, rlc);
3397}
3398
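/* Halt the RLC F32 core and wait for it to go idle; returns the previous RLC_CNTL value so the caller can restore it via gfx_v7_0_update_rlc(). */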
3399static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3400{
3401	u32 data, orig;
3402
3403	orig = data = RREG32(mmRLC_CNTL);
3404
3405	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3406		u32 i;
3407
3408		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3409		WREG32(mmRLC_CNTL, data);
3410
3411		for (i = 0; i < adev->usec_timeout; i++) {
3412			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3413				break;
3414			udelay(1);
3415		}
3416
3417		gfx_v7_0_wait_for_rlc_serdes(adev);
3418	}
3419
3420	return orig;
3421}
3422
3423static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
3424{
3425	return true;
3426}
3427
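/* Enter RLC safe mode via RLC_GPR_REG2 (bit 0 = request, bit 1 = enter/exit) and wait for the RLC to acknowledge by clearing the request bit. */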
3428static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
3429{
3430	u32 tmp, i, mask;
3431
3432	tmp = 0x1 | (1 << 1);
3433	WREG32(mmRLC_GPR_REG2, tmp);
3434
3435	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3436		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3437	for (i = 0; i < adev->usec_timeout; i++) {
3438		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3439			break;
3440		udelay(1);
3441	}
3442
3443	for (i = 0; i < adev->usec_timeout; i++) {
3444		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3445			break;
3446		udelay(1);
3447	}
3448}
3449
3450static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
3451{
3452	u32 tmp;
3453
3454	tmp = 0x1 | (0 << 1);
3455	WREG32(mmRLC_GPR_REG2, tmp);
3456}
3457
3458/**
3459 * gfx_v7_0_rlc_stop - stop the RLC ME
3460 *
3461 * @adev: amdgpu_device pointer
3462 *
3463 * Halt the RLC ME (MicroEngine) (CIK).
3464 */
3465static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3466{
3467	WREG32(mmRLC_CNTL, 0);
3468
3469	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3470
3471	gfx_v7_0_wait_for_rlc_serdes(adev);
3472}
3473
3474/**
3475 * gfx_v7_0_rlc_start - start the RLC ME
3476 *
3477 * @adev: amdgpu_device pointer
3478 *
3479 * Unhalt the RLC ME (MicroEngine) (CIK).
3480 */
3481static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3482{
3483	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3484
3485	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3486
3487	udelay(50);
3488}
3489
3490static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3491{
3492	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3493
3494	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3495	WREG32(mmGRBM_SOFT_RESET, tmp);
3496	udelay(50);
3497	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3498	WREG32(mmGRBM_SOFT_RESET, tmp);
3499	udelay(50);
3500}
3501
3502/**
3503 * gfx_v7_0_rlc_resume - setup the RLC hw
3504 *
3505 * @adev: amdgpu_device pointer
3506 *
3507 * Initialize the RLC registers, load the ucode,
3508 * and start the RLC (CIK).
3509 * Returns 0 for success, -EINVAL if the ucode is not available.
3510 */
3511static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3512{
3513	const struct rlc_firmware_header_v1_0 *hdr;
3514	const __le32 *fw_data;
3515	unsigned i, fw_size;
3516	u32 tmp;
3517
3518	if (!adev->gfx.rlc_fw)
3519		return -EINVAL;
3520
3521	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3522	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3523	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3524	adev->gfx.rlc_feature_version = le32_to_cpu(
3525					hdr->ucode_feature_version);
3526
3527	adev->gfx.rlc.funcs->stop(adev);
3528
3529	/* disable CG */
3530	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3531	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3532
3533	adev->gfx.rlc.funcs->reset(adev);
3534
3535	gfx_v7_0_init_pg(adev);
3536
3537	WREG32(mmRLC_LB_CNTR_INIT, 0);
3538	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3539
3540	mutex_lock(&adev->grbm_idx_mutex);
3541	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3542	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3543	WREG32(mmRLC_LB_PARAMS, 0x00600408);
3544	WREG32(mmRLC_LB_CNTL, 0x80000004);
3545	mutex_unlock(&adev->grbm_idx_mutex);
3546
3547	WREG32(mmRLC_MC_CNTL, 0);
3548	WREG32(mmRLC_UCODE_CNTL, 0);
3549
3550	fw_data = (const __le32 *)
3551		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3552	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3553	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3554	for (i = 0; i < fw_size; i++)
3555		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3556	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3557
3558	/* XXX - find out what chips support lbpw */
3559	gfx_v7_0_enable_lbpw(adev, false);
3560
3561	if (adev->asic_type == CHIP_BONAIRE)
3562		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3563
3564	adev->gfx.rlc.funcs->start(adev);
3565
3566	return 0;
3567}
3568
3569static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
3570{
3571	u32 data;
3572
3573	data = RREG32(mmRLC_SPM_VMID);
3574
3575	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
3576	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
3577
3578	WREG32(mmRLC_SPM_VMID, data);
3579}
3580
3581static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3582{
3583	u32 data, orig, tmp, tmp2;
3584
3585	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3586
3587	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3588		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3589
3590		tmp = gfx_v7_0_halt_rlc(adev);
3591
3592		mutex_lock(&adev->grbm_idx_mutex);
3593		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3594		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3595		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3596		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3597			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3598			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3599		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3600		mutex_unlock(&adev->grbm_idx_mutex);
3601
3602		gfx_v7_0_update_rlc(adev, tmp);
3603
3604		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3605		if (orig != data)
3606			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3607
3608	} else {
3609		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3610
3611		RREG32(mmCB_CGTT_SCLK_CTRL);
3612		RREG32(mmCB_CGTT_SCLK_CTRL);
3613		RREG32(mmCB_CGTT_SCLK_CTRL);
3614		RREG32(mmCB_CGTT_SCLK_CTRL);
3615
3616		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3617		if (orig != data)
3618			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3619
3620		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3621	}
3622}
3623
3624static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3625{
3626	u32 data, orig, tmp = 0;
3627
3628	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3629		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3630			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3631				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3632				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3633				if (orig != data)
3634					WREG32(mmCP_MEM_SLP_CNTL, data);
3635			}
3636		}
3637
3638		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3639		data |= 0x00000001;
3640		data &= 0xfffffffd;
3641		if (orig != data)
3642			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3643
3644		tmp = gfx_v7_0_halt_rlc(adev);
3645
3646		mutex_lock(&adev->grbm_idx_mutex);
3647		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3648		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3649		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3650		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3651			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3652		WREG32(mmRLC_SERDES_WR_CTRL, data);
3653		mutex_unlock(&adev->grbm_idx_mutex);
3654
3655		gfx_v7_0_update_rlc(adev, tmp);
3656
3657		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3658			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3659			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3660			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3661			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3662			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3663			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3664			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3665				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3666			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3667			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3668			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3669			if (orig != data)
3670				WREG32(mmCGTS_SM_CTRL_REG, data);
3671		}
3672	} else {
3673		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3674		data |= 0x00000003;
3675		if (orig != data)
3676			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3677
3678		data = RREG32(mmRLC_MEM_SLP_CNTL);
3679		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3680			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3681			WREG32(mmRLC_MEM_SLP_CNTL, data);
3682		}
3683
3684		data = RREG32(mmCP_MEM_SLP_CNTL);
3685		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3686			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3687			WREG32(mmCP_MEM_SLP_CNTL, data);
3688		}
3689
3690		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3691		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3692		if (orig != data)
3693			WREG32(mmCGTS_SM_CTRL_REG, data);
3694
3695		tmp = gfx_v7_0_halt_rlc(adev);
3696
3697		mutex_lock(&adev->grbm_idx_mutex);
3698		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3699		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3700		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3701		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3702		WREG32(mmRLC_SERDES_WR_CTRL, data);
3703		mutex_unlock(&adev->grbm_idx_mutex);
3704
3705		gfx_v7_0_update_rlc(adev, tmp);
3706	}
3707}
3708
3709static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3710			       bool enable)
3711{
3712	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3713	/* order matters! */
3714	if (enable) {
3715		gfx_v7_0_enable_mgcg(adev, true);
3716		gfx_v7_0_enable_cgcg(adev, true);
3717	} else {
3718		gfx_v7_0_enable_cgcg(adev, false);
3719		gfx_v7_0_enable_mgcg(adev, false);
3720	}
3721	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3722}
3723
3724static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3725						bool enable)
3726{
3727	u32 data, orig;
3728
3729	orig = data = RREG32(mmRLC_PG_CNTL);
3730	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3731		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3732	else
3733		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3734	if (orig != data)
3735		WREG32(mmRLC_PG_CNTL, data);
3736}
3737
3738static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3739						bool enable)
3740{
3741	u32 data, orig;
3742
3743	orig = data = RREG32(mmRLC_PG_CNTL);
3744	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3745		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3746	else
3747		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3748	if (orig != data)
3749		WREG32(mmRLC_PG_CNTL, data);
3750}
3751
3752static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3753{
3754	u32 data, orig;
3755
3756	orig = data = RREG32(mmRLC_PG_CNTL);
3757	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3758		data &= ~0x8000;
3759	else
3760		data |= 0x8000;
3761	if (orig != data)
3762		WREG32(mmRLC_PG_CNTL, data);
3763}
3764
3765static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3766{
3767	u32 data, orig;
3768
3769	orig = data = RREG32(mmRLC_PG_CNTL);
3770	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3771		data &= ~0x2000;
3772	else
3773		data |= 0x2000;
3774	if (orig != data)
3775		WREG32(mmRLC_PG_CNTL, data);
3776}
3777
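/* Number of CP microcode jump tables to save for power gating; Kaveri has a second MEC and therefore one extra table. */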
3778static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
3779{
3780	if (adev->asic_type == CHIP_KAVERI)
3781		return 5;
3782	else
3783		return 4;
3784}
3785
3786static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3787				     bool enable)
3788{
3789	u32 data, orig;
3790
3791	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3792		orig = data = RREG32(mmRLC_PG_CNTL);
3793		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3794		if (orig != data)
3795			WREG32(mmRLC_PG_CNTL, data);
3796
3797		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3798		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3799		if (orig != data)
3800			WREG32(mmRLC_AUTO_PG_CTRL, data);
3801	} else {
3802		orig = data = RREG32(mmRLC_PG_CNTL);
3803		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3804		if (orig != data)
3805			WREG32(mmRLC_PG_CNTL, data);
3806
3807		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3808		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3809		if (orig != data)
3810			WREG32(mmRLC_AUTO_PG_CTRL, data);
3811
3812		data = RREG32(mmDB_RENDER_CONTROL);
3813	}
3814}
3815
3816static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3817						 u32 bitmap)
3818{
3819	u32 data;
3820
3821	if (!bitmap)
3822		return;
3823
3824	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3825	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3826
3827	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3828}
3829
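/* Combine the fuse-level and user-programmed inactive CU masks and return the bitmap of CUs active in the currently selected SE/SH. */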
3830static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3831{
3832	u32 data, mask;
3833
3834	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
3835	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
3836
3837	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3838	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3839
3840	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3841
3842	return (~data) & mask;
3843}
3844
3845static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3846{
3847	u32 tmp;
3848
3849	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3850
3851	tmp = RREG32(mmRLC_MAX_PG_CU);
3852	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
3853	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3854	WREG32(mmRLC_MAX_PG_CU, tmp);
3855}
3856
3857static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3858					    bool enable)
3859{
3860	u32 data, orig;
3861
3862	orig = data = RREG32(mmRLC_PG_CNTL);
3863	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3864		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3865	else
3866		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3867	if (orig != data)
3868		WREG32(mmRLC_PG_CNTL, data);
3869}
3870
3871static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3872					     bool enable)
3873{
3874	u32 data, orig;
3875
3876	orig = data = RREG32(mmRLC_PG_CNTL);
3877	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3878		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3879	else
3880		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3881	if (orig != data)
3882		WREG32(mmRLC_PG_CNTL, data);
3883}
3884
3885#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
3886#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
3887
3888static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3889{
3890	u32 data, orig;
3891	u32 i;
3892
3893	if (adev->gfx.rlc.cs_data) {
3894		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3895		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3896		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3897		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3898	} else {
3899		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3900		for (i = 0; i < 3; i++)
3901			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
3902	}
3903	if (adev->gfx.rlc.reg_list) {
3904		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
3905		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3906			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3907	}
3908
3909	orig = data = RREG32(mmRLC_PG_CNTL);
3910	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
3911	if (orig != data)
3912		WREG32(mmRLC_PG_CNTL, data);
3913
3914	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3915	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3916
3917	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3918	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3919	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3920	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
3921
3922	data = 0x10101010;
3923	WREG32(mmRLC_PG_DELAY, data);
3924
3925	data = RREG32(mmRLC_PG_DELAY_2);
3926	data &= ~0xff;
3927	data |= 0x3;
3928	WREG32(mmRLC_PG_DELAY_2, data);
3929
3930	data = RREG32(mmRLC_AUTO_PG_CTRL);
3931	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
3932	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
3933	WREG32(mmRLC_AUTO_PG_CTRL, data);
3934
3935}
3936
3937static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3938{
3939	gfx_v7_0_enable_gfx_cgpg(adev, enable);
3940	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3941	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
3942}
3943
3944static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3945{
3946	u32 count = 0;
3947	const struct cs_section_def *sect = NULL;
3948	const struct cs_extent_def *ext = NULL;
3949
3950	if (adev->gfx.rlc.cs_data == NULL)
3951		return 0;
3952
3953	/* begin clear state */
3954	count += 2;
3955	/* context control state */
3956	count += 3;
3957
3958	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3959		for (ext = sect->section; ext->extent != NULL; ++ext) {
3960			if (sect->id == SECT_CONTEXT)
3961				count += 2 + ext->reg_count;
3962			else
3963				return 0;
3964		}
3965	}
3966	/* pa_sc_raster_config/pa_sc_raster_config1 */
3967	count += 4;
3968	/* end clear state */
3969	count += 2;
3970	/* clear state */
3971	count += 2;
3972
3973	return count;
3974}
3975
3976static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
3977				    volatile u32 *buffer)
3978{
3979	u32 count = 0, i;
3980	const struct cs_section_def *sect = NULL;
3981	const struct cs_extent_def *ext = NULL;
3982
3983	if (adev->gfx.rlc.cs_data == NULL)
3984		return;
3985	if (buffer == NULL)
3986		return;
3987
3988	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3989	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3990
3991	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3992	buffer[count++] = cpu_to_le32(0x80000000);
3993	buffer[count++] = cpu_to_le32(0x80000000);
3994
3995	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3996		for (ext = sect->section; ext->extent != NULL; ++ext) {
3997			if (sect->id == SECT_CONTEXT) {
3998				buffer[count++] =
3999					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
4000				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4001				for (i = 0; i < ext->reg_count; i++)
4002					buffer[count++] = cpu_to_le32(ext->extent[i]);
4003			} else {
4004				return;
4005			}
4006		}
4007	}
4008
4009	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4010	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4011	switch (adev->asic_type) {
4012	case CHIP_BONAIRE:
4013		buffer[count++] = cpu_to_le32(0x16000012);
4014		buffer[count++] = cpu_to_le32(0x00000000);
4015		break;
4016	case CHIP_KAVERI:
4017		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4018		buffer[count++] = cpu_to_le32(0x00000000);
4019		break;
4020	case CHIP_KABINI:
4021	case CHIP_MULLINS:
4022		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4023		buffer[count++] = cpu_to_le32(0x00000000);
4024		break;
4025	case CHIP_HAWAII:
4026		buffer[count++] = cpu_to_le32(0x3a00161a);
4027		buffer[count++] = cpu_to_le32(0x0000002e);
4028		break;
4029	default:
4030		buffer[count++] = cpu_to_le32(0x00000000);
4031		buffer[count++] = cpu_to_le32(0x00000000);
4032		break;
4033	}
4034
4035	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4036	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
4037
4038	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4039	buffer[count++] = cpu_to_le32(0);
4040}
4041
4042static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4043{
4044	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4045			      AMD_PG_SUPPORT_GFX_SMG |
4046			      AMD_PG_SUPPORT_GFX_DMG |
4047			      AMD_PG_SUPPORT_CP |
4048			      AMD_PG_SUPPORT_GDS |
4049			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4050		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4051		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4052		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4053			gfx_v7_0_init_gfx_cgpg(adev);
4054			gfx_v7_0_enable_cp_pg(adev, true);
4055			gfx_v7_0_enable_gds_pg(adev, true);
4056		}
4057		gfx_v7_0_init_ao_cu_mask(adev);
4058		gfx_v7_0_update_gfx_pg(adev, true);
4059	}
4060}
4061
4062static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4063{
4064	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4065			      AMD_PG_SUPPORT_GFX_SMG |
4066			      AMD_PG_SUPPORT_GFX_DMG |
4067			      AMD_PG_SUPPORT_CP |
4068			      AMD_PG_SUPPORT_GDS |
4069			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4070		gfx_v7_0_update_gfx_pg(adev, false);
4071		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4072			gfx_v7_0_enable_cp_pg(adev, false);
4073			gfx_v7_0_enable_gds_pg(adev, false);
4074		}
4075	}
4076}
4077
4078/**
4079 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4080 *
4081 * @adev: amdgpu_device pointer
4082 *
4083 * Fetches a GPU clock counter snapshot (CIK).
4084 * Returns the 64 bit clock counter snapshot.
4085 */
4086static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4087{
4088	uint64_t clock;
4089
4090	mutex_lock(&adev->gfx.gpu_clock_mutex);
4091	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4092	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4093		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4094	mutex_unlock(&adev->gfx.gpu_clock_mutex);
4095	return clock;
4096}
4097
4098static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4099					  uint32_t vmid,
4100					  uint32_t gds_base, uint32_t gds_size,
4101					  uint32_t gws_base, uint32_t gws_size,
4102					  uint32_t oa_base, uint32_t oa_size)
4103{
4104	/* GDS Base */
4105	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4106	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4107				WRITE_DATA_DST_SEL(0)));
4108	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4109	amdgpu_ring_write(ring, 0);
4110	amdgpu_ring_write(ring, gds_base);
4111
4112	/* GDS Size */
4113	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4114	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4115				WRITE_DATA_DST_SEL(0)));
4116	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4117	amdgpu_ring_write(ring, 0);
4118	amdgpu_ring_write(ring, gds_size);
4119
4120	/* GWS */
4121	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4122	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4123				WRITE_DATA_DST_SEL(0)));
4124	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4125	amdgpu_ring_write(ring, 0);
4126	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4127
4128	/* OA */
4129	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4130	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4131				WRITE_DATA_DST_SEL(0)));
4132	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4133	amdgpu_ring_write(ring, 0);
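	/* mask with oa_size bits set starting at oa_base */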
4134	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4135}
4136
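/* Kick the SQ to recover waves belonging to the given VMID on a hung ring (soft recovery). */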
4137static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4138{
4139	struct amdgpu_device *adev = ring->adev;
4140	uint32_t value = 0;
4141
4142	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4143	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4144	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4145	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4146	WREG32(mmSQ_CMD, value);
4147}
4148
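/* Read one dword of per-wave state through the SQ indirect index/data register pair. */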
4149static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4150{
4151	WREG32(mmSQ_IND_INDEX,
4152		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4153		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4154		(address << SQ_IND_INDEX__INDEX__SHIFT) |
4155		(SQ_IND_INDEX__FORCE_READ_MASK));
4156	return RREG32(mmSQ_IND_DATA);
4157}
4158
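/* Read a consecutive range of per-thread wave registers (e.g. SGPRs) using SQ auto-increment. */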
4159static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4160			   uint32_t wave, uint32_t thread,
4161			   uint32_t regno, uint32_t num, uint32_t *out)
4162{
4163	WREG32(mmSQ_IND_INDEX,
4164		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4165		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4166		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
4167		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4168		(SQ_IND_INDEX__FORCE_READ_MASK) |
4169		(SQ_IND_INDEX__AUTO_INCR_MASK));
4170	while (num--)
4171		*(out++) = RREG32(mmSQ_IND_DATA);
4172}
4173
4174static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4175{
4176	/* type 0 wave data */
4177	dst[(*no_fields)++] = 0;
4178	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4179	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4180	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4181	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4182	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4183	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4184	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4185	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4186	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4187	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4188	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4189	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4190	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4191	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4192	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4193	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4194	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4195	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4196}
4197
4198static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
4199				     uint32_t wave, uint32_t start,
4200				     uint32_t size, uint32_t *dst)
4201{
4202	wave_read_regs(
4203		adev, simd, wave, 0,
4204		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4205}
4206
4207static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4208				  u32 me, u32 pipe, u32 q, u32 vm)
4209{
4210	cik_srbm_select(adev, me, pipe, q, vm);
4211}
4212
4213static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4214	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4215	.select_se_sh = &gfx_v7_0_select_se_sh,
4216	.read_wave_data = &gfx_v7_0_read_wave_data,
4217	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4218	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
4219};
4220
4221static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4222	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
4223	.set_safe_mode = gfx_v7_0_set_safe_mode,
4224	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
4225	.init = gfx_v7_0_rlc_init,
4226	.get_csb_size = gfx_v7_0_get_csb_size,
4227	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
4228	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
4229	.resume = gfx_v7_0_rlc_resume,
4230	.stop = gfx_v7_0_rlc_stop,
4231	.reset = gfx_v7_0_rlc_reset,
4232	.start = gfx_v7_0_rlc_start,
4233	.update_spm_vmid = gfx_v7_0_update_spm_vmid
4234};
4235
4236static int gfx_v7_0_early_init(void *handle)
4237{
4238	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4239
4240	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4241	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4242	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4243	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4244	gfx_v7_0_set_ring_funcs(adev);
4245	gfx_v7_0_set_irq_funcs(adev);
4246	gfx_v7_0_set_gds_init(adev);
4247
4248	return 0;
4249}
4250
4251static int gfx_v7_0_late_init(void *handle)
4252{
4253	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4254	int r;
4255
4256	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4257	if (r)
4258		return r;
4259
4260	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4261	if (r)
4262		return r;
4263
4264	return 0;
4265}
4266
4267static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4268{
4269	u32 gb_addr_config;
4270	u32 mc_arb_ramcfg;
4271	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4272	u32 tmp;
4273
4274	switch (adev->asic_type) {
4275	case CHIP_BONAIRE:
4276		adev->gfx.config.max_shader_engines = 2;
4277		adev->gfx.config.max_tile_pipes = 4;
4278		adev->gfx.config.max_cu_per_sh = 7;
4279		adev->gfx.config.max_sh_per_se = 1;
4280		adev->gfx.config.max_backends_per_se = 2;
4281		adev->gfx.config.max_texture_channel_caches = 4;
4282		adev->gfx.config.max_gprs = 256;
4283		adev->gfx.config.max_gs_threads = 32;
4284		adev->gfx.config.max_hw_contexts = 8;
4285
4286		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4287		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4288		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4289		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4290		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4291		break;
4292	case CHIP_HAWAII:
4293		adev->gfx.config.max_shader_engines = 4;
4294		adev->gfx.config.max_tile_pipes = 16;
4295		adev->gfx.config.max_cu_per_sh = 11;
4296		adev->gfx.config.max_sh_per_se = 1;
4297		adev->gfx.config.max_backends_per_se = 4;
4298		adev->gfx.config.max_texture_channel_caches = 16;
4299		adev->gfx.config.max_gprs = 256;
4300		adev->gfx.config.max_gs_threads = 32;
4301		adev->gfx.config.max_hw_contexts = 8;
4302
4303		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4304		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4305		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4306		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4307		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4308		break;
4309	case CHIP_KAVERI:
4310		adev->gfx.config.max_shader_engines = 1;
4311		adev->gfx.config.max_tile_pipes = 4;
4312		adev->gfx.config.max_cu_per_sh = 8;
4313		adev->gfx.config.max_backends_per_se = 2;
4314		adev->gfx.config.max_sh_per_se = 1;
4315		adev->gfx.config.max_texture_channel_caches = 4;
4316		adev->gfx.config.max_gprs = 256;
4317		adev->gfx.config.max_gs_threads = 16;
4318		adev->gfx.config.max_hw_contexts = 8;
4319
4320		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4321		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4322		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4323		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4324		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4325		break;
4326	case CHIP_KABINI:
4327	case CHIP_MULLINS:
4328	default:
4329		adev->gfx.config.max_shader_engines = 1;
4330		adev->gfx.config.max_tile_pipes = 2;
4331		adev->gfx.config.max_cu_per_sh = 2;
4332		adev->gfx.config.max_sh_per_se = 1;
4333		adev->gfx.config.max_backends_per_se = 1;
4334		adev->gfx.config.max_texture_channel_caches = 2;
4335		adev->gfx.config.max_gprs = 256;
4336		adev->gfx.config.max_gs_threads = 16;
4337		adev->gfx.config.max_hw_contexts = 8;
4338
4339		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4340		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4341		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4342		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4343		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4344		break;
4345	}
4346
4347	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4348	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4349
4350	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
4351				MC_ARB_RAMCFG, NOOFBANK);
4352	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
4353				MC_ARB_RAMCFG, NOOFRANKS);
4354
4355	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4356	adev->gfx.config.mem_max_burst_length_bytes = 256;
4357	if (adev->flags & AMD_IS_APU) {
4358		/* Get memory bank mapping mode. */
4359		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4360		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4361		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4362
4363		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4364		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4365		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4366
4367		/* Validate settings in case only one DIMM is installed. */
4368		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4369			dimm00_addr_map = 0;
4370		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4371			dimm01_addr_map = 0;
4372		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4373			dimm10_addr_map = 0;
4374		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4375			dimm11_addr_map = 0;
4376
4377		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4378		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
4379		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4380			adev->gfx.config.mem_row_size_in_kb = 2;
4381		else
4382			adev->gfx.config.mem_row_size_in_kb = 1;
4383	} else {
4384		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4385		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4386		if (adev->gfx.config.mem_row_size_in_kb > 4)
4387			adev->gfx.config.mem_row_size_in_kb = 4;
4388	}
4389	/* XXX use MC settings? */
4390	adev->gfx.config.shader_engine_tile_size = 32;
4391	adev->gfx.config.num_gpus = 1;
4392	adev->gfx.config.multi_gpu_tile_size = 64;
4393
4394	/* fix up row size */
4395	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4396	switch (adev->gfx.config.mem_row_size_in_kb) {
4397	case 1:
4398	default:
4399		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4400		break;
4401	case 2:
4402		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4403		break;
4404	case 4:
4405		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4406		break;
4407	}
4408	adev->gfx.config.gb_addr_config = gb_addr_config;
4409}
4410
4411static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
4412					int mec, int pipe, int queue)
4413{
4414	int r;
4415	unsigned irq_type;
4416	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
4417
4418	/* mec0 is me1 */
4419	ring->me = mec + 1;
4420	ring->pipe = pipe;
4421	ring->queue = queue;
4422
4423	ring->ring_obj = NULL;
4424	ring->use_doorbell = true;
4425	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
4426	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4427
4428	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4429		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
4430		+ ring->pipe;
4431
4432	/* type-2 packets are deprecated on MEC, use type-3 instead */
4433	r = amdgpu_ring_init(adev, ring, 1024,
4434			     &adev->gfx.eop_irq, irq_type,
4435			     AMDGPU_RING_PRIO_DEFAULT);
4436	if (r)
4437		return r;
4438
4439
4440	return 0;
4441}
4442
4443static int gfx_v7_0_sw_init(void *handle)
4444{
4445	struct amdgpu_ring *ring;
4446	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4447	int i, j, k, r, ring_id;
4448
4449	switch (adev->asic_type) {
4450	case CHIP_KAVERI:
4451		adev->gfx.mec.num_mec = 2;
4452		break;
4453	case CHIP_BONAIRE:
4454	case CHIP_HAWAII:
4455	case CHIP_KABINI:
4456	case CHIP_MULLINS:
4457	default:
4458		adev->gfx.mec.num_mec = 1;
4459		break;
4460	}
4461	adev->gfx.mec.num_pipe_per_mec = 4;
4462	adev->gfx.mec.num_queue_per_pipe = 8;
4463
4464	/* EOP Event */
4465	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
4466	if (r)
4467		return r;
4468
4469	/* Privileged reg */
4470	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
4471			      &adev->gfx.priv_reg_irq);
4472	if (r)
4473		return r;
4474
4475	/* Privileged inst */
4476	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
4477			      &adev->gfx.priv_inst_irq);
4478	if (r)
4479		return r;
4480
4481	gfx_v7_0_scratch_init(adev);
4482
4483	r = gfx_v7_0_init_microcode(adev);
4484	if (r) {
4485		DRM_ERROR("Failed to load gfx firmware!\n");
4486		return r;
4487	}
4488
4489	r = adev->gfx.rlc.funcs->init(adev);
4490	if (r) {
4491		DRM_ERROR("Failed to init rlc BOs!\n");
4492		return r;
4493	}
4494
4495	/* allocate mec buffers */
4496	r = gfx_v7_0_mec_init(adev);
4497	if (r) {
4498		DRM_ERROR("Failed to init MEC BOs!\n");
4499		return r;
4500	}
4501
4502	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4503		ring = &adev->gfx.gfx_ring[i];
4504		ring->ring_obj = NULL;
4505		sprintf(ring->name, "gfx");
4506		r = amdgpu_ring_init(adev, ring, 1024,
4507				     &adev->gfx.eop_irq,
4508				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
4509				     AMDGPU_RING_PRIO_DEFAULT);
4510		if (r)
4511			return r;
4512	}
4513
4514	/* set up the compute queues - allocate horizontally across pipes */
4515	ring_id = 0;
4516	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4517		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4518			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4519				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
4520					continue;
4521
4522				r = gfx_v7_0_compute_ring_init(adev,
4523								ring_id,
4524								i, k, j);
4525				if (r)
4526					return r;
4527
4528				ring_id++;
4529			}
4530		}
4531	}
4532
4533	adev->gfx.ce_ram_size = 0x8000;
4534
4535	gfx_v7_0_gpu_early_init(adev);
4536
4537	return r;
4538}
4539
4540static int gfx_v7_0_sw_fini(void *handle)
4541{
4542	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4543	int i;
4544
4545	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4546		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4547	for (i = 0; i < adev->gfx.num_compute_rings; i++)
4548		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4549
4550	gfx_v7_0_cp_compute_fini(adev);
4551	amdgpu_gfx_rlc_fini(adev);
4552	gfx_v7_0_mec_fini(adev);
4553	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
4554				&adev->gfx.rlc.clear_state_gpu_addr,
4555				(void **)&adev->gfx.rlc.cs_ptr);
4556	if (adev->gfx.rlc.cp_table_size) {
4557		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
4558				&adev->gfx.rlc.cp_table_gpu_addr,
4559				(void **)&adev->gfx.rlc.cp_table_ptr);
4560	}
4561	gfx_v7_0_free_microcode(adev);
4562
4563	return 0;
4564}
4565
4566static int gfx_v7_0_hw_init(void *handle)
4567{
4568	int r;
4569	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4570
4571	gfx_v7_0_constants_init(adev);
4572
4573	/* init CSB */
4574	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
4575	/* init rlc */
4576	r = adev->gfx.rlc.funcs->resume(adev);
4577	if (r)
4578		return r;
4579
4580	r = gfx_v7_0_cp_resume(adev);
4581	if (r)
4582		return r;
4583
4584	return r;
4585}
4586
4587static int gfx_v7_0_hw_fini(void *handle)
4588{
4589	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4590
4591	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4592	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4593	gfx_v7_0_cp_enable(adev, false);
4594	adev->gfx.rlc.funcs->stop(adev);
4595	gfx_v7_0_fini_pg(adev);
4596
4597	return 0;
4598}
4599
4600static int gfx_v7_0_suspend(void *handle)
4601{
4602	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4603
4604	return gfx_v7_0_hw_fini(adev);
4605}
4606
4607static int gfx_v7_0_resume(void *handle)
4608{
4609	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4610
4611	return gfx_v7_0_hw_init(adev);
4612}
4613
4614static bool gfx_v7_0_is_idle(void *handle)
4615{
4616	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4617
4618	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4619		return false;
4620	else
4621		return true;
4622}
4623
4624static int gfx_v7_0_wait_for_idle(void *handle)
4625{
4626	unsigned i;
4627	u32 tmp;
4628	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4629
4630	for (i = 0; i < adev->usec_timeout; i++) {
4631		/* read GRBM_STATUS */
4632		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4633
4634		if (!tmp)
4635			return 0;
4636		udelay(1);
4637	}
4638	return -ETIMEDOUT;
4639}
4640
4641static int gfx_v7_0_soft_reset(void *handle)
4642{
4643	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4644	u32 tmp;
4645	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4646
4647	/* GRBM_STATUS */
4648	tmp = RREG32(mmGRBM_STATUS);
4649	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4650		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4651		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4652		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4653		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4654		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4655		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4656			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4657
4658	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4659		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4660		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4661	}
4662
4663	/* GRBM_STATUS2 */
4664	tmp = RREG32(mmGRBM_STATUS2);
4665	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4666		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4667
4668	/* SRBM_STATUS */
4669	tmp = RREG32(mmSRBM_STATUS);
4670	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4671		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4672
4673	if (grbm_soft_reset || srbm_soft_reset) {
4674		/* disable CG/PG */
4675		gfx_v7_0_fini_pg(adev);
4676		gfx_v7_0_update_cg(adev, false);
4677
4678		/* stop the rlc */
4679		adev->gfx.rlc.funcs->stop(adev);
4680
4681		/* Disable GFX parsing/prefetching */
4682		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4683
4684		/* Disable MEC parsing/prefetching */
4685		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4686
4687		if (grbm_soft_reset) {
4688			tmp = RREG32(mmGRBM_SOFT_RESET);
4689			tmp |= grbm_soft_reset;
4690			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4691			WREG32(mmGRBM_SOFT_RESET, tmp);
4692			tmp = RREG32(mmGRBM_SOFT_RESET);
4693
4694			udelay(50);
4695
4696			tmp &= ~grbm_soft_reset;
4697			WREG32(mmGRBM_SOFT_RESET, tmp);
4698			tmp = RREG32(mmGRBM_SOFT_RESET);
4699		}
4700
4701		if (srbm_soft_reset) {
4702			tmp = RREG32(mmSRBM_SOFT_RESET);
4703			tmp |= srbm_soft_reset;
4704			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4705			WREG32(mmSRBM_SOFT_RESET, tmp);
4706			tmp = RREG32(mmSRBM_SOFT_RESET);
4707
4708			udelay(50);
4709
4710			tmp &= ~srbm_soft_reset;
4711			WREG32(mmSRBM_SOFT_RESET, tmp);
4712			tmp = RREG32(mmSRBM_SOFT_RESET);
4713		}
4714		/* Wait a little for things to settle down */
4715		udelay(50);
4716	}
4717	return 0;
4718}
4719
4720static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4721						 enum amdgpu_interrupt_state state)
4722{
4723	u32 cp_int_cntl;
4724
4725	switch (state) {
4726	case AMDGPU_IRQ_STATE_DISABLE:
4727		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4728		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4729		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4730		break;
4731	case AMDGPU_IRQ_STATE_ENABLE:
4732		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4733		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4734		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4735		break;
4736	default:
4737		break;
4738	}
4739}
4740
4741static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4742						     int me, int pipe,
4743						     enum amdgpu_interrupt_state state)
4744{
4745	u32 mec_int_cntl, mec_int_cntl_reg;
4746
4747	/*
4748	 * amdgpu controls only the first MEC. That's why this function only
4749	 * handles the setting of interrupts for this specific MEC. All other
4750	 * pipes' interrupts are set by amdkfd.
4751	 */
4752
4753	if (me == 1) {
4754		switch (pipe) {
4755		case 0:
4756			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4757			break;
4758		case 1:
4759			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
4760			break;
4761		case 2:
4762			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
4763			break;
4764		case 3:
4765			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
4766			break;
4767		default:
4768			DRM_DEBUG("invalid pipe %d\n", pipe);
4769			return;
4770		}
4771	} else {
4772		DRM_DEBUG("invalid me %d\n", me);
4773		return;
4774	}
4775
4776	switch (state) {
4777	case AMDGPU_IRQ_STATE_DISABLE:
4778		mec_int_cntl = RREG32(mec_int_cntl_reg);
4779		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4780		WREG32(mec_int_cntl_reg, mec_int_cntl);
4781		break;
4782	case AMDGPU_IRQ_STATE_ENABLE:
4783		mec_int_cntl = RREG32(mec_int_cntl_reg);
4784		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4785		WREG32(mec_int_cntl_reg, mec_int_cntl);
4786		break;
4787	default:
4788		break;
4789	}
4790}
4791
4792static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4793					     struct amdgpu_irq_src *src,
4794					     unsigned type,
4795					     enum amdgpu_interrupt_state state)
4796{
4797	u32 cp_int_cntl;
4798
4799	switch (state) {
4800	case AMDGPU_IRQ_STATE_DISABLE:
4801		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4802		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4803		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4804		break;
4805	case AMDGPU_IRQ_STATE_ENABLE:
4806		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4807		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4808		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4809		break;
4810	default:
4811		break;
4812	}
4813
4814	return 0;
4815}
4816
4817static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4818					      struct amdgpu_irq_src *src,
4819					      unsigned type,
4820					      enum amdgpu_interrupt_state state)
4821{
4822	u32 cp_int_cntl;
4823
4824	switch (state) {
4825	case AMDGPU_IRQ_STATE_DISABLE:
4826		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4827		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4828		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4829		break;
4830	case AMDGPU_IRQ_STATE_ENABLE:
4831		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4832		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4833		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4834		break;
4835	default:
4836		break;
4837	}
4838
4839	return 0;
4840}
4841
4842static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4843					    struct amdgpu_irq_src *src,
4844					    unsigned type,
4845					    enum amdgpu_interrupt_state state)
4846{
4847	switch (type) {
4848	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4849		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4850		break;
4851	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4852		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4853		break;
4854	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4855		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4856		break;
4857	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4858		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4859		break;
4860	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4861		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4862		break;
4863	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4864		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4865		break;
4866	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4867		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4868		break;
4869	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4870		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4871		break;
4872	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4873		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4874		break;
4875	default:
4876		break;
4877	}
4878	return 0;
4879}
4880
4881static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4882			    struct amdgpu_irq_src *source,
4883			    struct amdgpu_iv_entry *entry)
4884{
4885	u8 me_id, pipe_id;
4886	struct amdgpu_ring *ring;
4887	int i;
4888
4889	DRM_DEBUG("IH: CP EOP\n");
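	/* ring_id encodes the ME in bits [3:2] and the pipe in bits [1:0] */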
4890	me_id = (entry->ring_id & 0x0c) >> 2;
4891	pipe_id = (entry->ring_id & 0x03) >> 0;
4892	switch (me_id) {
4893	case 0:
4894		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4895		break;
4896	case 1:
4897	case 2:
4898		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4899			ring = &adev->gfx.compute_ring[i];
4900			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4901				amdgpu_fence_process(ring);
4902		}
4903		break;
4904	}
4905	return 0;
4906}
4907
4908static void gfx_v7_0_fault(struct amdgpu_device *adev,
4909			   struct amdgpu_iv_entry *entry)
4910{
4911	struct amdgpu_ring *ring;
4912	u8 me_id, pipe_id;
4913	int i;
4914
4915	me_id = (entry->ring_id & 0x0c) >> 2;
4916	pipe_id = (entry->ring_id & 0x03) >> 0;
4917	switch (me_id) {
4918	case 0:
4919		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4920		break;
4921	case 1:
4922	case 2:
4923		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4924			ring = &adev->gfx.compute_ring[i];
4925			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4926				drm_sched_fault(&ring->sched);
4927		}
4928		break;
4929	}
4930}
4931
4932static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4933				 struct amdgpu_irq_src *source,
4934				 struct amdgpu_iv_entry *entry)
4935{
4936	DRM_ERROR("Illegal register access in command stream\n");
4937	gfx_v7_0_fault(adev, entry);
4938	return 0;
4939}
4940
4941static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4942				  struct amdgpu_irq_src *source,
4943				  struct amdgpu_iv_entry *entry)
4944{
4945	DRM_ERROR("Illegal instruction in command stream\n");
4946	/* XXX soft reset the gfx block only */
4947	gfx_v7_0_fault(adev, entry);
4948	return 0;
4949}
4950
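/* Enable or disable medium and coarse grain clock gating.  The GUI idle
 * interrupt is masked while the state changes; MGCG is turned on before
 * CGCG and turned off after it, hence the "order matters" note below.
 */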
4951static int gfx_v7_0_set_clockgating_state(void *handle,
4952					  enum amd_clockgating_state state)
4953{
4954	bool gate = false;
4955	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4956
4957	if (state == AMD_CG_STATE_GATE)
4958		gate = true;
4959
4960	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
4961	/* order matters! */
4962	if (gate) {
4963		gfx_v7_0_enable_mgcg(adev, true);
4964		gfx_v7_0_enable_cgcg(adev, true);
4965	} else {
4966		gfx_v7_0_enable_cgcg(adev, false);
4967		gfx_v7_0_enable_mgcg(adev, false);
4968	}
4969	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4970
4971	return 0;
4972}
4973
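/* Enable or disable graphics power gating.  CP and GDS power gating are
 * only toggled when static GFX power gating is supported.
 */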
4974static int gfx_v7_0_set_powergating_state(void *handle,
4975					  enum amd_powergating_state state)
4976{
4977	bool gate = false;
4978	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4979
4980	if (state == AMD_PG_STATE_GATE)
4981		gate = true;
4982
4983	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4984			      AMD_PG_SUPPORT_GFX_SMG |
4985			      AMD_PG_SUPPORT_GFX_DMG |
4986			      AMD_PG_SUPPORT_CP |
4987			      AMD_PG_SUPPORT_GDS |
4988			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4989		gfx_v7_0_update_gfx_pg(adev, gate);
4990		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4991			gfx_v7_0_enable_cp_pg(adev, gate);
4992			gfx_v7_0_enable_gds_pg(adev, gate);
4993		}
4994	}
4995
4996	return 0;
4997}
4998
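/* Emit a SURFACE_SYNC packet on the gfx ring to flush/invalidate the TC L1
 * and L2 caches and the shader K$/I$ caches over the full address range.
 */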
4999static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
5000{
5001	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
5002	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
5003			  PACKET3_TC_ACTION_ENA |
5004			  PACKET3_SH_KCACHE_ACTION_ENA |
5005			  PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
5006	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
5007	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
5008	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
5009}
5010
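/* Same cache flush/invalidate as above, expressed as an ACQUIRE_MEM packet
 * for the compute rings (carries 64-bit coherency size and base).
 */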
5011static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
5012{
5013	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
5014	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
5015			  PACKET3_TC_ACTION_ENA |
5016			  PACKET3_SH_KCACHE_ACTION_ENA |
5017			  PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
5018	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
5019	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
5020	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
5021	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
5022	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
5023}
5024
5025static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5026	.name = "gfx_v7_0",
5027	.early_init = gfx_v7_0_early_init,
5028	.late_init = gfx_v7_0_late_init,
5029	.sw_init = gfx_v7_0_sw_init,
5030	.sw_fini = gfx_v7_0_sw_fini,
5031	.hw_init = gfx_v7_0_hw_init,
5032	.hw_fini = gfx_v7_0_hw_fini,
5033	.suspend = gfx_v7_0_suspend,
5034	.resume = gfx_v7_0_resume,
5035	.is_idle = gfx_v7_0_is_idle,
5036	.wait_for_idle = gfx_v7_0_wait_for_idle,
5037	.soft_reset = gfx_v7_0_soft_reset,
5038	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
5039	.set_powergating_state = gfx_v7_0_set_powergating_state,
5040};
5041
5042static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5043	.type = AMDGPU_RING_TYPE_GFX,
5044	.align_mask = 0xff,
5045	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5046	.support_64bit_ptrs = false,
5047	.get_rptr = gfx_v7_0_ring_get_rptr,
5048	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5049	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5050	.emit_frame_size =
5051		20 + /* gfx_v7_0_ring_emit_gds_switch */
5052		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5053		5 + /* hdp invalidate */
5054		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
5055		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
5056		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
5057		3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush */
5058		5, /* SURFACE_SYNC */
5059	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
5060	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5061	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5062	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5063	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5064	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5065	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5066	.test_ring = gfx_v7_0_ring_test_ring,
5067	.test_ib = gfx_v7_0_ring_test_ib,
5068	.insert_nop = amdgpu_ring_insert_nop,
5069	.pad_ib = amdgpu_ring_generic_pad_ib,
5070	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5071	.emit_wreg = gfx_v7_0_ring_emit_wreg,
5072	.soft_recovery = gfx_v7_0_ring_soft_recovery,
5073	.emit_mem_sync = gfx_v7_0_emit_mem_sync,
5074};
5075
5076static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5077	.type = AMDGPU_RING_TYPE_COMPUTE,
5078	.align_mask = 0xff,
5079	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5080	.support_64bit_ptrs = false,
5081	.get_rptr = gfx_v7_0_ring_get_rptr,
5082	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
5083	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
5084	.emit_frame_size =
5085		20 + /* gfx_v7_0_ring_emit_gds_switch */
5086		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5087		5 + /* hdp invalidate */
5088		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5089		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
5090		7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5091		7, /* gfx_v7_0_emit_mem_sync_compute */
5092	.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
5093	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
5094	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
5095	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5096	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5097	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5098	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5099	.test_ring = gfx_v7_0_ring_test_ring,
5100	.test_ib = gfx_v7_0_ring_test_ib,
5101	.insert_nop = amdgpu_ring_insert_nop,
5102	.pad_ib = amdgpu_ring_generic_pad_ib,
5103	.emit_wreg = gfx_v7_0_ring_emit_wreg,
5104	.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
5105};
5106
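/* Attach the gfx and compute ring function tables defined above. */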
5107static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5108{
5109	int i;
5110
5111	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5112		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5113	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5114		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5115}
5116
5117static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5118	.set = gfx_v7_0_set_eop_interrupt_state,
5119	.process = gfx_v7_0_eop_irq,
5120};
5121
5122static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5123	.set = gfx_v7_0_set_priv_reg_fault_state,
5124	.process = gfx_v7_0_priv_reg_irq,
5125};
5126
5127static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5128	.set = gfx_v7_0_set_priv_inst_fault_state,
5129	.process = gfx_v7_0_priv_inst_irq,
5130};
5131
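/* Hook up the interrupt sources for CP EOP and privileged register/
 * instruction faults.
 */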
5132static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5133{
5134	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5135	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5136
5137	adev->gfx.priv_reg_irq.num_types = 1;
5138	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5139
5140	adev->gfx.priv_inst_irq.num_types = 1;
5141	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5142}
5143
5144static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5145{
5146	/* init asic gds info */
5147	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
5148	adev->gds.gws_size = 64;
5149	adev->gds.oa_size = 16;
5150	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
5151}
5152
5153
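/* Walk every shader engine/array to build the per-SE/SH CU bitmaps, count
 * the active CUs and derive the always-on (AO) CU mask, then fill in the
 * fixed per-CU limits for this generation.
 */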
5154static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5155{
5156	int i, j, k, counter, active_cu_number = 0;
5157	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5158	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5159	unsigned disable_masks[4 * 2];
5160	u32 ao_cu_num;
5161
5162	if (adev->flags & AMD_IS_APU)
5163		ao_cu_num = 2;
5164	else
5165		ao_cu_num = adev->gfx.config.max_cu_per_sh;
5166
5167	memset(cu_info, 0, sizeof(*cu_info));
5168
5169	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5170
5171	mutex_lock(&adev->grbm_idx_mutex);
5172	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5173		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5174			mask = 1;
5175			ao_bitmap = 0;
5176			counter = 0;
5177			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5178			if (i < 4 && j < 2)
5179				gfx_v7_0_set_user_cu_inactive_bitmap(
5180					adev, disable_masks[i * 2 + j]);
5181			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5182			cu_info->bitmap[i][j] = bitmap;
5183
5184			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5185				if (bitmap & mask) {
5186					if (counter < ao_cu_num)
5187						ao_bitmap |= mask;
5188					counter++;
5189				}
5190				mask <<= 1;
5191			}
5192			active_cu_number += counter;
5193			if (i < 2 && j < 2)
5194				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5195			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5196		}
5197	}
5198	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5199	mutex_unlock(&adev->grbm_idx_mutex);
5200
5201	cu_info->number = active_cu_number;
5202	cu_info->ao_cu_mask = ao_cu_mask;
5203	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5204	cu_info->max_waves_per_simd = 10;
5205	cu_info->max_scratch_slots_per_cu = 32;
5206	cu_info->wave_front_size = 64;
5207	cu_info->lds_size = 64;
5208}
5209
5210static const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
5211{
5212	.type = AMD_IP_BLOCK_TYPE_GFX,
5213	.major = 7,
5214	.minor = 0,
5215	.rev = 0,
5216	.funcs = &gfx_v7_0_ip_funcs,
5217};
5218
5219const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
5220{
5221	.type = AMD_IP_BLOCK_TYPE_GFX,
5222	.major = 7,
5223	.minor = 1,
5224	.rev = 0,
5225	.funcs = &gfx_v7_0_ip_funcs,
5226};
5227
5228const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
5229{
5230	.type = AMD_IP_BLOCK_TYPE_GFX,
5231	.major = 7,
5232	.minor = 2,
5233	.rev = 0,
5234	.funcs = &gfx_v7_0_ip_funcs,
5235};
5236
5237const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
5238{
5239	.type = AMD_IP_BLOCK_TYPE_GFX,
5240	.major = 7,
5241	.minor = 3,
5242	.rev = 0,
5243	.funcs = &gfx_v7_0_ip_funcs,
5244};
v4.10.11
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
 
  23#include <linux/firmware.h>
  24#include "drmP.h"
 
  25#include "amdgpu.h"
  26#include "amdgpu_ih.h"
  27#include "amdgpu_gfx.h"
  28#include "cikd.h"
  29#include "cik.h"
 
  30#include "atom.h"
  31#include "amdgpu_ucode.h"
  32#include "clearstate_ci.h"
  33
  34#include "dce/dce_8_0_d.h"
  35#include "dce/dce_8_0_sh_mask.h"
  36
  37#include "bif/bif_4_1_d.h"
  38#include "bif/bif_4_1_sh_mask.h"
  39
  40#include "gca/gfx_7_0_d.h"
  41#include "gca/gfx_7_2_enum.h"
  42#include "gca/gfx_7_2_sh_mask.h"
  43
  44#include "gmc/gmc_7_0_d.h"
  45#include "gmc/gmc_7_0_sh_mask.h"
  46
  47#include "oss/oss_2_0_d.h"
  48#include "oss/oss_2_0_sh_mask.h"
  49
 
 
  50#define GFX7_NUM_GFX_RINGS     1
  51#define GFX7_NUM_COMPUTE_RINGS 8
  52
  53static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  54static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  55static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
  56
  57MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
  58MODULE_FIRMWARE("radeon/bonaire_me.bin");
  59MODULE_FIRMWARE("radeon/bonaire_ce.bin");
  60MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
  61MODULE_FIRMWARE("radeon/bonaire_mec.bin");
  62
  63MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
  64MODULE_FIRMWARE("radeon/hawaii_me.bin");
  65MODULE_FIRMWARE("radeon/hawaii_ce.bin");
  66MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
  67MODULE_FIRMWARE("radeon/hawaii_mec.bin");
  68
  69MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
  70MODULE_FIRMWARE("radeon/kaveri_me.bin");
  71MODULE_FIRMWARE("radeon/kaveri_ce.bin");
  72MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
  73MODULE_FIRMWARE("radeon/kaveri_mec.bin");
  74MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
  75
  76MODULE_FIRMWARE("radeon/kabini_pfp.bin");
  77MODULE_FIRMWARE("radeon/kabini_me.bin");
  78MODULE_FIRMWARE("radeon/kabini_ce.bin");
  79MODULE_FIRMWARE("radeon/kabini_rlc.bin");
  80MODULE_FIRMWARE("radeon/kabini_mec.bin");
  81
  82MODULE_FIRMWARE("radeon/mullins_pfp.bin");
  83MODULE_FIRMWARE("radeon/mullins_me.bin");
  84MODULE_FIRMWARE("radeon/mullins_ce.bin");
  85MODULE_FIRMWARE("radeon/mullins_rlc.bin");
  86MODULE_FIRMWARE("radeon/mullins_mec.bin");
  87
  88static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
  89{
  90	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
  91	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
  92	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
  93	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
  94	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
  95	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
  96	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
  97	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
  98	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
  99	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 100	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 101	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 102	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 103	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 104	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 105	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 106};
 107
 108static const u32 spectre_rlc_save_restore_register_list[] =
 109{
 110	(0x0e00 << 16) | (0xc12c >> 2),
 111	0x00000000,
 112	(0x0e00 << 16) | (0xc140 >> 2),
 113	0x00000000,
 114	(0x0e00 << 16) | (0xc150 >> 2),
 115	0x00000000,
 116	(0x0e00 << 16) | (0xc15c >> 2),
 117	0x00000000,
 118	(0x0e00 << 16) | (0xc168 >> 2),
 119	0x00000000,
 120	(0x0e00 << 16) | (0xc170 >> 2),
 121	0x00000000,
 122	(0x0e00 << 16) | (0xc178 >> 2),
 123	0x00000000,
 124	(0x0e00 << 16) | (0xc204 >> 2),
 125	0x00000000,
 126	(0x0e00 << 16) | (0xc2b4 >> 2),
 127	0x00000000,
 128	(0x0e00 << 16) | (0xc2b8 >> 2),
 129	0x00000000,
 130	(0x0e00 << 16) | (0xc2bc >> 2),
 131	0x00000000,
 132	(0x0e00 << 16) | (0xc2c0 >> 2),
 133	0x00000000,
 134	(0x0e00 << 16) | (0x8228 >> 2),
 135	0x00000000,
 136	(0x0e00 << 16) | (0x829c >> 2),
 137	0x00000000,
 138	(0x0e00 << 16) | (0x869c >> 2),
 139	0x00000000,
 140	(0x0600 << 16) | (0x98f4 >> 2),
 141	0x00000000,
 142	(0x0e00 << 16) | (0x98f8 >> 2),
 143	0x00000000,
 144	(0x0e00 << 16) | (0x9900 >> 2),
 145	0x00000000,
 146	(0x0e00 << 16) | (0xc260 >> 2),
 147	0x00000000,
 148	(0x0e00 << 16) | (0x90e8 >> 2),
 149	0x00000000,
 150	(0x0e00 << 16) | (0x3c000 >> 2),
 151	0x00000000,
 152	(0x0e00 << 16) | (0x3c00c >> 2),
 153	0x00000000,
 154	(0x0e00 << 16) | (0x8c1c >> 2),
 155	0x00000000,
 156	(0x0e00 << 16) | (0x9700 >> 2),
 157	0x00000000,
 158	(0x0e00 << 16) | (0xcd20 >> 2),
 159	0x00000000,
 160	(0x4e00 << 16) | (0xcd20 >> 2),
 161	0x00000000,
 162	(0x5e00 << 16) | (0xcd20 >> 2),
 163	0x00000000,
 164	(0x6e00 << 16) | (0xcd20 >> 2),
 165	0x00000000,
 166	(0x7e00 << 16) | (0xcd20 >> 2),
 167	0x00000000,
 168	(0x8e00 << 16) | (0xcd20 >> 2),
 169	0x00000000,
 170	(0x9e00 << 16) | (0xcd20 >> 2),
 171	0x00000000,
 172	(0xae00 << 16) | (0xcd20 >> 2),
 173	0x00000000,
 174	(0xbe00 << 16) | (0xcd20 >> 2),
 175	0x00000000,
 176	(0x0e00 << 16) | (0x89bc >> 2),
 177	0x00000000,
 178	(0x0e00 << 16) | (0x8900 >> 2),
 179	0x00000000,
 180	0x3,
 181	(0x0e00 << 16) | (0xc130 >> 2),
 182	0x00000000,
 183	(0x0e00 << 16) | (0xc134 >> 2),
 184	0x00000000,
 185	(0x0e00 << 16) | (0xc1fc >> 2),
 186	0x00000000,
 187	(0x0e00 << 16) | (0xc208 >> 2),
 188	0x00000000,
 189	(0x0e00 << 16) | (0xc264 >> 2),
 190	0x00000000,
 191	(0x0e00 << 16) | (0xc268 >> 2),
 192	0x00000000,
 193	(0x0e00 << 16) | (0xc26c >> 2),
 194	0x00000000,
 195	(0x0e00 << 16) | (0xc270 >> 2),
 196	0x00000000,
 197	(0x0e00 << 16) | (0xc274 >> 2),
 198	0x00000000,
 199	(0x0e00 << 16) | (0xc278 >> 2),
 200	0x00000000,
 201	(0x0e00 << 16) | (0xc27c >> 2),
 202	0x00000000,
 203	(0x0e00 << 16) | (0xc280 >> 2),
 204	0x00000000,
 205	(0x0e00 << 16) | (0xc284 >> 2),
 206	0x00000000,
 207	(0x0e00 << 16) | (0xc288 >> 2),
 208	0x00000000,
 209	(0x0e00 << 16) | (0xc28c >> 2),
 210	0x00000000,
 211	(0x0e00 << 16) | (0xc290 >> 2),
 212	0x00000000,
 213	(0x0e00 << 16) | (0xc294 >> 2),
 214	0x00000000,
 215	(0x0e00 << 16) | (0xc298 >> 2),
 216	0x00000000,
 217	(0x0e00 << 16) | (0xc29c >> 2),
 218	0x00000000,
 219	(0x0e00 << 16) | (0xc2a0 >> 2),
 220	0x00000000,
 221	(0x0e00 << 16) | (0xc2a4 >> 2),
 222	0x00000000,
 223	(0x0e00 << 16) | (0xc2a8 >> 2),
 224	0x00000000,
 225	(0x0e00 << 16) | (0xc2ac  >> 2),
 226	0x00000000,
 227	(0x0e00 << 16) | (0xc2b0 >> 2),
 228	0x00000000,
 229	(0x0e00 << 16) | (0x301d0 >> 2),
 230	0x00000000,
 231	(0x0e00 << 16) | (0x30238 >> 2),
 232	0x00000000,
 233	(0x0e00 << 16) | (0x30250 >> 2),
 234	0x00000000,
 235	(0x0e00 << 16) | (0x30254 >> 2),
 236	0x00000000,
 237	(0x0e00 << 16) | (0x30258 >> 2),
 238	0x00000000,
 239	(0x0e00 << 16) | (0x3025c >> 2),
 240	0x00000000,
 241	(0x4e00 << 16) | (0xc900 >> 2),
 242	0x00000000,
 243	(0x5e00 << 16) | (0xc900 >> 2),
 244	0x00000000,
 245	(0x6e00 << 16) | (0xc900 >> 2),
 246	0x00000000,
 247	(0x7e00 << 16) | (0xc900 >> 2),
 248	0x00000000,
 249	(0x8e00 << 16) | (0xc900 >> 2),
 250	0x00000000,
 251	(0x9e00 << 16) | (0xc900 >> 2),
 252	0x00000000,
 253	(0xae00 << 16) | (0xc900 >> 2),
 254	0x00000000,
 255	(0xbe00 << 16) | (0xc900 >> 2),
 256	0x00000000,
 257	(0x4e00 << 16) | (0xc904 >> 2),
 258	0x00000000,
 259	(0x5e00 << 16) | (0xc904 >> 2),
 260	0x00000000,
 261	(0x6e00 << 16) | (0xc904 >> 2),
 262	0x00000000,
 263	(0x7e00 << 16) | (0xc904 >> 2),
 264	0x00000000,
 265	(0x8e00 << 16) | (0xc904 >> 2),
 266	0x00000000,
 267	(0x9e00 << 16) | (0xc904 >> 2),
 268	0x00000000,
 269	(0xae00 << 16) | (0xc904 >> 2),
 270	0x00000000,
 271	(0xbe00 << 16) | (0xc904 >> 2),
 272	0x00000000,
 273	(0x4e00 << 16) | (0xc908 >> 2),
 274	0x00000000,
 275	(0x5e00 << 16) | (0xc908 >> 2),
 276	0x00000000,
 277	(0x6e00 << 16) | (0xc908 >> 2),
 278	0x00000000,
 279	(0x7e00 << 16) | (0xc908 >> 2),
 280	0x00000000,
 281	(0x8e00 << 16) | (0xc908 >> 2),
 282	0x00000000,
 283	(0x9e00 << 16) | (0xc908 >> 2),
 284	0x00000000,
 285	(0xae00 << 16) | (0xc908 >> 2),
 286	0x00000000,
 287	(0xbe00 << 16) | (0xc908 >> 2),
 288	0x00000000,
 289	(0x4e00 << 16) | (0xc90c >> 2),
 290	0x00000000,
 291	(0x5e00 << 16) | (0xc90c >> 2),
 292	0x00000000,
 293	(0x6e00 << 16) | (0xc90c >> 2),
 294	0x00000000,
 295	(0x7e00 << 16) | (0xc90c >> 2),
 296	0x00000000,
 297	(0x8e00 << 16) | (0xc90c >> 2),
 298	0x00000000,
 299	(0x9e00 << 16) | (0xc90c >> 2),
 300	0x00000000,
 301	(0xae00 << 16) | (0xc90c >> 2),
 302	0x00000000,
 303	(0xbe00 << 16) | (0xc90c >> 2),
 304	0x00000000,
 305	(0x4e00 << 16) | (0xc910 >> 2),
 306	0x00000000,
 307	(0x5e00 << 16) | (0xc910 >> 2),
 308	0x00000000,
 309	(0x6e00 << 16) | (0xc910 >> 2),
 310	0x00000000,
 311	(0x7e00 << 16) | (0xc910 >> 2),
 312	0x00000000,
 313	(0x8e00 << 16) | (0xc910 >> 2),
 314	0x00000000,
 315	(0x9e00 << 16) | (0xc910 >> 2),
 316	0x00000000,
 317	(0xae00 << 16) | (0xc910 >> 2),
 318	0x00000000,
 319	(0xbe00 << 16) | (0xc910 >> 2),
 320	0x00000000,
 321	(0x0e00 << 16) | (0xc99c >> 2),
 322	0x00000000,
 323	(0x0e00 << 16) | (0x9834 >> 2),
 324	0x00000000,
 325	(0x0000 << 16) | (0x30f00 >> 2),
 326	0x00000000,
 327	(0x0001 << 16) | (0x30f00 >> 2),
 328	0x00000000,
 329	(0x0000 << 16) | (0x30f04 >> 2),
 330	0x00000000,
 331	(0x0001 << 16) | (0x30f04 >> 2),
 332	0x00000000,
 333	(0x0000 << 16) | (0x30f08 >> 2),
 334	0x00000000,
 335	(0x0001 << 16) | (0x30f08 >> 2),
 336	0x00000000,
 337	(0x0000 << 16) | (0x30f0c >> 2),
 338	0x00000000,
 339	(0x0001 << 16) | (0x30f0c >> 2),
 340	0x00000000,
 341	(0x0600 << 16) | (0x9b7c >> 2),
 342	0x00000000,
 343	(0x0e00 << 16) | (0x8a14 >> 2),
 344	0x00000000,
 345	(0x0e00 << 16) | (0x8a18 >> 2),
 346	0x00000000,
 347	(0x0600 << 16) | (0x30a00 >> 2),
 348	0x00000000,
 349	(0x0e00 << 16) | (0x8bf0 >> 2),
 350	0x00000000,
 351	(0x0e00 << 16) | (0x8bcc >> 2),
 352	0x00000000,
 353	(0x0e00 << 16) | (0x8b24 >> 2),
 354	0x00000000,
 355	(0x0e00 << 16) | (0x30a04 >> 2),
 356	0x00000000,
 357	(0x0600 << 16) | (0x30a10 >> 2),
 358	0x00000000,
 359	(0x0600 << 16) | (0x30a14 >> 2),
 360	0x00000000,
 361	(0x0600 << 16) | (0x30a18 >> 2),
 362	0x00000000,
 363	(0x0600 << 16) | (0x30a2c >> 2),
 364	0x00000000,
 365	(0x0e00 << 16) | (0xc700 >> 2),
 366	0x00000000,
 367	(0x0e00 << 16) | (0xc704 >> 2),
 368	0x00000000,
 369	(0x0e00 << 16) | (0xc708 >> 2),
 370	0x00000000,
 371	(0x0e00 << 16) | (0xc768 >> 2),
 372	0x00000000,
 373	(0x0400 << 16) | (0xc770 >> 2),
 374	0x00000000,
 375	(0x0400 << 16) | (0xc774 >> 2),
 376	0x00000000,
 377	(0x0400 << 16) | (0xc778 >> 2),
 378	0x00000000,
 379	(0x0400 << 16) | (0xc77c >> 2),
 380	0x00000000,
 381	(0x0400 << 16) | (0xc780 >> 2),
 382	0x00000000,
 383	(0x0400 << 16) | (0xc784 >> 2),
 384	0x00000000,
 385	(0x0400 << 16) | (0xc788 >> 2),
 386	0x00000000,
 387	(0x0400 << 16) | (0xc78c >> 2),
 388	0x00000000,
 389	(0x0400 << 16) | (0xc798 >> 2),
 390	0x00000000,
 391	(0x0400 << 16) | (0xc79c >> 2),
 392	0x00000000,
 393	(0x0400 << 16) | (0xc7a0 >> 2),
 394	0x00000000,
 395	(0x0400 << 16) | (0xc7a4 >> 2),
 396	0x00000000,
 397	(0x0400 << 16) | (0xc7a8 >> 2),
 398	0x00000000,
 399	(0x0400 << 16) | (0xc7ac >> 2),
 400	0x00000000,
 401	(0x0400 << 16) | (0xc7b0 >> 2),
 402	0x00000000,
 403	(0x0400 << 16) | (0xc7b4 >> 2),
 404	0x00000000,
 405	(0x0e00 << 16) | (0x9100 >> 2),
 406	0x00000000,
 407	(0x0e00 << 16) | (0x3c010 >> 2),
 408	0x00000000,
 409	(0x0e00 << 16) | (0x92a8 >> 2),
 410	0x00000000,
 411	(0x0e00 << 16) | (0x92ac >> 2),
 412	0x00000000,
 413	(0x0e00 << 16) | (0x92b4 >> 2),
 414	0x00000000,
 415	(0x0e00 << 16) | (0x92b8 >> 2),
 416	0x00000000,
 417	(0x0e00 << 16) | (0x92bc >> 2),
 418	0x00000000,
 419	(0x0e00 << 16) | (0x92c0 >> 2),
 420	0x00000000,
 421	(0x0e00 << 16) | (0x92c4 >> 2),
 422	0x00000000,
 423	(0x0e00 << 16) | (0x92c8 >> 2),
 424	0x00000000,
 425	(0x0e00 << 16) | (0x92cc >> 2),
 426	0x00000000,
 427	(0x0e00 << 16) | (0x92d0 >> 2),
 428	0x00000000,
 429	(0x0e00 << 16) | (0x8c00 >> 2),
 430	0x00000000,
 431	(0x0e00 << 16) | (0x8c04 >> 2),
 432	0x00000000,
 433	(0x0e00 << 16) | (0x8c20 >> 2),
 434	0x00000000,
 435	(0x0e00 << 16) | (0x8c38 >> 2),
 436	0x00000000,
 437	(0x0e00 << 16) | (0x8c3c >> 2),
 438	0x00000000,
 439	(0x0e00 << 16) | (0xae00 >> 2),
 440	0x00000000,
 441	(0x0e00 << 16) | (0x9604 >> 2),
 442	0x00000000,
 443	(0x0e00 << 16) | (0xac08 >> 2),
 444	0x00000000,
 445	(0x0e00 << 16) | (0xac0c >> 2),
 446	0x00000000,
 447	(0x0e00 << 16) | (0xac10 >> 2),
 448	0x00000000,
 449	(0x0e00 << 16) | (0xac14 >> 2),
 450	0x00000000,
 451	(0x0e00 << 16) | (0xac58 >> 2),
 452	0x00000000,
 453	(0x0e00 << 16) | (0xac68 >> 2),
 454	0x00000000,
 455	(0x0e00 << 16) | (0xac6c >> 2),
 456	0x00000000,
 457	(0x0e00 << 16) | (0xac70 >> 2),
 458	0x00000000,
 459	(0x0e00 << 16) | (0xac74 >> 2),
 460	0x00000000,
 461	(0x0e00 << 16) | (0xac78 >> 2),
 462	0x00000000,
 463	(0x0e00 << 16) | (0xac7c >> 2),
 464	0x00000000,
 465	(0x0e00 << 16) | (0xac80 >> 2),
 466	0x00000000,
 467	(0x0e00 << 16) | (0xac84 >> 2),
 468	0x00000000,
 469	(0x0e00 << 16) | (0xac88 >> 2),
 470	0x00000000,
 471	(0x0e00 << 16) | (0xac8c >> 2),
 472	0x00000000,
 473	(0x0e00 << 16) | (0x970c >> 2),
 474	0x00000000,
 475	(0x0e00 << 16) | (0x9714 >> 2),
 476	0x00000000,
 477	(0x0e00 << 16) | (0x9718 >> 2),
 478	0x00000000,
 479	(0x0e00 << 16) | (0x971c >> 2),
 480	0x00000000,
 481	(0x0e00 << 16) | (0x31068 >> 2),
 482	0x00000000,
 483	(0x4e00 << 16) | (0x31068 >> 2),
 484	0x00000000,
 485	(0x5e00 << 16) | (0x31068 >> 2),
 486	0x00000000,
 487	(0x6e00 << 16) | (0x31068 >> 2),
 488	0x00000000,
 489	(0x7e00 << 16) | (0x31068 >> 2),
 490	0x00000000,
 491	(0x8e00 << 16) | (0x31068 >> 2),
 492	0x00000000,
 493	(0x9e00 << 16) | (0x31068 >> 2),
 494	0x00000000,
 495	(0xae00 << 16) | (0x31068 >> 2),
 496	0x00000000,
 497	(0xbe00 << 16) | (0x31068 >> 2),
 498	0x00000000,
 499	(0x0e00 << 16) | (0xcd10 >> 2),
 500	0x00000000,
 501	(0x0e00 << 16) | (0xcd14 >> 2),
 502	0x00000000,
 503	(0x0e00 << 16) | (0x88b0 >> 2),
 504	0x00000000,
 505	(0x0e00 << 16) | (0x88b4 >> 2),
 506	0x00000000,
 507	(0x0e00 << 16) | (0x88b8 >> 2),
 508	0x00000000,
 509	(0x0e00 << 16) | (0x88bc >> 2),
 510	0x00000000,
 511	(0x0400 << 16) | (0x89c0 >> 2),
 512	0x00000000,
 513	(0x0e00 << 16) | (0x88c4 >> 2),
 514	0x00000000,
 515	(0x0e00 << 16) | (0x88c8 >> 2),
 516	0x00000000,
 517	(0x0e00 << 16) | (0x88d0 >> 2),
 518	0x00000000,
 519	(0x0e00 << 16) | (0x88d4 >> 2),
 520	0x00000000,
 521	(0x0e00 << 16) | (0x88d8 >> 2),
 522	0x00000000,
 523	(0x0e00 << 16) | (0x8980 >> 2),
 524	0x00000000,
 525	(0x0e00 << 16) | (0x30938 >> 2),
 526	0x00000000,
 527	(0x0e00 << 16) | (0x3093c >> 2),
 528	0x00000000,
 529	(0x0e00 << 16) | (0x30940 >> 2),
 530	0x00000000,
 531	(0x0e00 << 16) | (0x89a0 >> 2),
 532	0x00000000,
 533	(0x0e00 << 16) | (0x30900 >> 2),
 534	0x00000000,
 535	(0x0e00 << 16) | (0x30904 >> 2),
 536	0x00000000,
 537	(0x0e00 << 16) | (0x89b4 >> 2),
 538	0x00000000,
 539	(0x0e00 << 16) | (0x3c210 >> 2),
 540	0x00000000,
 541	(0x0e00 << 16) | (0x3c214 >> 2),
 542	0x00000000,
 543	(0x0e00 << 16) | (0x3c218 >> 2),
 544	0x00000000,
 545	(0x0e00 << 16) | (0x8904 >> 2),
 546	0x00000000,
 547	0x5,
 548	(0x0e00 << 16) | (0x8c28 >> 2),
 549	(0x0e00 << 16) | (0x8c2c >> 2),
 550	(0x0e00 << 16) | (0x8c30 >> 2),
 551	(0x0e00 << 16) | (0x8c34 >> 2),
 552	(0x0e00 << 16) | (0x9600 >> 2),
 553};
 554
 555static const u32 kalindi_rlc_save_restore_register_list[] =
 556{
 557	(0x0e00 << 16) | (0xc12c >> 2),
 558	0x00000000,
 559	(0x0e00 << 16) | (0xc140 >> 2),
 560	0x00000000,
 561	(0x0e00 << 16) | (0xc150 >> 2),
 562	0x00000000,
 563	(0x0e00 << 16) | (0xc15c >> 2),
 564	0x00000000,
 565	(0x0e00 << 16) | (0xc168 >> 2),
 566	0x00000000,
 567	(0x0e00 << 16) | (0xc170 >> 2),
 568	0x00000000,
 569	(0x0e00 << 16) | (0xc204 >> 2),
 570	0x00000000,
 571	(0x0e00 << 16) | (0xc2b4 >> 2),
 572	0x00000000,
 573	(0x0e00 << 16) | (0xc2b8 >> 2),
 574	0x00000000,
 575	(0x0e00 << 16) | (0xc2bc >> 2),
 576	0x00000000,
 577	(0x0e00 << 16) | (0xc2c0 >> 2),
 578	0x00000000,
 579	(0x0e00 << 16) | (0x8228 >> 2),
 580	0x00000000,
 581	(0x0e00 << 16) | (0x829c >> 2),
 582	0x00000000,
 583	(0x0e00 << 16) | (0x869c >> 2),
 584	0x00000000,
 585	(0x0600 << 16) | (0x98f4 >> 2),
 586	0x00000000,
 587	(0x0e00 << 16) | (0x98f8 >> 2),
 588	0x00000000,
 589	(0x0e00 << 16) | (0x9900 >> 2),
 590	0x00000000,
 591	(0x0e00 << 16) | (0xc260 >> 2),
 592	0x00000000,
 593	(0x0e00 << 16) | (0x90e8 >> 2),
 594	0x00000000,
 595	(0x0e00 << 16) | (0x3c000 >> 2),
 596	0x00000000,
 597	(0x0e00 << 16) | (0x3c00c >> 2),
 598	0x00000000,
 599	(0x0e00 << 16) | (0x8c1c >> 2),
 600	0x00000000,
 601	(0x0e00 << 16) | (0x9700 >> 2),
 602	0x00000000,
 603	(0x0e00 << 16) | (0xcd20 >> 2),
 604	0x00000000,
 605	(0x4e00 << 16) | (0xcd20 >> 2),
 606	0x00000000,
 607	(0x5e00 << 16) | (0xcd20 >> 2),
 608	0x00000000,
 609	(0x6e00 << 16) | (0xcd20 >> 2),
 610	0x00000000,
 611	(0x7e00 << 16) | (0xcd20 >> 2),
 612	0x00000000,
 613	(0x0e00 << 16) | (0x89bc >> 2),
 614	0x00000000,
 615	(0x0e00 << 16) | (0x8900 >> 2),
 616	0x00000000,
 617	0x3,
 618	(0x0e00 << 16) | (0xc130 >> 2),
 619	0x00000000,
 620	(0x0e00 << 16) | (0xc134 >> 2),
 621	0x00000000,
 622	(0x0e00 << 16) | (0xc1fc >> 2),
 623	0x00000000,
 624	(0x0e00 << 16) | (0xc208 >> 2),
 625	0x00000000,
 626	(0x0e00 << 16) | (0xc264 >> 2),
 627	0x00000000,
 628	(0x0e00 << 16) | (0xc268 >> 2),
 629	0x00000000,
 630	(0x0e00 << 16) | (0xc26c >> 2),
 631	0x00000000,
 632	(0x0e00 << 16) | (0xc270 >> 2),
 633	0x00000000,
 634	(0x0e00 << 16) | (0xc274 >> 2),
 635	0x00000000,
 636	(0x0e00 << 16) | (0xc28c >> 2),
 637	0x00000000,
 638	(0x0e00 << 16) | (0xc290 >> 2),
 639	0x00000000,
 640	(0x0e00 << 16) | (0xc294 >> 2),
 641	0x00000000,
 642	(0x0e00 << 16) | (0xc298 >> 2),
 643	0x00000000,
 644	(0x0e00 << 16) | (0xc2a0 >> 2),
 645	0x00000000,
 646	(0x0e00 << 16) | (0xc2a4 >> 2),
 647	0x00000000,
 648	(0x0e00 << 16) | (0xc2a8 >> 2),
 649	0x00000000,
 650	(0x0e00 << 16) | (0xc2ac >> 2),
 651	0x00000000,
 652	(0x0e00 << 16) | (0x301d0 >> 2),
 653	0x00000000,
 654	(0x0e00 << 16) | (0x30238 >> 2),
 655	0x00000000,
 656	(0x0e00 << 16) | (0x30250 >> 2),
 657	0x00000000,
 658	(0x0e00 << 16) | (0x30254 >> 2),
 659	0x00000000,
 660	(0x0e00 << 16) | (0x30258 >> 2),
 661	0x00000000,
 662	(0x0e00 << 16) | (0x3025c >> 2),
 663	0x00000000,
 664	(0x4e00 << 16) | (0xc900 >> 2),
 665	0x00000000,
 666	(0x5e00 << 16) | (0xc900 >> 2),
 667	0x00000000,
 668	(0x6e00 << 16) | (0xc900 >> 2),
 669	0x00000000,
 670	(0x7e00 << 16) | (0xc900 >> 2),
 671	0x00000000,
 672	(0x4e00 << 16) | (0xc904 >> 2),
 673	0x00000000,
 674	(0x5e00 << 16) | (0xc904 >> 2),
 675	0x00000000,
 676	(0x6e00 << 16) | (0xc904 >> 2),
 677	0x00000000,
 678	(0x7e00 << 16) | (0xc904 >> 2),
 679	0x00000000,
 680	(0x4e00 << 16) | (0xc908 >> 2),
 681	0x00000000,
 682	(0x5e00 << 16) | (0xc908 >> 2),
 683	0x00000000,
 684	(0x6e00 << 16) | (0xc908 >> 2),
 685	0x00000000,
 686	(0x7e00 << 16) | (0xc908 >> 2),
 687	0x00000000,
 688	(0x4e00 << 16) | (0xc90c >> 2),
 689	0x00000000,
 690	(0x5e00 << 16) | (0xc90c >> 2),
 691	0x00000000,
 692	(0x6e00 << 16) | (0xc90c >> 2),
 693	0x00000000,
 694	(0x7e00 << 16) | (0xc90c >> 2),
 695	0x00000000,
 696	(0x4e00 << 16) | (0xc910 >> 2),
 697	0x00000000,
 698	(0x5e00 << 16) | (0xc910 >> 2),
 699	0x00000000,
 700	(0x6e00 << 16) | (0xc910 >> 2),
 701	0x00000000,
 702	(0x7e00 << 16) | (0xc910 >> 2),
 703	0x00000000,
 704	(0x0e00 << 16) | (0xc99c >> 2),
 705	0x00000000,
 706	(0x0e00 << 16) | (0x9834 >> 2),
 707	0x00000000,
 708	(0x0000 << 16) | (0x30f00 >> 2),
 709	0x00000000,
 710	(0x0000 << 16) | (0x30f04 >> 2),
 711	0x00000000,
 712	(0x0000 << 16) | (0x30f08 >> 2),
 713	0x00000000,
 714	(0x0000 << 16) | (0x30f0c >> 2),
 715	0x00000000,
 716	(0x0600 << 16) | (0x9b7c >> 2),
 717	0x00000000,
 718	(0x0e00 << 16) | (0x8a14 >> 2),
 719	0x00000000,
 720	(0x0e00 << 16) | (0x8a18 >> 2),
 721	0x00000000,
 722	(0x0600 << 16) | (0x30a00 >> 2),
 723	0x00000000,
 724	(0x0e00 << 16) | (0x8bf0 >> 2),
 725	0x00000000,
 726	(0x0e00 << 16) | (0x8bcc >> 2),
 727	0x00000000,
 728	(0x0e00 << 16) | (0x8b24 >> 2),
 729	0x00000000,
 730	(0x0e00 << 16) | (0x30a04 >> 2),
 731	0x00000000,
 732	(0x0600 << 16) | (0x30a10 >> 2),
 733	0x00000000,
 734	(0x0600 << 16) | (0x30a14 >> 2),
 735	0x00000000,
 736	(0x0600 << 16) | (0x30a18 >> 2),
 737	0x00000000,
 738	(0x0600 << 16) | (0x30a2c >> 2),
 739	0x00000000,
 740	(0x0e00 << 16) | (0xc700 >> 2),
 741	0x00000000,
 742	(0x0e00 << 16) | (0xc704 >> 2),
 743	0x00000000,
 744	(0x0e00 << 16) | (0xc708 >> 2),
 745	0x00000000,
 746	(0x0e00 << 16) | (0xc768 >> 2),
 747	0x00000000,
 748	(0x0400 << 16) | (0xc770 >> 2),
 749	0x00000000,
 750	(0x0400 << 16) | (0xc774 >> 2),
 751	0x00000000,
 752	(0x0400 << 16) | (0xc798 >> 2),
 753	0x00000000,
 754	(0x0400 << 16) | (0xc79c >> 2),
 755	0x00000000,
 756	(0x0e00 << 16) | (0x9100 >> 2),
 757	0x00000000,
 758	(0x0e00 << 16) | (0x3c010 >> 2),
 759	0x00000000,
 760	(0x0e00 << 16) | (0x8c00 >> 2),
 761	0x00000000,
 762	(0x0e00 << 16) | (0x8c04 >> 2),
 763	0x00000000,
 764	(0x0e00 << 16) | (0x8c20 >> 2),
 765	0x00000000,
 766	(0x0e00 << 16) | (0x8c38 >> 2),
 767	0x00000000,
 768	(0x0e00 << 16) | (0x8c3c >> 2),
 769	0x00000000,
 770	(0x0e00 << 16) | (0xae00 >> 2),
 771	0x00000000,
 772	(0x0e00 << 16) | (0x9604 >> 2),
 773	0x00000000,
 774	(0x0e00 << 16) | (0xac08 >> 2),
 775	0x00000000,
 776	(0x0e00 << 16) | (0xac0c >> 2),
 777	0x00000000,
 778	(0x0e00 << 16) | (0xac10 >> 2),
 779	0x00000000,
 780	(0x0e00 << 16) | (0xac14 >> 2),
 781	0x00000000,
 782	(0x0e00 << 16) | (0xac58 >> 2),
 783	0x00000000,
 784	(0x0e00 << 16) | (0xac68 >> 2),
 785	0x00000000,
 786	(0x0e00 << 16) | (0xac6c >> 2),
 787	0x00000000,
 788	(0x0e00 << 16) | (0xac70 >> 2),
 789	0x00000000,
 790	(0x0e00 << 16) | (0xac74 >> 2),
 791	0x00000000,
 792	(0x0e00 << 16) | (0xac78 >> 2),
 793	0x00000000,
 794	(0x0e00 << 16) | (0xac7c >> 2),
 795	0x00000000,
 796	(0x0e00 << 16) | (0xac80 >> 2),
 797	0x00000000,
 798	(0x0e00 << 16) | (0xac84 >> 2),
 799	0x00000000,
 800	(0x0e00 << 16) | (0xac88 >> 2),
 801	0x00000000,
 802	(0x0e00 << 16) | (0xac8c >> 2),
 803	0x00000000,
 804	(0x0e00 << 16) | (0x970c >> 2),
 805	0x00000000,
 806	(0x0e00 << 16) | (0x9714 >> 2),
 807	0x00000000,
 808	(0x0e00 << 16) | (0x9718 >> 2),
 809	0x00000000,
 810	(0x0e00 << 16) | (0x971c >> 2),
 811	0x00000000,
 812	(0x0e00 << 16) | (0x31068 >> 2),
 813	0x00000000,
 814	(0x4e00 << 16) | (0x31068 >> 2),
 815	0x00000000,
 816	(0x5e00 << 16) | (0x31068 >> 2),
 817	0x00000000,
 818	(0x6e00 << 16) | (0x31068 >> 2),
 819	0x00000000,
 820	(0x7e00 << 16) | (0x31068 >> 2),
 821	0x00000000,
 822	(0x0e00 << 16) | (0xcd10 >> 2),
 823	0x00000000,
 824	(0x0e00 << 16) | (0xcd14 >> 2),
 825	0x00000000,
 826	(0x0e00 << 16) | (0x88b0 >> 2),
 827	0x00000000,
 828	(0x0e00 << 16) | (0x88b4 >> 2),
 829	0x00000000,
 830	(0x0e00 << 16) | (0x88b8 >> 2),
 831	0x00000000,
 832	(0x0e00 << 16) | (0x88bc >> 2),
 833	0x00000000,
 834	(0x0400 << 16) | (0x89c0 >> 2),
 835	0x00000000,
 836	(0x0e00 << 16) | (0x88c4 >> 2),
 837	0x00000000,
 838	(0x0e00 << 16) | (0x88c8 >> 2),
 839	0x00000000,
 840	(0x0e00 << 16) | (0x88d0 >> 2),
 841	0x00000000,
 842	(0x0e00 << 16) | (0x88d4 >> 2),
 843	0x00000000,
 844	(0x0e00 << 16) | (0x88d8 >> 2),
 845	0x00000000,
 846	(0x0e00 << 16) | (0x8980 >> 2),
 847	0x00000000,
 848	(0x0e00 << 16) | (0x30938 >> 2),
 849	0x00000000,
 850	(0x0e00 << 16) | (0x3093c >> 2),
 851	0x00000000,
 852	(0x0e00 << 16) | (0x30940 >> 2),
 853	0x00000000,
 854	(0x0e00 << 16) | (0x89a0 >> 2),
 855	0x00000000,
 856	(0x0e00 << 16) | (0x30900 >> 2),
 857	0x00000000,
 858	(0x0e00 << 16) | (0x30904 >> 2),
 859	0x00000000,
 860	(0x0e00 << 16) | (0x89b4 >> 2),
 861	0x00000000,
 862	(0x0e00 << 16) | (0x3e1fc >> 2),
 863	0x00000000,
 864	(0x0e00 << 16) | (0x3c210 >> 2),
 865	0x00000000,
 866	(0x0e00 << 16) | (0x3c214 >> 2),
 867	0x00000000,
 868	(0x0e00 << 16) | (0x3c218 >> 2),
 869	0x00000000,
 870	(0x0e00 << 16) | (0x8904 >> 2),
 871	0x00000000,
 872	0x5,
 873	(0x0e00 << 16) | (0x8c28 >> 2),
 874	(0x0e00 << 16) | (0x8c2c >> 2),
 875	(0x0e00 << 16) | (0x8c30 >> 2),
 876	(0x0e00 << 16) | (0x8c34 >> 2),
 877	(0x0e00 << 16) | (0x9600 >> 2),
 878};
 879
 880static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 881static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
 882static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 883static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 884static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 885
 886/*
 887 * Core functions
 888 */
 889/**
 890 * gfx_v7_0_init_microcode - load ucode images from disk
 891 *
 892 * @adev: amdgpu_device pointer
 893 *
 894 * Use the firmware interface to load the ucode images into
 895 * the driver (not loaded into hw).
 896 * Returns 0 on success, error on failure.
 897 */
 898static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 899{
 900	const char *chip_name;
 901	char fw_name[30];
 902	int err;
 903
 904	DRM_DEBUG("\n");
 905
 906	switch (adev->asic_type) {
 907	case CHIP_BONAIRE:
 908		chip_name = "bonaire";
 909		break;
 910	case CHIP_HAWAII:
 911		chip_name = "hawaii";
 912		break;
 913	case CHIP_KAVERI:
 914		chip_name = "kaveri";
 915		break;
 916	case CHIP_KABINI:
 917		chip_name = "kabini";
 918		break;
 919	case CHIP_MULLINS:
 920		chip_name = "mullins";
 921		break;
 922	default: BUG();
 923	}
 924
 925	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
 926	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 927	if (err)
 928		goto out;
 929	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
 930	if (err)
 931		goto out;
 932
 933	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
 934	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
 935	if (err)
 936		goto out;
 937	err = amdgpu_ucode_validate(adev->gfx.me_fw);
 938	if (err)
 939		goto out;
 940
 941	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
 942	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
 943	if (err)
 944		goto out;
 945	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
 946	if (err)
 947		goto out;
 948
 949	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
 950	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 951	if (err)
 952		goto out;
 953	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
 954	if (err)
 955		goto out;
 956
 957	if (adev->asic_type == CHIP_KAVERI) {
 958		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
 959		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
 960		if (err)
 961			goto out;
 962		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
 963		if (err)
 964			goto out;
 965	}
 966
 967	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
 968	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 969	if (err)
 970		goto out;
 971	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
 972
 973out:
 974	if (err) {
 975		printk(KERN_ERR
 976		       "gfx7: Failed to load firmware \"%s\"\n",
 977		       fw_name);
 978		release_firmware(adev->gfx.pfp_fw);
 979		adev->gfx.pfp_fw = NULL;
 980		release_firmware(adev->gfx.me_fw);
 981		adev->gfx.me_fw = NULL;
 982		release_firmware(adev->gfx.ce_fw);
 983		adev->gfx.ce_fw = NULL;
 984		release_firmware(adev->gfx.mec_fw);
 985		adev->gfx.mec_fw = NULL;
 986		release_firmware(adev->gfx.mec2_fw);
 987		adev->gfx.mec2_fw = NULL;
 988		release_firmware(adev->gfx.rlc_fw);
 989		adev->gfx.rlc_fw = NULL;
 990	}
 991	return err;
 992}
 993
 994static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
 995{
 996	release_firmware(adev->gfx.pfp_fw);
 997	adev->gfx.pfp_fw = NULL;
 998	release_firmware(adev->gfx.me_fw);
 999	adev->gfx.me_fw = NULL;
1000	release_firmware(adev->gfx.ce_fw);
1001	adev->gfx.ce_fw = NULL;
1002	release_firmware(adev->gfx.mec_fw);
1003	adev->gfx.mec_fw = NULL;
1004	release_firmware(adev->gfx.mec2_fw);
1005	adev->gfx.mec2_fw = NULL;
1006	release_firmware(adev->gfx.rlc_fw);
1007	adev->gfx.rlc_fw = NULL;
1008}
1009
1010/**
1011 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
1012 *
1013 * @adev: amdgpu_device pointer
1014 *
1015 * Starting with SI, the tiling setup is done globally in a
1016 * set of 32 tiling modes.  Rather than selecting each set of
1017 * parameters per surface as on older asics, we just select
1018 * which index in the tiling table we want to use, and the
1019 * surface uses those parameters (CIK).
1020 */
1021static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
1022{
1023	const u32 num_tile_mode_states =
1024			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1025	const u32 num_secondary_tile_mode_states =
1026			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1027	u32 reg_offset, split_equal_to_row_size;
1028	uint32_t *tile, *macrotile;
1029
1030	tile = adev->gfx.config.tile_mode_array;
1031	macrotile = adev->gfx.config.macrotile_mode_array;
1032
1033	switch (adev->gfx.config.mem_row_size_in_kb) {
1034	case 1:
1035		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
1036		break;
1037	case 2:
1038	default:
1039		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
1040		break;
1041	case 4:
1042		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1043		break;
1044	}
1045
1046	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1047		tile[reg_offset] = 0;
1048	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1049		macrotile[reg_offset] = 0;
1050
1051	switch (adev->asic_type) {
1052	case CHIP_BONAIRE:
1053		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1054			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1055			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1056			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1057		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1058			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1059			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1060			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1061		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1062			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1063			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1064			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1065		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1066			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1067			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1068			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1069		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1070			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1071			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1072			   TILE_SPLIT(split_equal_to_row_size));
1073		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1074			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1075			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1076		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1077			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1078			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1079			   TILE_SPLIT(split_equal_to_row_size));
1080		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1081		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1082			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
1083		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1084			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1085			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1086		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1087			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1088			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1089			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1090		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1091			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1092			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1093			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1094		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1095		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1096			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1097			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1098		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1099			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1100			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1101			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1102		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1103			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1104			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1105			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1106		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1107			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1108			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1109			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1110		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1111		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1112			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1113			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1114			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1115		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1116			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1117			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1118		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1119			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1120			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1121			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1122		tile[21] =  (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1123			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1124			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1125			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1126		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1127			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1128			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1129			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1130		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1131		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1132			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1133			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1134			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1135		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1136			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1137			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1138			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1139		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1140			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1141			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1142			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1143		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1144			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1145			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1146		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1147			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1148			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1149			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1150		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1151			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1152			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1153			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1154		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1155
1156		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1157				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1158				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1159				NUM_BANKS(ADDR_SURF_16_BANK));
1160		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1161				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1162				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1163				NUM_BANKS(ADDR_SURF_16_BANK));
1164		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1165				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1166				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1167				NUM_BANKS(ADDR_SURF_16_BANK));
1168		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1169				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1170				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1171				NUM_BANKS(ADDR_SURF_16_BANK));
1172		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1173				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1174				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1175				NUM_BANKS(ADDR_SURF_16_BANK));
1176		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1177				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1178				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1179				NUM_BANKS(ADDR_SURF_8_BANK));
1180		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1181				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1182				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1183				NUM_BANKS(ADDR_SURF_4_BANK));
1184		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1185				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1186				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1187				NUM_BANKS(ADDR_SURF_16_BANK));
1188		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1189				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1190				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1191				NUM_BANKS(ADDR_SURF_16_BANK));
1192		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1193				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1194				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1195				NUM_BANKS(ADDR_SURF_16_BANK));
1196		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1197				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1198				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1199				NUM_BANKS(ADDR_SURF_16_BANK));
1200		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1201				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1202				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1203				NUM_BANKS(ADDR_SURF_16_BANK));
1204		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1205				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1206				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1207				NUM_BANKS(ADDR_SURF_8_BANK));
1208		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1209				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1210				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1211				NUM_BANKS(ADDR_SURF_4_BANK));
1212
1213		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1214			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1215		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1216			if (reg_offset != 7)
1217				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1218		break;
1219	case CHIP_HAWAII:
1220		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1221			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1222			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1223			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1224		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1225			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1226			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1227			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1228		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1229			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1230			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1231			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1232		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1233			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1234			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1235			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1236		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1237			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1238			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1239			   TILE_SPLIT(split_equal_to_row_size));
1240		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1241			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1242			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1243			   TILE_SPLIT(split_equal_to_row_size));
1244		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1245			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1246			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1247			   TILE_SPLIT(split_equal_to_row_size));
1248		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1249			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1250			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1251			   TILE_SPLIT(split_equal_to_row_size));
1252		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1253			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1254		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1255			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1256			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1257		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1258			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1259			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1260			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1261		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1262			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1263			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1264			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1265		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1266			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1267			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1268			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1269		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1270			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1271			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1272		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1273			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1274			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1275			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1276		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1277			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1278			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1279			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1280		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1281			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1282			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1283			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1284		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1285			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1286			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1287			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1288		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1289			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1290			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1291			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1292		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1293			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1294			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1295		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1296			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1297			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1298			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1299		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1300			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1301			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1302			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1303		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1304			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1305			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1306			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1307		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1308			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1309			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1310			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1311		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1312			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1313			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1314			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1315		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1316			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1317			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1318			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1319		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1320			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1321			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1322			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1323		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1324			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1325			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1326		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1327			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1328			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1329			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1330		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1331			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1332			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1333			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1334		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1335			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1336			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1337			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1338
1339		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1340				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1341				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1342				NUM_BANKS(ADDR_SURF_16_BANK));
1343		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1344				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1345				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1346				NUM_BANKS(ADDR_SURF_16_BANK));
1347		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1348				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1349				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1350				NUM_BANKS(ADDR_SURF_16_BANK));
1351		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1352				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1353				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1354				NUM_BANKS(ADDR_SURF_16_BANK));
1355		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1356				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1357				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1358				NUM_BANKS(ADDR_SURF_8_BANK));
1359		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1360				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1361				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1362				NUM_BANKS(ADDR_SURF_4_BANK));
1363		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1364				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1365				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1366				NUM_BANKS(ADDR_SURF_4_BANK));
1367		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1368				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1369				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1370				NUM_BANKS(ADDR_SURF_16_BANK));
1371		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1372				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1373				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1374				NUM_BANKS(ADDR_SURF_16_BANK));
1375		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1376				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1377				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1378				NUM_BANKS(ADDR_SURF_16_BANK));
1379		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1380				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1381				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1382				NUM_BANKS(ADDR_SURF_8_BANK));
1383		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1384				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1385				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1386				NUM_BANKS(ADDR_SURF_16_BANK));
1387		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1388				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1389				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1390				NUM_BANKS(ADDR_SURF_8_BANK));
1391		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1392				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1393				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1394				NUM_BANKS(ADDR_SURF_4_BANK));
1395
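	/* program the full tables; GB_MACROTILE_MODE7 is skipped (macrotile[7] is never initialized) */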
1396		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1397			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1398		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1399			if (reg_offset != 7)
1400				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1401		break;
1402	case CHIP_KABINI:
1403	case CHIP_KAVERI:
1404	case CHIP_MULLINS:
1405	default:
1406		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1407			   PIPE_CONFIG(ADDR_SURF_P2) |
1408			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1409			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1410		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1411			   PIPE_CONFIG(ADDR_SURF_P2) |
1412			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1413			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1414		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1415			   PIPE_CONFIG(ADDR_SURF_P2) |
1416			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1417			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1418		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1419			   PIPE_CONFIG(ADDR_SURF_P2) |
1420			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1421			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1422		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1423			   PIPE_CONFIG(ADDR_SURF_P2) |
1424			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1425			   TILE_SPLIT(split_equal_to_row_size));
1426		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1427			   PIPE_CONFIG(ADDR_SURF_P2) |
1428			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1429		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1430			   PIPE_CONFIG(ADDR_SURF_P2) |
1431			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1432			   TILE_SPLIT(split_equal_to_row_size));
1433		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1434		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1435			   PIPE_CONFIG(ADDR_SURF_P2));
1436		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1437			   PIPE_CONFIG(ADDR_SURF_P2) |
1438			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1439		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1440			    PIPE_CONFIG(ADDR_SURF_P2) |
1441			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1442			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1443		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1444			    PIPE_CONFIG(ADDR_SURF_P2) |
1445			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1446			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1447		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1448		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1449			    PIPE_CONFIG(ADDR_SURF_P2) |
1450			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1451		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1452			    PIPE_CONFIG(ADDR_SURF_P2) |
1453			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1454			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1455		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1456			    PIPE_CONFIG(ADDR_SURF_P2) |
1457			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1458			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1459		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1460			    PIPE_CONFIG(ADDR_SURF_P2) |
1461			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1462			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1463		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1464		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1465			    PIPE_CONFIG(ADDR_SURF_P2) |
1466			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1467			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1468		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1469			    PIPE_CONFIG(ADDR_SURF_P2) |
1470			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1471		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1472			    PIPE_CONFIG(ADDR_SURF_P2) |
1473			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1474			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1475		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1476			    PIPE_CONFIG(ADDR_SURF_P2) |
1477			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1478			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1479		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1480			    PIPE_CONFIG(ADDR_SURF_P2) |
1481			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1482			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1483		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1484		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1485			    PIPE_CONFIG(ADDR_SURF_P2) |
1486			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1487			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1488		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1489			    PIPE_CONFIG(ADDR_SURF_P2) |
1490			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1491			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1492		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1493			    PIPE_CONFIG(ADDR_SURF_P2) |
1494			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1495			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1496		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1497			    PIPE_CONFIG(ADDR_SURF_P2) |
1498			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1499		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1500			    PIPE_CONFIG(ADDR_SURF_P2) |
1501			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1502			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1503		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1504			    PIPE_CONFIG(ADDR_SURF_P2) |
1505			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1506			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1507		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1508
1509		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1510				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1511				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1512				NUM_BANKS(ADDR_SURF_8_BANK));
1513		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1514				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1515				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1516				NUM_BANKS(ADDR_SURF_8_BANK));
1517		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1518				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1519				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1520				NUM_BANKS(ADDR_SURF_8_BANK));
1521		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1522				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1523				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1524				NUM_BANKS(ADDR_SURF_8_BANK));
1525		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1526				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1527				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1528				NUM_BANKS(ADDR_SURF_8_BANK));
1529		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1530				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1531				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1532				NUM_BANKS(ADDR_SURF_8_BANK));
1533		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1534				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1535				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1536				NUM_BANKS(ADDR_SURF_8_BANK));
1537		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1538				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1539				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1540				NUM_BANKS(ADDR_SURF_16_BANK));
1541		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1542				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1543				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1544				NUM_BANKS(ADDR_SURF_16_BANK));
1545		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1546				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1547				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1548				NUM_BANKS(ADDR_SURF_16_BANK));
1549		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1550				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1551				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1552				NUM_BANKS(ADDR_SURF_16_BANK));
1553		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1554				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1555				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1556				NUM_BANKS(ADDR_SURF_16_BANK));
1557		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1558				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1559				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1560				NUM_BANKS(ADDR_SURF_16_BANK));
1561		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1562				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1563				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1564				NUM_BANKS(ADDR_SURF_8_BANK));
1565
1566		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1567			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1568		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1569			if (reg_offset != 7)
1570				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1571		break;
1572	}
1573}
1574
1575/**
1576 * gfx_v7_0_select_se_sh - select which SE, SH to address
1577 *
1578 * @adev: amdgpu_device pointer
1579 * @se_num: shader engine to address
1580 * @sh_num: sh block to address
 * @instance: SH instance to address (0xffffffff = broadcast)
1581 *
1582 * Select which SE, SH combinations to address. Certain
1583 * registers are instanced per SE or SH.  0xffffffff means
1584 * broadcast to all SEs or SHs (CIK).
1585 */
1586static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1587				  u32 se_num, u32 sh_num, u32 instance)
1588{
1589	u32 data;
1590
1591	if (instance == 0xffffffff)
1592		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1593	else
1594		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1595
1596	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1597		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1598			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
1599	else if (se_num == 0xffffffff)
1600		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
1601			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
1602	else if (sh_num == 0xffffffff)
1603		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1604			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1605	else
1606		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
1607			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1608	WREG32(mmGRBM_GFX_INDEX, data);
1609}
1610
1611/**
1612 * gfx_v7_0_create_bitmask - create a bitmask
1613 *
1614 * @bit_width: length of the mask
1615 *
1616 * create a variable length bit mask (CIK).
1617 * Returns the bitmask.
1618 */
1619static u32 gfx_v7_0_create_bitmask(u32 bit_width)
1620{
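	/* e.g. bit_width = 4 gives (1 << 4) - 1 = 0xf */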
1621	return (u32)((1ULL << bit_width) - 1);
1622}
1623
1624/**
1625 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1626 *
1627 * @adev: amdgpu_device pointer
1628 *
1629 * Calculates the bitmask of enabled RBs (CIK).
1630 * Returns the enabled RB bitmask.
1631 */
1632static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1633{
1634	u32 data, mask;
1635
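	/*
	 * OR the fuse-disabled (CC) and user-disabled (GC_USER) RB bits,
	 * then invert and mask down to the RBs owned by the currently
	 * selected SE/SH to get the enabled-RB bitmap.
	 */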
1636	data = RREG32(mmCC_RB_BACKEND_DISABLE);
1637	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1638
1639	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1640	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1641
1642	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
1643				       adev->gfx.config.max_sh_per_se);
1644
1645	return (~data) & mask;
1646}
1647
1648static void
1649gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1650{
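	/*
	 * Per-ASIC PA_SC_RASTER_CONFIG/_1 defaults for a fully populated RB
	 * layout; gfx_v7_0_setup_rb() falls back to
	 * gfx_v7_0_write_harvested_raster_configs() when RBs are harvested.
	 */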
1651	switch (adev->asic_type) {
1652	case CHIP_BONAIRE:
1653		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1654			  SE_XSEL(1) | SE_YSEL(1);
1655		*rconf1 |= 0x0;
1656		break;
1657	case CHIP_HAWAII:
1658		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1659			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1660			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1661			  SE_YSEL(3);
1662		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1663			   SE_PAIR_YSEL(2);
1664		break;
1665	case CHIP_KAVERI:
1666		*rconf |= RB_MAP_PKR0(2);
1667		*rconf1 |= 0x0;
1668		break;
1669	case CHIP_KABINI:
1670	case CHIP_MULLINS:
1671		*rconf |= 0x0;
1672		*rconf1 |= 0x0;
1673		break;
1674	default:
1675		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1676		break;
1677	}
1678}
1679
1680static void
1681gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1682					u32 raster_config, u32 raster_config_1,
1683					unsigned rb_mask, unsigned num_rb)
1684{
1685	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1686	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1687	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1688	unsigned rb_per_se = num_rb / num_se;
1689	unsigned se_mask[4];
1690	unsigned se;
1691
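	/* carve the global RB mask into per-SE masks (up to four SEs) */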
1692	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1693	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1694	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1695	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1696
1697	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1698	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1699	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1700
1701	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1702			     (!se_mask[2] && !se_mask[3]))) {
1703		raster_config_1 &= ~SE_PAIR_MAP_MASK;
1704
1705		if (!se_mask[0] && !se_mask[1]) {
1706			raster_config_1 |=
1707				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1708		} else {
1709			raster_config_1 |=
1710				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1711		}
1712	}
1713
1714	for (se = 0; se < num_se; se++) {
1715		unsigned raster_config_se = raster_config;
1716		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1717		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1718		int idx = (se / 2) * 2;
1719
1720		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1721			raster_config_se &= ~SE_MAP_MASK;
1722
1723			if (!se_mask[idx]) {
1724				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1725			} else {
1726				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1727			}
1728		}
1729
1730		pkr0_mask &= rb_mask;
1731		pkr1_mask &= rb_mask;
1732		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1733			raster_config_se &= ~PKR_MAP_MASK;
1734
1735			if (!pkr0_mask) {
1736				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1737			} else {
1738				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1739			}
1740		}
1741
1742		if (rb_per_se >= 2) {
1743			unsigned rb0_mask = 1 << (se * rb_per_se);
1744			unsigned rb1_mask = rb0_mask << 1;
1745
1746			rb0_mask &= rb_mask;
1747			rb1_mask &= rb_mask;
1748			if (!rb0_mask || !rb1_mask) {
1749				raster_config_se &= ~RB_MAP_PKR0_MASK;
1750
1751				if (!rb0_mask) {
1752					raster_config_se |=
1753						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1754				} else {
1755					raster_config_se |=
1756						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1757				}
1758			}
1759
1760			if (rb_per_se > 2) {
1761				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1762				rb1_mask = rb0_mask << 1;
1763				rb0_mask &= rb_mask;
1764				rb1_mask &= rb_mask;
1765				if (!rb0_mask || !rb1_mask) {
1766					raster_config_se &= ~RB_MAP_PKR1_MASK;
1767
1768					if (!rb0_mask) {
1769						raster_config_se |=
1770							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1771					} else {
1772						raster_config_se |=
1773							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1774					}
1775				}
1776			}
1777		}
1778
1779		/* GRBM_GFX_INDEX has a different offset on CI+ */
1780		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1781		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1782		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1783	}
1784
1785	/* GRBM_GFX_INDEX has a different offset on CI+ */
1786	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1787}
1788
1789/**
1790 * gfx_v7_0_setup_rb - setup the RBs on the asic
1791 *
1792 * @adev: amdgpu_device pointer
1795 *
1796 * Configures per-SE/SH RB registers (CIK).
1797 */
1798static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1799{
1800	int i, j;
1801	u32 data;
1802	u32 raster_config = 0, raster_config_1 = 0;
1803	u32 active_rbs = 0;
1804	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1805					adev->gfx.config.max_sh_per_se;
1806	unsigned num_rb_pipes;
1807
1808	mutex_lock(&adev->grbm_idx_mutex);
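	/*
	 * Walk every SE/SH pair, read its enabled-RB bitmap and pack the
	 * results into one global mask, rb_bitmap_width_per_sh bits per SH.
	 */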
1809	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1810		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1811			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1812			data = gfx_v7_0_get_rb_active_bitmap(adev);
1813			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1814					       rb_bitmap_width_per_sh);
1815		}
1816	}
1817	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1818
1819	adev->gfx.config.backend_enable_mask = active_rbs;
1820	adev->gfx.config.num_rbs = hweight32(active_rbs);
1821
1822	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1823			     adev->gfx.config.max_shader_engines, 16);
1824
1825	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1826
1827	if (!adev->gfx.config.backend_enable_mask ||
1828			adev->gfx.config.num_rbs >= num_rb_pipes) {
1829		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1830		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1831	} else {
1832		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1833							adev->gfx.config.backend_enable_mask,
1834							num_rb_pipes);
1835	}
1836	mutex_unlock(&adev->grbm_idx_mutex);
1837}
1838
1839/**
1840 * gmc_v7_0_init_compute_vmid - init the SH_MEM registers for compute VMIDs
1841 *
1842 * @adev: amdgpu_device pointer
1843 *
1844 * Initialize compute vmid sh_mem registers
1845 *
1846 */
1847#define DEFAULT_SH_MEM_BASES	(0x6000)
1848#define FIRST_COMPUTE_VMID	(8)
1849#define LAST_COMPUTE_VMID	(16)
1850static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1851{
1852	int i;
1853	uint32_t sh_mem_config;
1854	uint32_t sh_mem_bases;
1855
1856	/*
1857	 * Configure apertures:
1858	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1859	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1860	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1861	 */
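	/*
	 * SH_MEM_BASES holds two 16-bit aperture base fields (private and
	 * shared); each appears to encode the top 16 bits of the 64-bit
	 * address, so 0x6000 places both apertures in the ranges above.
	 */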
1862	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1863	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1864			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1865	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
1866	mutex_lock(&adev->srbm_mutex);
1867	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1868		cik_srbm_select(adev, 0, 0, 0, i);
1869		/* CP and shaders */
1870		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
1871		WREG32(mmSH_MEM_APE1_BASE, 1);
1872		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1873		WREG32(mmSH_MEM_BASES, sh_mem_bases);
1874	}
1875	cik_srbm_select(adev, 0, 0, 0, 0);
1876	mutex_unlock(&adev->srbm_mutex);
1877}
1878
1879/**
1880 * gfx_v7_0_gpu_init - setup the 3D engine
1881 *
1882 * @adev: amdgpu_device pointer
1883 *
1884 * Configures the 3D engine and tiling configuration
1885 * registers so that the 3D engine is usable.
1886 */
1887static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
1888{
1889	u32 tmp, sh_mem_cfg;
1890	int i;
1891
1892	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1893
1894	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1895	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1896	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1897
1898	gfx_v7_0_tiling_mode_table_init(adev);
1899
1900	gfx_v7_0_setup_rb(adev);
1901	gfx_v7_0_get_cu_info(adev);
1902
1903	/* set HW defaults for 3D engine */
1904	WREG32(mmCP_MEQ_THRESHOLDS,
1905	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1906	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1907
1908	mutex_lock(&adev->grbm_idx_mutex);
1909	/*
1910	 * making sure that the following register writes will be broadcasted
1911	 * to all the shaders
1912	 */
1913	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1914
1915	/* XXX SH_MEM regs */
1916	/* where to put LDS, scratch, GPUVM in FSA64 space */
1917	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1918				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1919
1920	mutex_lock(&adev->srbm_mutex);
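	/*
	 * Program default SH_MEM settings for all 16 VMIDs; VMIDs 8-15 are
	 * re-initialized for compute use by gmc_v7_0_init_compute_vmid()
	 * right after this loop.
	 */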
1921	for (i = 0; i < 16; i++) {
1922		cik_srbm_select(adev, 0, 0, 0, i);
1923		/* CP and shaders */
1924		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1925		WREG32(mmSH_MEM_APE1_BASE, 1);
1926		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1927		WREG32(mmSH_MEM_BASES, 0);
1928	}
1929	cik_srbm_select(adev, 0, 0, 0, 0);
1930	mutex_unlock(&adev->srbm_mutex);
1931
1932	gmc_v7_0_init_compute_vmid(adev);
1933
1934	WREG32(mmSX_DEBUG_1, 0x20);
1935
1936	WREG32(mmTA_CNTL_AUX, 0x00010000);
1937
1938	tmp = RREG32(mmSPI_CONFIG_CNTL);
1939	tmp |= 0x03000000;
1940	WREG32(mmSPI_CONFIG_CNTL, tmp);
1941
1942	WREG32(mmSQ_CONFIG, 1);
1943
1944	WREG32(mmDB_DEBUG, 0);
1945
1946	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
1947	tmp |= 0x00000400;
1948	WREG32(mmDB_DEBUG2, tmp);
1949
1950	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
1951	tmp |= 0x00020200;
1952	WREG32(mmDB_DEBUG3, tmp);
1953
1954	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
1955	tmp |= 0x00018208;
1956	WREG32(mmCB_HW_CONTROL, tmp);
1957
1958	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1959
1960	WREG32(mmPA_SC_FIFO_SIZE,
1961		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1962		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1963		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1964		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1965
1966	WREG32(mmVGT_NUM_INSTANCES, 1);
1967
1968	WREG32(mmCP_PERFMON_CNTL, 0);
1969
1970	WREG32(mmSQ_CONFIG, 0);
1971
1972	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
1973		((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
1974		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
1975
1976	WREG32(mmVGT_CACHE_INVALIDATION,
1977		(VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
1978		(ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
1979
1980	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
1981	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
1982
1983	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
1984			(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
1985	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
1986	mutex_unlock(&adev->grbm_idx_mutex);
1987
1988	udelay(50);
1989}
1990
1991/*
1992 * GPU scratch registers helpers function.
1993 */
1994/**
1995 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
1996 *
1997 * @adev: amdgpu_device pointer
1998 *
1999 * Set up the number and offset of the CP scratch registers.
2000 * NOTE: use of CP scratch registers is a legacy interface and
2001 * is not used by default on newer asics (r6xx+).  On newer asics,
2002 * memory buffers are used for fences rather than scratch regs.
2003 */
2004static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
2005{
2006	int i;
2007
2008	adev->gfx.scratch.num_reg = 7;
2009	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
2010	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
2011		adev->gfx.scratch.free[i] = true;
2012		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
2013	}
2014}
2015
2016/**
2017 * gfx_v7_0_ring_test_ring - basic gfx ring test
2018 *
2020 * @ring: amdgpu_ring structure holding ring information
2021 *
2022 * Allocate a scratch register and write to it using the gfx ring (CIK).
2023 * Provides a basic gfx ring test to verify that the ring is working.
2024 * Used by gfx_v7_0_cp_gfx_resume().
2025 * Returns 0 on success, error on failure.
2026 */
2027static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2028{
2029	struct amdgpu_device *adev = ring->adev;
2030	uint32_t scratch;
2031	uint32_t tmp = 0;
2032	unsigned i;
2033	int r;
2034
2035	r = amdgpu_gfx_scratch_get(adev, &scratch);
2036	if (r) {
2037		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
2038		return r;
2039	}
2040	WREG32(scratch, 0xCAFEDEAD);
2041	r = amdgpu_ring_alloc(ring, 3);
2042	if (r) {
2043		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
2044		amdgpu_gfx_scratch_free(adev, scratch);
2045		return r;
2046	}
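	/* emit a SET_UCONFIG_REG packet that writes 0xDEADBEEF into the scratch register */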
2047	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2048	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2049	amdgpu_ring_write(ring, 0xDEADBEEF);
2050	amdgpu_ring_commit(ring);
2051
2052	for (i = 0; i < adev->usec_timeout; i++) {
2053		tmp = RREG32(scratch);
2054		if (tmp == 0xDEADBEEF)
2055			break;
2056		DRM_UDELAY(1);
2057	}
2058	if (i < adev->usec_timeout) {
2059		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2060	} else {
2061		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2062			  ring->idx, scratch, tmp);
2063		r = -EINVAL;
2064	}
2065	amdgpu_gfx_scratch_free(adev, scratch);
2066	return r;
2067}
2068
2069/**
2070 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2071 *
2072 * @ring: amdgpu_ring structure holding ring information
2074 *
2075 * Emits an hdp flush on the cp.
2076 */
2077static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2078{
2079	u32 ref_and_mask;
2080	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2081
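	/*
	 * Pick the GPU_HDP_FLUSH_DONE bit owned by this ring: the gfx ring
	 * uses CP0, MEC1 pipes shift up from the CP2 bit and MEC2 pipes
	 * from the CP6 bit.
	 */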
2082	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2083		switch (ring->me) {
2084		case 1:
2085			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2086			break;
2087		case 2:
2088			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2089			break;
2090		default:
2091			return;
2092		}
2093	} else {
2094		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2095	}
2096
2097	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2098	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2099				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
2100				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
2101	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2102	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2103	amdgpu_ring_write(ring, ref_and_mask);
2104	amdgpu_ring_write(ring, ref_and_mask);
2105	amdgpu_ring_write(ring, 0x20); /* poll interval */
2106}
2107
2108static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2109{
2110	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2111	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2112		EVENT_INDEX(4));
2113
2114	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2115	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2116		EVENT_INDEX(0));
2117}
2118
2119
2120/**
2121 * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
2122 *
2123 * @ring: amdgpu_ring structure holding ring information
2125 *
2126 * Emits an hdp invalidate on the cp.
2127 */
2128static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
2129{
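	/* a CP write of 1 to HDP_DEBUG0 invalidates the HDP read cache on CIK */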
2130	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2131	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2132				 WRITE_DATA_DST_SEL(0) |
2133				 WR_CONFIRM));
2134	amdgpu_ring_write(ring, mmHDP_DEBUG0);
2135	amdgpu_ring_write(ring, 0);
2136	amdgpu_ring_write(ring, 1);
2137}
2138
2139/**
2140 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2141 *
2142 * @ring: amdgpu_ring structure holding ring information
2143 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* flags controlling 64-bit writes and interrupts
2144 *
2145 * Emits a fence sequence number on the gfx ring and flushes
2146 * GPU caches.
2147 */
2148static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2149					 u64 seq, unsigned flags)
2150{
2151	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2152	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2153	/* Workaround for cache flush problems. First send a dummy EOP
2154	 * event down the pipe with seq one below.
2155	 */
2156	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2157	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2158				 EOP_TC_ACTION_EN |
2159				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2160				 EVENT_INDEX(5)));
2161	amdgpu_ring_write(ring, addr & 0xfffffffc);
2162	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2163				DATA_SEL(1) | INT_SEL(0));
2164	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2165	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2166
2167	/* Then send the real EOP event down the pipe. */
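	/* DATA_SEL 1/2 selects a 32/64-bit seq write; INT_SEL 2 also raises an interrupt */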
2168	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2169	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2170				 EOP_TC_ACTION_EN |
2171				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2172				 EVENT_INDEX(5)));
2173	amdgpu_ring_write(ring, addr & 0xfffffffc);
2174	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2175				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2176	amdgpu_ring_write(ring, lower_32_bits(seq));
2177	amdgpu_ring_write(ring, upper_32_bits(seq));
2178}
2179
2180/**
2181 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2182 *
2183 * @ring: amdgpu_ring structure holding ring information
2184 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* flags controlling 64-bit writes and interrupts
2185 *
2186 * Emits a fence sequence number on the compute ring and flushes
2187 * GPU caches.
2188 */
2189static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2190					     u64 addr, u64 seq,
2191					     unsigned flags)
2192{
2193	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2194	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2195
2196	/* RELEASE_MEM - flush caches, send int */
2197	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2198	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2199				 EOP_TC_ACTION_EN |
2200				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2201				 EVENT_INDEX(5)));
2202	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2203	amdgpu_ring_write(ring, addr & 0xfffffffc);
2204	amdgpu_ring_write(ring, upper_32_bits(addr));
2205	amdgpu_ring_write(ring, lower_32_bits(seq));
2206	amdgpu_ring_write(ring, upper_32_bits(seq));
2207}
2208
2209/*
2210 * IB stuff
2211 */
2212/**
2213 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the gfx ring
2214 *
2215 * @ring: amdgpu_ring structure holding ring information
2216 * @ib: amdgpu indirect buffer object
 * @vm_id: VMID the IB is submitted under
 * @ctx_switch: true to emit a SWITCH_BUFFER before the first IB of a ring frame
2217 *
2218 * Emits a DE (drawing engine) or CE (constant engine) IB
2219 * on the gfx ring.  IBs are usually generated by userspace
2220 * acceleration drivers and submitted to the kernel for
2221 * scheduling on the ring.  This function schedules the IB
2222 * on the gfx ring for execution by the GPU.
2223 */
2224static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2225				      struct amdgpu_ib *ib,
2226				      unsigned vm_id, bool ctx_switch)
2227{
2228	u32 header, control = 0;
2229
2230	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
2231	if (ctx_switch) {
2232		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2233		amdgpu_ring_write(ring, 0);
2234	}
2235
2236	if (ib->flags & AMDGPU_IB_FLAG_CE)
2237		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2238	else
2239		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2240
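	/* control: IB length in dwords in the low bits, VMID starting at bit 24 */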
2241	control |= ib->length_dw | (vm_id << 24);
2242
2243	amdgpu_ring_write(ring, header);
2244	amdgpu_ring_write(ring,
2245#ifdef __BIG_ENDIAN
2246			  (2 << 0) |
2247#endif
2248			  (ib->gpu_addr & 0xFFFFFFFC));
2249	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2250	amdgpu_ring_write(ring, control);
2251}
2252
2253static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2254					  struct amdgpu_ib *ib,
2255					  unsigned vm_id, bool ctx_switch)
2256{
2257	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
2258
2259	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2260	amdgpu_ring_write(ring,
2261#ifdef __BIG_ENDIAN
2262					  (2 << 0) |
2263#endif
2264					  (ib->gpu_addr & 0xFFFFFFFC));
2265	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2266	amdgpu_ring_write(ring, control);
2267}
2268
2269static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2270{
2271	uint32_t dw2 = 0;
2272
2273	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
2274	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2275		gfx_v7_0_ring_emit_vgt_flush(ring);
2276		/* set load_global_config & load_global_uconfig */
2277		dw2 |= 0x8001;
2278		/* set load_cs_sh_regs */
2279		dw2 |= 0x01000000;
2280		/* set load_per_context_state & load_gfx_sh_regs */
2281		dw2 |= 0x10002;
2282	}
2283
2284	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2285	amdgpu_ring_write(ring, dw2);
2286	amdgpu_ring_write(ring, 0);
2287}
2288
2289/**
2290 * gfx_v7_0_ring_test_ib - basic ring IB test
2291 *
2292 * @ring: amdgpu_ring structure holding ring information
 * @timeout: how long to wait for the IB fence, in jiffies
2293 *
2294 * Allocate an IB and execute it on the gfx ring (CIK).
2295 * Provides a basic gfx ring test to verify that IBs are working.
2296 * Returns 0 on success, error on failure.
2297 */
2298static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2299{
2300	struct amdgpu_device *adev = ring->adev;
2301	struct amdgpu_ib ib;
2302	struct dma_fence *f = NULL;
2303	uint32_t scratch;
2304	uint32_t tmp = 0;
2305	long r;
2306
2307	r = amdgpu_gfx_scratch_get(adev, &scratch);
2308	if (r) {
2309		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
2310		return r;
2311	}
2312	WREG32(scratch, 0xCAFEDEAD);
2313	memset(&ib, 0, sizeof(ib));
2314	r = amdgpu_ib_get(adev, NULL, 256, &ib);
2315	if (r) {
2316		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
2317		goto err1;
2318	}
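	/* build a 3-dword IB: a SET_UCONFIG_REG write of 0xDEADBEEF to the scratch register */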
2319	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2320	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2321	ib.ptr[2] = 0xDEADBEEF;
2322	ib.length_dw = 3;
2323
2324	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
2325	if (r)
2326		goto err2;
2327
2328	r = dma_fence_wait_timeout(f, false, timeout);
2329	if (r == 0) {
2330		DRM_ERROR("amdgpu: IB test timed out\n");
2331		r = -ETIMEDOUT;
2332		goto err2;
2333	} else if (r < 0) {
2334		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
2335		goto err2;
2336	}
2337	tmp = RREG32(scratch);
2338	if (tmp == 0xDEADBEEF) {
2339		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
2340		r = 0;
2341	} else {
2342		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
2343			  scratch, tmp);
2344		r = -EINVAL;
2345	}
2346
2347err2:
2348	amdgpu_ib_free(adev, &ib, NULL);
2349	dma_fence_put(f);
2350err1:
2351	amdgpu_gfx_scratch_free(adev, scratch);
2352	return r;
2353}
2354
2355/*
2356 * CP.
2357 * On CIK, gfx and compute now have independent command processors.
2358 *
2359 * GFX
2360 * Gfx consists of a single ring and can process both gfx jobs and
2361 * compute jobs.  The gfx CP consists of three microengines (ME):
2362 * PFP - Pre-Fetch Parser
2363 * ME - Micro Engine
2364 * CE - Constant Engine
2365 * The PFP and ME make up what is considered the Drawing Engine (DE).
2366 * The CE is an asynchronous engine used for updating buffer descriptors
2367 * used by the DE so that they can be loaded into cache in parallel
2368 * while the DE is processing state update packets.
2369 *
2370 * Compute
2371 * The compute CP consists of two microengines (ME):
2372 * MEC1 - Compute MicroEngine 1
2373 * MEC2 - Compute MicroEngine 2
2374 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2375 * The queues are exposed to userspace and are programmed directly
2376 * by the compute runtime.
2377 */
2378/**
2379 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2380 *
2381 * @adev: amdgpu_device pointer
2382 * @enable: enable or disable the MEs
2383 *
2384 * Halts or unhalts the gfx MEs.
2385 */
2386static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2387{
2388	int i;
2389
2390	if (enable) {
2391		WREG32(mmCP_ME_CNTL, 0);
2392	} else {
2393		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
2394		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2395			adev->gfx.gfx_ring[i].ready = false;
2396	}
2397	udelay(50);
2398}
2399
2400/**
2401 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2402 *
2403 * @adev: amdgpu_device pointer
2404 *
2405 * Loads the gfx PFP, ME, and CE ucode.
2406 * Returns 0 for success, -EINVAL if the ucode is not available.
2407 */
2408static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2409{
2410	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2411	const struct gfx_firmware_header_v1_0 *ce_hdr;
2412	const struct gfx_firmware_header_v1_0 *me_hdr;
2413	const __le32 *fw_data;
2414	unsigned i, fw_size;
2415
2416	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2417		return -EINVAL;
2418
2419	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2420	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2421	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2422
2423	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2424	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2425	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2426	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2427	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2428	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2429	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2430	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2431	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2432
2433	gfx_v7_0_cp_gfx_enable(adev, false);
2434
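	/*
	 * Each ucode image is streamed dword-by-dword into the engine's
	 * instruction RAM through its ADDR/DATA register pair; the ADDR
	 * register is finally written with the firmware version.
	 */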
2435	/* PFP */
2436	fw_data = (const __le32 *)
2437		(adev->gfx.pfp_fw->data +
2438		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2439	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2440	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2441	for (i = 0; i < fw_size; i++)
2442		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2443	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2444
2445	/* CE */
2446	fw_data = (const __le32 *)
2447		(adev->gfx.ce_fw->data +
2448		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2449	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2450	WREG32(mmCP_CE_UCODE_ADDR, 0);
2451	for (i = 0; i < fw_size; i++)
2452		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2453	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2454
2455	/* ME */
2456	fw_data = (const __le32 *)
2457		(adev->gfx.me_fw->data +
2458		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2459	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2460	WREG32(mmCP_ME_RAM_WADDR, 0);
2461	for (i = 0; i < fw_size; i++)
2462		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2463	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2464
2465	return 0;
2466}
2467
2468/**
2469 * gfx_v7_0_cp_gfx_start - start the gfx ring
2470 *
2471 * @adev: amdgpu_device pointer
2472 *
2473 * Enables the ring and loads the clear state context and other
2474 * packets required to init the ring.
2475 * Returns 0 for success, error for failure.
2476 */
2477static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2478{
2479	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2480	const struct cs_section_def *sect = NULL;
2481	const struct cs_extent_def *ext = NULL;
2482	int r, i;
2483
2484	/* init the CP */
2485	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2486	WREG32(mmCP_ENDIAN_SWAP, 0);
2487	WREG32(mmCP_DEVICE_ID, 1);
2488
2489	gfx_v7_0_cp_gfx_enable(adev, true);
2490
2491	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2492	if (r) {
2493		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2494		return r;
2495	}
2496
2497	/* init the CE partitions.  CE only used for gfx on CIK */
2498	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2499	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2500	amdgpu_ring_write(ring, 0x8000);
2501	amdgpu_ring_write(ring, 0x8000);
2502
2503	/* clear state buffer */
2504	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2505	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2506
2507	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2508	amdgpu_ring_write(ring, 0x80000000);
2509	amdgpu_ring_write(ring, 0x80000000);
2510
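	/* replay the golden clear-state image as SET_CONTEXT_REG bursts */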
2511	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2512		for (ext = sect->section; ext->extent != NULL; ++ext) {
2513			if (sect->id == SECT_CONTEXT) {
2514				amdgpu_ring_write(ring,
2515						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2516				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2517				for (i = 0; i < ext->reg_count; i++)
2518					amdgpu_ring_write(ring, ext->extent[i]);
2519			}
2520		}
2521	}
2522
2523	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2524	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2525	switch (adev->asic_type) {
2526	case CHIP_BONAIRE:
2527		amdgpu_ring_write(ring, 0x16000012);
2528		amdgpu_ring_write(ring, 0x00000000);
2529		break;
2530	case CHIP_KAVERI:
2531		amdgpu_ring_write(ring, 0x00000000); /* XXX */
2532		amdgpu_ring_write(ring, 0x00000000);
2533		break;
2534	case CHIP_KABINI:
2535	case CHIP_MULLINS:
2536		amdgpu_ring_write(ring, 0x00000000); /* XXX */
2537		amdgpu_ring_write(ring, 0x00000000);
2538		break;
2539	case CHIP_HAWAII:
2540		amdgpu_ring_write(ring, 0x3a00161a);
2541		amdgpu_ring_write(ring, 0x0000002e);
2542		break;
2543	default:
2544		amdgpu_ring_write(ring, 0x00000000);
2545		amdgpu_ring_write(ring, 0x00000000);
2546		break;
2547	}
2548
2549	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2550	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2551
2552	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2553	amdgpu_ring_write(ring, 0);
2554
2555	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2556	amdgpu_ring_write(ring, 0x00000316);
2557	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2558	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2559
2560	amdgpu_ring_commit(ring);
2561
2562	return 0;
2563}
2564
2565/**
2566 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2567 *
2568 * @adev: amdgpu_device pointer
2569 *
2570 * Program the location and size of the gfx ring buffer
2571 * and test it to make sure it's working.
2572 * Returns 0 for success, error for failure.
2573 */
2574static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2575{
2576	struct amdgpu_ring *ring;
2577	u32 tmp;
2578	u32 rb_bufsz;
2579	u64 rb_addr, rptr_addr;
2580	int r;
2581
2582	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2583	if (adev->asic_type != CHIP_HAWAII)
2584		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2585
2586	/* Set the write pointer delay */
2587	WREG32(mmCP_RB_WPTR_DELAY, 0);
2588
2589	/* set the RB to use vmid 0 */
2590	WREG32(mmCP_RB_VMID, 0);
2591
2592	WREG32(mmSCRATCH_ADDR, 0);
2593
2594	/* ring 0 - compute and gfx */
2595	/* Set ring buffer size */
2596	ring = &adev->gfx.gfx_ring[0];
2597	rb_bufsz = order_base_2(ring->ring_size / 8);
2598	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2599#ifdef __BIG_ENDIAN
2600	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2601#endif
2602	WREG32(mmCP_RB0_CNTL, tmp);
2603
2604	/* Initialize the ring buffer's read and write pointers */
2605	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2606	ring->wptr = 0;
2607	WREG32(mmCP_RB0_WPTR, ring->wptr);
2608
2609	/* set the wb address whether it's enabled or not */
2610	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2611	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2612	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2613
2614	/* scratch register shadowing is no longer supported */
2615	WREG32(mmSCRATCH_UMSK, 0);
2616
2617	mdelay(1);
2618	WREG32(mmCP_RB0_CNTL, tmp);
2619
2620	rb_addr = ring->gpu_addr >> 8;
2621	WREG32(mmCP_RB0_BASE, rb_addr);
2622	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2623
2624	/* start the ring */
2625	gfx_v7_0_cp_gfx_start(adev);
2626	ring->ready = true;
2627	r = amdgpu_ring_test_ring(ring);
2628	if (r) {
2629		ring->ready = false;
2630		return r;
2631	}
2632
2633	return 0;
2634}
2635
2636static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2637{
2638	return ring->adev->wb.wb[ring->rptr_offs];
2639}
2640
2641static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2642{
2643	struct amdgpu_device *adev = ring->adev;
2644
2645	return RREG32(mmCP_RB0_WPTR);
2646}
2647
2648static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2649{
2650	struct amdgpu_device *adev = ring->adev;
2651
2652	WREG32(mmCP_RB0_WPTR, ring->wptr);
2653	(void)RREG32(mmCP_RB0_WPTR);
2654}
2655
2656static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2657{
2658	/* XXX check if swapping is necessary on BE */
2659	return ring->adev->wb.wb[ring->wptr_offs];
2660}
2661
2662static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2663{
2664	struct amdgpu_device *adev = ring->adev;
2665
2666	/* XXX check if swapping is necessary on BE */
2667	adev->wb.wb[ring->wptr_offs] = ring->wptr;
2668	WDOORBELL32(ring->doorbell_index, ring->wptr);
2669}
2670
2671/**
2672 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2673 *
2674 * @adev: amdgpu_device pointer
2675 * @enable: enable or disable the MEs
2676 *
2677 * Halts or unhalts the compute MEs.
2678 */
2679static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2680{
2681	int i;
2682
2683	if (enable) {
2684		WREG32(mmCP_MEC_CNTL, 0);
2685	} else {
2686		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2687		for (i = 0; i < adev->gfx.num_compute_rings; i++)
2688			adev->gfx.compute_ring[i].ready = false;
2689	}
2690	udelay(50);
2691}
2692
2693/**
2694 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2695 *
2696 * @adev: amdgpu_device pointer
2697 *
2698 * Loads the compute MEC1&2 ucode.
2699 * Returns 0 for success, -EINVAL if the ucode is not available.
2700 */
2701static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2702{
2703	const struct gfx_firmware_header_v1_0 *mec_hdr;
2704	const __le32 *fw_data;
2705	unsigned i, fw_size;
2706
2707	if (!adev->gfx.mec_fw)
2708		return -EINVAL;
2709
2710	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2711	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2712	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2713	adev->gfx.mec_feature_version = le32_to_cpu(
2714					mec_hdr->ucode_feature_version);
2715
2716	gfx_v7_0_cp_compute_enable(adev, false);
2717
2718	/* MEC1 */
2719	fw_data = (const __le32 *)
2720		(adev->gfx.mec_fw->data +
2721		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2722	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2723	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2724	for (i = 0; i < fw_size; i++)
2725		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2726	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2727
2728	if (adev->asic_type == CHIP_KAVERI) {
2729		const struct gfx_firmware_header_v1_0 *mec2_hdr;
2730
2731		if (!adev->gfx.mec2_fw)
2732			return -EINVAL;
2733
2734		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2735		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2736		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2737		adev->gfx.mec2_feature_version = le32_to_cpu(
2738				mec2_hdr->ucode_feature_version);
2739
2740		/* MEC2 */
2741		fw_data = (const __le32 *)
2742			(adev->gfx.mec2_fw->data +
2743			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2744		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2745		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2746		for (i = 0; i < fw_size; i++)
2747			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2748		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2749	}
2750
2751	return 0;
2752}
2753
2754/**
2755 * gfx_v7_0_cp_compute_fini - stop the compute queues
2756 *
2757 * @adev: amdgpu_device pointer
2758 *
2759 * Stop the compute queues and tear down the driver queue
2760 * info.
2761 */
2762static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2763{
2764	int i, r;
2765
2766	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2767		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2768
2769		if (ring->mqd_obj) {
2770			r = amdgpu_bo_reserve(ring->mqd_obj, false);
2771			if (unlikely(r != 0))
2772				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
2773
2774			amdgpu_bo_unpin(ring->mqd_obj);
2775			amdgpu_bo_unreserve(ring->mqd_obj);
2776
2777			amdgpu_bo_unref(&ring->mqd_obj);
2778			ring->mqd_obj = NULL;
2779		}
2780	}
2781}
2782
2783static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2784{
2785	int r;
2786
2787	if (adev->gfx.mec.hpd_eop_obj) {
2788		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2789		if (unlikely(r != 0))
2790			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
2791		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
2792		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2793
2794		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
2795		adev->gfx.mec.hpd_eop_obj = NULL;
2796	}
2797}
2798
2799#define MEC_HPD_SIZE 2048
2800
2801static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2802{
2803	int r;
2804	u32 *hpd;
2805
2806	/*
2807	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
2808	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
2809	 * Nonetheless, we assign only 1 pipe because all other pipes will
2810	 * be handled by KFD
2811	 */
2812	adev->gfx.mec.num_mec = 1;
2813	adev->gfx.mec.num_pipe = 1;
2814	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
2815
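	/*
	 * Allocate the HPD EOP buffers: MEC_HPD_SIZE * 2 bytes per pipe,
	 * matching the per-pipe stride used when programming
	 * CP_HPD_EOP_BASE_ADDR in gfx_v7_0_cp_compute_resume().
	 */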
2816	if (adev->gfx.mec.hpd_eop_obj == NULL) {
2817		r = amdgpu_bo_create(adev,
2818				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
2819				     PAGE_SIZE, true,
2820				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2821				     &adev->gfx.mec.hpd_eop_obj);
2822		if (r) {
2823			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
2824			return r;
2825		}
2826	}
2827
2828	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2829	if (unlikely(r != 0)) {
2830		gfx_v7_0_mec_fini(adev);
2831		return r;
2832	}
2833	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
2834			  &adev->gfx.mec.hpd_eop_gpu_addr);
2835	if (r) {
2836		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
2837		gfx_v7_0_mec_fini(adev);
2838		return r;
2839	}
2840	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
2841	if (r) {
2842		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
2843		gfx_v7_0_mec_fini(adev);
2844		return r;
2845	}
2846
2847	/* clear memory.  Not sure if this is required or not */
2848	memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
2849
2850	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2851	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2852
2853	return 0;
2854}
2855
2856struct hqd_registers
2857{
2858	u32 cp_mqd_base_addr;
2859	u32 cp_mqd_base_addr_hi;
2860	u32 cp_hqd_active;
2861	u32 cp_hqd_vmid;
2862	u32 cp_hqd_persistent_state;
2863	u32 cp_hqd_pipe_priority;
2864	u32 cp_hqd_queue_priority;
2865	u32 cp_hqd_quantum;
2866	u32 cp_hqd_pq_base;
2867	u32 cp_hqd_pq_base_hi;
2868	u32 cp_hqd_pq_rptr;
2869	u32 cp_hqd_pq_rptr_report_addr;
2870	u32 cp_hqd_pq_rptr_report_addr_hi;
2871	u32 cp_hqd_pq_wptr_poll_addr;
2872	u32 cp_hqd_pq_wptr_poll_addr_hi;
2873	u32 cp_hqd_pq_doorbell_control;
2874	u32 cp_hqd_pq_wptr;
2875	u32 cp_hqd_pq_control;
2876	u32 cp_hqd_ib_base_addr;
2877	u32 cp_hqd_ib_base_addr_hi;
2878	u32 cp_hqd_ib_rptr;
2879	u32 cp_hqd_ib_control;
2880	u32 cp_hqd_iq_timer;
2881	u32 cp_hqd_iq_rptr;
2882	u32 cp_hqd_dequeue_request;
2883	u32 cp_hqd_dma_offload;
2884	u32 cp_hqd_sema_cmd;
2885	u32 cp_hqd_msg_type;
2886	u32 cp_hqd_atomic0_preop_lo;
2887	u32 cp_hqd_atomic0_preop_hi;
2888	u32 cp_hqd_atomic1_preop_lo;
2889	u32 cp_hqd_atomic1_preop_hi;
2890	u32 cp_hqd_hq_scheduler0;
2891	u32 cp_hqd_hq_scheduler1;
2892	u32 cp_mqd_control;
2893};
2894
2895struct bonaire_mqd
2896{
2897	u32 header;
2898	u32 dispatch_initiator;
2899	u32 dimensions[3];
2900	u32 start_idx[3];
2901	u32 num_threads[3];
2902	u32 pipeline_stat_enable;
2903	u32 perf_counter_enable;
2904	u32 pgm[2];
2905	u32 tba[2];
2906	u32 tma[2];
2907	u32 pgm_rsrc[2];
2908	u32 vmid;
2909	u32 resource_limits;
2910	u32 static_thread_mgmt01[2];
2911	u32 tmp_ring_size;
2912	u32 static_thread_mgmt23[2];
2913	u32 restart[3];
2914	u32 thread_trace_enable;
2915	u32 reserved1;
2916	u32 user_data[16];
2917	u32 vgtcs_invoke_count[2];
2918	struct hqd_registers queue_state;
2919	u32 dequeue_cntr;
2920	u32 interrupt_queue[64];
2921};
2922
2923/**
2924 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
2925 *
2926 * @adev: amdgpu_device pointer
2927 *
2928 * Program the compute queues and test them to make sure they
2929 * are working.
2930 * Returns 0 for success, error for failure.
2931 */
2932static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
2933{
2934	int r, i, j;
2935	u32 tmp;
2936	bool use_doorbell = true;
2937	u64 hqd_gpu_addr;
2938	u64 mqd_gpu_addr;
2939	u64 eop_gpu_addr;
2940	u64 wb_gpu_addr;
2941	u32 *buf;
2942	struct bonaire_mqd *mqd;
2943	struct amdgpu_ring *ring;
2944
2945	/* fix up chicken bits */
2946	tmp = RREG32(mmCP_CPF_DEBUG);
2947	tmp |= (1 << 23);
2948	WREG32(mmCP_CPF_DEBUG, tmp);
2949
2950	/* init the pipes */
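	/* pipes 0-3 belong to MEC1 (me = 1), pipes 4-7 to MEC2 (me = 2) */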
2951	mutex_lock(&adev->srbm_mutex);
2952	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
2953		int me = (i < 4) ? 1 : 2;
2954		int pipe = (i < 4) ? i : (i - 4);
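		/*
		 * The flat index maps entries 0-3 to MEC1 (me = 1), pipes 0-3,
		 * and entries 4-7 to MEC2 (me = 2), pipes 0-3.
		 */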
2955
2956		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
2957
2958		cik_srbm_select(adev, me, pipe, 0, 0);
2959
2960		/* write the EOP addr */
2961		WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2962		WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2963
2964		/* set the VMID assigned */
2965		WREG32(mmCP_HPD_EOP_VMID, 0);
2966
2967		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2968		tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2969		tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2970		tmp |= order_base_2(MEC_HPD_SIZE / 8);
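		/*
		 * e.g. with a 2048-byte HPD: 2048 / 8 = 256, order_base_2(256) = 8,
		 * and 2^(8 + 1) = 512 dwords = 2048 bytes, matching the HPD size.
		 */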
2971		WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2972	}
2973	cik_srbm_select(adev, 0, 0, 0, 0);
2974	mutex_unlock(&adev->srbm_mutex);
2975
2976	/* init the compute queues */
2977	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2978		ring = &adev->gfx.compute_ring[i];
2979
2980		if (ring->mqd_obj == NULL) {
2981			r = amdgpu_bo_create(adev,
2982					     sizeof(struct bonaire_mqd),
2983					     PAGE_SIZE, true,
2984					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2985					     &ring->mqd_obj);
2986			if (r) {
2987				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2988				return r;
2989			}
2990		}
2991
2992		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2993		if (unlikely(r != 0)) {
2994			gfx_v7_0_cp_compute_fini(adev);
2995			return r;
2996		}
2997		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
2998				  &mqd_gpu_addr);
2999		if (r) {
3000			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
3001			gfx_v7_0_cp_compute_fini(adev);
3002			return r;
3003		}
3004		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
3005		if (r) {
3006			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
3007			gfx_v7_0_cp_compute_fini(adev);
3008			return r;
3009		}
3010
3011		/* init the mqd struct */
3012		memset(buf, 0, sizeof(struct bonaire_mqd));
3013
3014		mqd = (struct bonaire_mqd *)buf;
3015		mqd->header = 0xC0310800;
3016		mqd->static_thread_mgmt01[0] = 0xffffffff;
3017		mqd->static_thread_mgmt01[1] = 0xffffffff;
3018		mqd->static_thread_mgmt23[0] = 0xffffffff;
3019		mqd->static_thread_mgmt23[1] = 0xffffffff;
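		/*
		 * All-ones static thread management masks appear to leave every
		 * CU in every shader engine available to this queue.
		 */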
3020
3021		mutex_lock(&adev->srbm_mutex);
3022		cik_srbm_select(adev, ring->me,
3023				ring->pipe,
3024				ring->queue, 0);
3025
3026		/* disable wptr polling */
3027		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3028		tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
3029		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3030
3031		/* enable doorbell? */
3032		mqd->queue_state.cp_hqd_pq_doorbell_control =
3033			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3034		if (use_doorbell)
3035			mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3036		else
3037			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3038		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3039		       mqd->queue_state.cp_hqd_pq_doorbell_control);
3040
3041		/* disable the queue if it's active */
3042		mqd->queue_state.cp_hqd_dequeue_request = 0;
3043		mqd->queue_state.cp_hqd_pq_rptr = 0;
3044		mqd->queue_state.cp_hqd_pq_wptr = 0;
3045		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
3046			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
3047			for (j = 0; j < adev->usec_timeout; j++) {
3048				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
3049					break;
3050				udelay(1);
3051			}
3052			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
3053			WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
3054			WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3055		}
3056
3057		/* set the pointer to the MQD */
3058		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
3059		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3060		WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
3061		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
3062		/* set MQD vmid to 0 */
3063		mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
3064		mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
3065		WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
3066
3067		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3068		hqd_gpu_addr = ring->gpu_addr >> 8;
3069		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
3070		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3071		WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
3072		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
3073
3074		/* set up the HQD, this is similar to CP_RB0_CNTL */
3075		mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
3076		mqd->queue_state.cp_hqd_pq_control &=
3077			~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
3078					CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
3079
3080		mqd->queue_state.cp_hqd_pq_control |=
3081			order_base_2(ring->ring_size / 8);
3082		mqd->queue_state.cp_hqd_pq_control |=
3083			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
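		/*
		 * QUEUE_SIZE and RPTR_BLOCK_SIZE (the field shifted by 8) appear
		 * to use the same log2 encoding as the EOP size above: a field
		 * value of n means 2^(n+1) dwords, here the ring size and one
		 * GPU page respectively.
		 */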
3084#ifdef __BIG_ENDIAN
3085		mqd->queue_state.cp_hqd_pq_control |=
3086			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
3087#endif
3088		mqd->queue_state.cp_hqd_pq_control &=
3089			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
3090				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
3091				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
3092		mqd->queue_state.cp_hqd_pq_control |=
3093			CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
3094			CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
3095		WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
3096
3097		/* only used if the EN bit in CP_PQ_WPTR_POLL_CNTL is set */
3098		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3099		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3100		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3101		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
3102		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3103		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3104
3105		/* set the wb address whether it's enabled or not */
3106		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3107		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
3108		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
3109			upper_32_bits(wb_gpu_addr) & 0xffff;
3110		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3111		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
3112		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3113		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
3114
3115		/* enable the doorbell if requested */
3116		if (use_doorbell) {
3117			mqd->queue_state.cp_hqd_pq_doorbell_control =
3118				RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3119			mqd->queue_state.cp_hqd_pq_doorbell_control &=
3120				~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
3121			mqd->queue_state.cp_hqd_pq_doorbell_control |=
3122				(ring->doorbell_index <<
3123				 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
3124			mqd->queue_state.cp_hqd_pq_doorbell_control |=
3125				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3126			mqd->queue_state.cp_hqd_pq_doorbell_control &=
3127				~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
3128				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
3129
3130		} else {
3131			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
3132		}
3133		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3134		       mqd->queue_state.cp_hqd_pq_doorbell_control);
3135
3136		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3137		ring->wptr = 0;
3138		mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
3139		WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3140		mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3141
3142		/* set the vmid for the queue */
3143		mqd->queue_state.cp_hqd_vmid = 0;
3144		WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
3145
3146		/* activate the queue */
3147		mqd->queue_state.cp_hqd_active = 1;
3148		WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3149
3150		cik_srbm_select(adev, 0, 0, 0, 0);
3151		mutex_unlock(&adev->srbm_mutex);
3152
3153		amdgpu_bo_kunmap(ring->mqd_obj);
3154		amdgpu_bo_unreserve(ring->mqd_obj);
3155
3156		ring->ready = true;
3157	}
3158
3159	gfx_v7_0_cp_compute_enable(adev, true);
3160
3161	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3162		ring = &adev->gfx.compute_ring[i];
3163
3164		r = amdgpu_ring_test_ring(ring);
3165		if (r)
3166			ring->ready = false;
3167	}
3168
3169	return 0;
3170}
3171
3172static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3173{
3174	gfx_v7_0_cp_gfx_enable(adev, enable);
3175	gfx_v7_0_cp_compute_enable(adev, enable);
3176}
3177
3178static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3179{
3180	int r;
3181
3182	r = gfx_v7_0_cp_gfx_load_microcode(adev);
3183	if (r)
3184		return r;
3185	r = gfx_v7_0_cp_compute_load_microcode(adev);
3186	if (r)
3187		return r;
3188
3189	return 0;
3190}
3191
3192static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3193					       bool enable)
3194{
3195	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3196
3197	if (enable)
3198		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3199				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3200	else
3201		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3202				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3203	WREG32(mmCP_INT_CNTL_RING0, tmp);
3204}
3205
3206static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3207{
3208	int r;
3209
3210	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3211
3212	r = gfx_v7_0_cp_load_microcode(adev);
3213	if (r)
3214		return r;
3215
3216	r = gfx_v7_0_cp_gfx_resume(adev);
3217	if (r)
3218		return r;
3219	r = gfx_v7_0_cp_compute_resume(adev);
3220	if (r)
3221		return r;
3222
3223	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3224
3225	return 0;
3226}
3227
3228/**
3229 * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
3230 *
3231 * @ring: the ring to emit the commands to
3232 *
3233 * Sync the command pipeline with the PFP, i.e. wait for everything
3234 * to be completed.
3235 */
3236static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3237{
3238	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3239	uint32_t seq = ring->fence_drv.sync_seq;
3240	uint64_t addr = ring->fence_drv.gpu_addr;
3241
3242	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3243	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3244				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3245				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
3246	amdgpu_ring_write(ring, addr & 0xfffffffc);
3247	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3248	amdgpu_ring_write(ring, seq);
3249	amdgpu_ring_write(ring, 0xffffffff);
3250	amdgpu_ring_write(ring, 4); /* poll interval */
3251
3252	if (usepfp) {
3253		/* sync CE with ME to prevent CE from fetching CEIB before the context switch completes */
3254		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3255		amdgpu_ring_write(ring, 0);
3256		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3257		amdgpu_ring_write(ring, 0);
3258	}
3259}
3260
3261/*
3262 * vm
3263 * VMID 0 is used for the physical GPU addresses accessed by the kernel.
3264 * VMIDs 1-15 are used for userspace clients and are handled
3265 * by the amdgpu vm/hsa code.
3266 */
3267/**
3268 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3269 *
3270 * @ring: amdgpu_ring pointer
3271 *
3272 * Update the page table base and flush the VM TLB
3273 * using the CP (CIK).
3274 */
3275static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3276					unsigned vm_id, uint64_t pd_addr)
3277{
3278	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3279
3280	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3281	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3282				 WRITE_DATA_DST_SEL(0)));
3283	if (vm_id < 8) {
3284		amdgpu_ring_write(ring,
3285				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
3286	} else {
3287		amdgpu_ring_write(ring,
3288				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
3289	}
3290	amdgpu_ring_write(ring, 0);
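	/* the page directory base is programmed in units of 4 KiB pages, hence the >> 12 */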
3291	amdgpu_ring_write(ring, pd_addr >> 12);
3292
3293	/* bits 0-15 are the VM contexts0-15 */
3294	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3295	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3296				 WRITE_DATA_DST_SEL(0)));
3297	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3298	amdgpu_ring_write(ring, 0);
3299	amdgpu_ring_write(ring, 1 << vm_id);
3300
3301	/* wait for the invalidate to complete */
3302	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3303	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3304				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
3305				 WAIT_REG_MEM_ENGINE(0))); /* me */
3306	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3307	amdgpu_ring_write(ring, 0);
3308	amdgpu_ring_write(ring, 0); /* ref */
3309	amdgpu_ring_write(ring, 0); /* mask */
3310	amdgpu_ring_write(ring, 0x20); /* poll interval */
3311
3312	/* compute doesn't have PFP */
3313	if (usepfp) {
3314		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3315		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3316		amdgpu_ring_write(ring, 0x0);
3317
3318		/* sync CE with ME to prevent CE from fetching CEIB before the context switch completes */
3319		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3320		amdgpu_ring_write(ring, 0);
3321		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3322		amdgpu_ring_write(ring, 0);
3323	}
3324}
3325
3326/*
3327 * RLC
3328 * The RLC is a multi-purpose microengine that handles a
3329 * variety of functions.
3330 */
3331static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
3332{
3333	int r;
3334
3335	/* save restore block */
3336	if (adev->gfx.rlc.save_restore_obj) {
3337		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3338		if (unlikely(r != 0))
3339			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
3340		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
3341		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3342
3343		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
3344		adev->gfx.rlc.save_restore_obj = NULL;
3345	}
3346
3347	/* clear state block */
3348	if (adev->gfx.rlc.clear_state_obj) {
3349		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3350		if (unlikely(r != 0))
3351			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
3352		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
3353		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3354
3355		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
3356		adev->gfx.rlc.clear_state_obj = NULL;
3357	}
3358
3359	/* cp table block */
3360	if (adev->gfx.rlc.cp_table_obj) {
3361		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3362		if (unlikely(r != 0))
3363			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3364		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
3365		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3366
3367		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
3368		adev->gfx.rlc.cp_table_obj = NULL;
3369	}
3370}
3371
3372static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3373{
3374	const u32 *src_ptr;
3375	volatile u32 *dst_ptr;
3376	u32 dws, i;
3377	const struct cs_section_def *cs_data;
3378	int r;
3379
3380	/* allocate rlc buffers */
3381	if (adev->flags & AMD_IS_APU) {
3382		if (adev->asic_type == CHIP_KAVERI) {
3383			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3384			adev->gfx.rlc.reg_list_size =
3385				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3386		} else {
3387			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3388			adev->gfx.rlc.reg_list_size =
3389				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3390		}
3391	}
3392	adev->gfx.rlc.cs_data = ci_cs_data;
3393	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3394	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3395
3396	src_ptr = adev->gfx.rlc.reg_list;
3397	dws = adev->gfx.rlc.reg_list_size;
3398	dws += (5 * 16) + 48 + 48 + 64;
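	/*
	 * extra dwords beyond the raw register list, presumably reserved for
	 * the indirect lists and padding of the RLC save/restore buffer layout
	 */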
3399
3400	cs_data = adev->gfx.rlc.cs_data;
3401
3402	if (src_ptr) {
3403		/* save restore block */
3404		if (adev->gfx.rlc.save_restore_obj == NULL) {
3405			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3406					     AMDGPU_GEM_DOMAIN_VRAM,
3407					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3408					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3409					     NULL, NULL,
3410					     &adev->gfx.rlc.save_restore_obj);
3411			if (r) {
3412				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3413				return r;
3414			}
3415		}
3416
3417		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3418		if (unlikely(r != 0)) {
3419			gfx_v7_0_rlc_fini(adev);
3420			return r;
3421		}
3422		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
3423				  &adev->gfx.rlc.save_restore_gpu_addr);
3424		if (r) {
3425			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3426			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
3427			gfx_v7_0_rlc_fini(adev);
3428			return r;
3429		}
3430
3431		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
3432		if (r) {
3433			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
3434			gfx_v7_0_rlc_fini(adev);
3435			return r;
3436		}
3437		/* write the sr buffer */
3438		dst_ptr = adev->gfx.rlc.sr_ptr;
3439		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3440			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
3441		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
3442		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3443	}
3444
3445	if (cs_data) {
3446		/* clear state block */
3447		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
3448
3449		if (adev->gfx.rlc.clear_state_obj == NULL) {
3450			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3451					     AMDGPU_GEM_DOMAIN_VRAM,
3452					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3453					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3454					     NULL, NULL,
3455					     &adev->gfx.rlc.clear_state_obj);
3456			if (r) {
3457				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3458				gfx_v7_0_rlc_fini(adev);
3459				return r;
3460			}
3461		}
3462		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3463		if (unlikely(r != 0)) {
3464			gfx_v7_0_rlc_fini(adev);
3465			return r;
3466		}
3467		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
3468				  &adev->gfx.rlc.clear_state_gpu_addr);
3469		if (r) {
3470			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3471			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
3472			gfx_v7_0_rlc_fini(adev);
3473			return r;
3474		}
3475
3476		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
3477		if (r) {
3478			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
3479			gfx_v7_0_rlc_fini(adev);
3480			return r;
3481		}
3482		/* set up the cs buffer */
3483		dst_ptr = adev->gfx.rlc.cs_ptr;
3484		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
3485		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
3486		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3487	}
3488
3489	if (adev->gfx.rlc.cp_table_size) {
3490		if (adev->gfx.rlc.cp_table_obj == NULL) {
3491			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3492					     AMDGPU_GEM_DOMAIN_VRAM,
3493					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3494					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3495					     NULL, NULL,
3496					     &adev->gfx.rlc.cp_table_obj);
3497			if (r) {
3498				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3499				gfx_v7_0_rlc_fini(adev);
3500				return r;
3501			}
3502		}
3503
3504		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3505		if (unlikely(r != 0)) {
3506			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3507			gfx_v7_0_rlc_fini(adev);
3508			return r;
3509		}
3510		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
3511				  &adev->gfx.rlc.cp_table_gpu_addr);
3512		if (r) {
3513			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3514			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
3515			gfx_v7_0_rlc_fini(adev);
3516			return r;
3517		}
3518		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
3519		if (r) {
3520			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
3521			gfx_v7_0_rlc_fini(adev);
3522			return r;
3523		}
3524
3525		gfx_v7_0_init_cp_pg_table(adev);
3526
3527		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
3528		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3529
3530	}
3531
3532	return 0;
3533}
3534
3535static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3536{
3537	u32 tmp;
3538
3539	tmp = RREG32(mmRLC_LB_CNTL);
3540	if (enable)
3541		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3542	else
3543		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3544	WREG32(mmRLC_LB_CNTL, tmp);
3545}
3546
3547static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3548{
3549	u32 i, j, k;
3550	u32 mask;
3551
3552	mutex_lock(&adev->grbm_idx_mutex);
3553	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3554		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3555			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3556			for (k = 0; k < adev->usec_timeout; k++) {
3557				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3558					break;
3559				udelay(1);
3560			}
3561		}
3562	}
3563	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3564	mutex_unlock(&adev->grbm_idx_mutex);
3565
3566	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3567		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3568		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3569		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3570	for (k = 0; k < adev->usec_timeout; k++) {
3571		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3572			break;
3573		udelay(1);
3574	}
3575}
3576
3577static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3578{
3579	u32 tmp;
3580
3581	tmp = RREG32(mmRLC_CNTL);
3582	if (tmp != rlc)
3583		WREG32(mmRLC_CNTL, rlc);
3584}
3585
3586static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3587{
3588	u32 data, orig;
3589
3590	orig = data = RREG32(mmRLC_CNTL);
3591
3592	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3593		u32 i;
3594
3595		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3596		WREG32(mmRLC_CNTL, data);
3597
3598		for (i = 0; i < adev->usec_timeout; i++) {
3599			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3600				break;
3601			udelay(1);
3602		}
3603
3604		gfx_v7_0_wait_for_rlc_serdes(adev);
3605	}
3606
3607	return orig;
3608}
3609
3610static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3611{
3612	u32 tmp, i, mask;
3613
3614	tmp = 0x1 | (1 << 1);
3615	WREG32(mmRLC_GPR_REG2, tmp);
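	/*
	 * Bit 0 appears to be the request strobe and bit 1 the message
	 * (1 = enter, 0 = exit safe mode, cf. gfx_v7_0_exit_rlc_safe_mode);
	 * the RLC clears bit 0 once the request has been handled, which is
	 * what the second wait loop below polls for.
	 */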
3616
3617	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3618		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3619	for (i = 0; i < adev->usec_timeout; i++) {
3620		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3621			break;
3622		udelay(1);
3623	}
3624
3625	for (i = 0; i < adev->usec_timeout; i++) {
3626		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3627			break;
3628		udelay(1);
3629	}
3630}
3631
3632static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3633{
3634	u32 tmp;
3635
3636	tmp = 0x1 | (0 << 1);
3637	WREG32(mmRLC_GPR_REG2, tmp);
3638}
3639
3640/**
3641 * gfx_v7_0_rlc_stop - stop the RLC ME
3642 *
3643 * @adev: amdgpu_device pointer
3644 *
3645 * Halt the RLC ME (MicroEngine) (CIK).
3646 */
3647static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3648{
3649	WREG32(mmRLC_CNTL, 0);
3650
3651	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3652
3653	gfx_v7_0_wait_for_rlc_serdes(adev);
3654}
3655
3656/**
3657 * gfx_v7_0_rlc_start - start the RLC ME
3658 *
3659 * @adev: amdgpu_device pointer
3660 *
3661 * Unhalt the RLC ME (MicroEngine) (CIK).
3662 */
3663static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3664{
3665	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3666
3667	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3668
3669	udelay(50);
3670}
3671
3672static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3673{
3674	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3675
3676	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3677	WREG32(mmGRBM_SOFT_RESET, tmp);
3678	udelay(50);
3679	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3680	WREG32(mmGRBM_SOFT_RESET, tmp);
3681	udelay(50);
3682}
3683
3684/**
3685 * gfx_v7_0_rlc_resume - setup the RLC hw
3686 *
3687 * @adev: amdgpu_device pointer
3688 *
3689 * Initialize the RLC registers, load the ucode,
3690 * and start the RLC (CIK).
3691 * Returns 0 for success, -EINVAL if the ucode is not available.
3692 */
3693static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3694{
3695	const struct rlc_firmware_header_v1_0 *hdr;
3696	const __le32 *fw_data;
3697	unsigned i, fw_size;
3698	u32 tmp;
3699
3700	if (!adev->gfx.rlc_fw)
3701		return -EINVAL;
3702
3703	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3704	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3705	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3706	adev->gfx.rlc_feature_version = le32_to_cpu(
3707					hdr->ucode_feature_version);
3708
3709	gfx_v7_0_rlc_stop(adev);
3710
3711	/* disable CG */
3712	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3713	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3714
3715	gfx_v7_0_rlc_reset(adev);
3716
3717	gfx_v7_0_init_pg(adev);
3718
3719	WREG32(mmRLC_LB_CNTR_INIT, 0);
3720	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3721
3722	mutex_lock(&adev->grbm_idx_mutex);
3723	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3724	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3725	WREG32(mmRLC_LB_PARAMS, 0x00600408);
3726	WREG32(mmRLC_LB_CNTL, 0x80000004);
3727	mutex_unlock(&adev->grbm_idx_mutex);
3728
3729	WREG32(mmRLC_MC_CNTL, 0);
3730	WREG32(mmRLC_UCODE_CNTL, 0);
3731
3732	fw_data = (const __le32 *)
3733		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3734	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3735	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3736	for (i = 0; i < fw_size; i++)
3737		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3738	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3739
3740	/* XXX - find out what chips support lbpw */
3741	gfx_v7_0_enable_lbpw(adev, false);
3742
3743	if (adev->asic_type == CHIP_BONAIRE)
3744		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3745
3746	gfx_v7_0_rlc_start(adev);
3747
3748	return 0;
3749}
3750
3751static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3752{
3753	u32 data, orig, tmp, tmp2;
3754
3755	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3756
3757	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3758		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3759
3760		tmp = gfx_v7_0_halt_rlc(adev);
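		/*
		 * gfx_v7_0_halt_rlc() returns the previous RLC_CNTL value,
		 * which gfx_v7_0_update_rlc() restores after the SERDES
		 * override writes below.
		 */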
3761
3762		mutex_lock(&adev->grbm_idx_mutex);
3763		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3764		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3765		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3766		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3767			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3768			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3769		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3770		mutex_unlock(&adev->grbm_idx_mutex);
3771
3772		gfx_v7_0_update_rlc(adev, tmp);
3773
3774		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3775	} else {
3776		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3777
3778		RREG32(mmCB_CGTT_SCLK_CTRL);
3779		RREG32(mmCB_CGTT_SCLK_CTRL);
3780		RREG32(mmCB_CGTT_SCLK_CTRL);
3781		RREG32(mmCB_CGTT_SCLK_CTRL);
3782
3783		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3784	}
3785
3786	if (orig != data)
3787		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3788
3789}
3790
3791static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3792{
3793	u32 data, orig, tmp = 0;
3794
3795	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3796		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3797			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3798				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3799				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3800				if (orig != data)
3801					WREG32(mmCP_MEM_SLP_CNTL, data);
3802			}
3803		}
3804
3805		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3806		data |= 0x00000001;
3807		data &= 0xfffffffd;
3808		if (orig != data)
3809			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3810
3811		tmp = gfx_v7_0_halt_rlc(adev);
3812
3813		mutex_lock(&adev->grbm_idx_mutex);
3814		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3815		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3816		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3817		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3818			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3819		WREG32(mmRLC_SERDES_WR_CTRL, data);
3820		mutex_unlock(&adev->grbm_idx_mutex);
3821
3822		gfx_v7_0_update_rlc(adev, tmp);
3823
3824		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3825			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3826			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3827			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3828			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3829			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3830			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3831			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3832				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3833			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3834			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3835			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3836			if (orig != data)
3837				WREG32(mmCGTS_SM_CTRL_REG, data);
3838		}
3839	} else {
3840		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3841		data |= 0x00000003;
3842		if (orig != data)
3843			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3844
3845		data = RREG32(mmRLC_MEM_SLP_CNTL);
3846		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3847			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3848			WREG32(mmRLC_MEM_SLP_CNTL, data);
3849		}
3850
3851		data = RREG32(mmCP_MEM_SLP_CNTL);
3852		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3853			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3854			WREG32(mmCP_MEM_SLP_CNTL, data);
3855		}
3856
3857		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3858		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3859		if (orig != data)
3860			WREG32(mmCGTS_SM_CTRL_REG, data);
3861
3862		tmp = gfx_v7_0_halt_rlc(adev);
3863
3864		mutex_lock(&adev->grbm_idx_mutex);
3865		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3866		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3867		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3868		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3869		WREG32(mmRLC_SERDES_WR_CTRL, data);
3870		mutex_unlock(&adev->grbm_idx_mutex);
3871
3872		gfx_v7_0_update_rlc(adev, tmp);
3873	}
3874}
3875
3876static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3877			       bool enable)
3878{
3879	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3880	/* order matters! */
3881	if (enable) {
3882		gfx_v7_0_enable_mgcg(adev, true);
3883		gfx_v7_0_enable_cgcg(adev, true);
3884	} else {
3885		gfx_v7_0_enable_cgcg(adev, false);
3886		gfx_v7_0_enable_mgcg(adev, false);
3887	}
3888	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3889}
3890
3891static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3892						bool enable)
3893{
3894	u32 data, orig;
3895
3896	orig = data = RREG32(mmRLC_PG_CNTL);
3897	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3898		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3899	else
3900		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3901	if (orig != data)
3902		WREG32(mmRLC_PG_CNTL, data);
3903}
3904
3905static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3906						bool enable)
3907{
3908	u32 data, orig;
3909
3910	orig = data = RREG32(mmRLC_PG_CNTL);
3911	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3912		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3913	else
3914		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3915	if (orig != data)
3916		WREG32(mmRLC_PG_CNTL, data);
3917}
3918
3919static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3920{
3921	u32 data, orig;
3922
3923	orig = data = RREG32(mmRLC_PG_CNTL);
3924	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3925		data &= ~0x8000;
3926	else
3927		data |= 0x8000;
3928	if (orig != data)
3929		WREG32(mmRLC_PG_CNTL, data);
3930}
3931
3932static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3933{
3934	u32 data, orig;
3935
3936	orig = data = RREG32(mmRLC_PG_CNTL);
3937	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3938		data &= ~0x2000;
3939	else
3940		data |= 0x2000;
3941	if (orig != data)
3942		WREG32(mmRLC_PG_CNTL, data);
3943}
3944
3945static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
3946{
3947	const __le32 *fw_data;
3948	volatile u32 *dst_ptr;
3949	int me, i, max_me = 4;
3950	u32 bo_offset = 0;
3951	u32 table_offset, table_size;
3952
3953	if (adev->asic_type == CHIP_KAVERI)
3954		max_me = 5;
3955
3956	if (adev->gfx.rlc.cp_table_ptr == NULL)
3957		return;
3958
3959	/* write the cp table buffer */
3960	dst_ptr = adev->gfx.rlc.cp_table_ptr;
3961	for (me = 0; me < max_me; me++) {
3962		if (me == 0) {
3963			const struct gfx_firmware_header_v1_0 *hdr =
3964				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
3965			fw_data = (const __le32 *)
3966				(adev->gfx.ce_fw->data +
3967				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3968			table_offset = le32_to_cpu(hdr->jt_offset);
3969			table_size = le32_to_cpu(hdr->jt_size);
3970		} else if (me == 1) {
3971			const struct gfx_firmware_header_v1_0 *hdr =
3972				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
3973			fw_data = (const __le32 *)
3974				(adev->gfx.pfp_fw->data +
3975				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3976			table_offset = le32_to_cpu(hdr->jt_offset);
3977			table_size = le32_to_cpu(hdr->jt_size);
3978		} else if (me == 2) {
3979			const struct gfx_firmware_header_v1_0 *hdr =
3980				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
3981			fw_data = (const __le32 *)
3982				(adev->gfx.me_fw->data +
3983				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3984			table_offset = le32_to_cpu(hdr->jt_offset);
3985			table_size = le32_to_cpu(hdr->jt_size);
3986		} else if (me == 3) {
3987			const struct gfx_firmware_header_v1_0 *hdr =
3988				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3989			fw_data = (const __le32 *)
3990				(adev->gfx.mec_fw->data +
3991				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3992			table_offset = le32_to_cpu(hdr->jt_offset);
3993			table_size = le32_to_cpu(hdr->jt_size);
3994		} else {
3995			const struct gfx_firmware_header_v1_0 *hdr =
3996				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3997			fw_data = (const __le32 *)
3998				(adev->gfx.mec2_fw->data +
3999				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
4000			table_offset = le32_to_cpu(hdr->jt_offset);
4001			table_size = le32_to_cpu(hdr->jt_size);
4002		}
4003
4004		for (i = 0; i < table_size; i++) {
4005			dst_ptr[bo_offset + i] =
4006				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
4007		}
4008
4009		bo_offset += table_size;
4010	}
4011}
4012
4013static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
4014				     bool enable)
4015{
4016	u32 data, orig;
4017
4018	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
4019		orig = data = RREG32(mmRLC_PG_CNTL);
4020		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4021		if (orig != data)
4022			WREG32(mmRLC_PG_CNTL, data);
4023
4024		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
4025		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
4026		if (orig != data)
4027			WREG32(mmRLC_AUTO_PG_CTRL, data);
4028	} else {
4029		orig = data = RREG32(mmRLC_PG_CNTL);
4030		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4031		if (orig != data)
4032			WREG32(mmRLC_PG_CNTL, data);
4033
4034		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
4035		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
4036		if (orig != data)
4037			WREG32(mmRLC_AUTO_PG_CTRL, data);
4038
4039		data = RREG32(mmDB_RENDER_CONTROL);
4040	}
4041}
4042
4043static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4044						 u32 bitmap)
4045{
4046	u32 data;
4047
4048	if (!bitmap)
4049		return;
4050
4051	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4052	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4053
4054	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
4055}
4056
4057static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4058{
4059	u32 data, mask;
4060
4061	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
4062	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
4063
4064	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4065	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4066
4067	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
4068
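	/*
	 * The CONFIG registers report *inactive* CUs, so invert and limit the
	 * result to the CUs actually present in a shader array.
	 */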
4069	return (~data) & mask;
4070}
4071
4072static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
4073{
4074	u32 tmp;
4075
4076	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4077
4078	tmp = RREG32(mmRLC_MAX_PG_CU);
4079	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
4080	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
4081	WREG32(mmRLC_MAX_PG_CU, tmp);
4082}
4083
4084static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
4085					    bool enable)
4086{
4087	u32 data, orig;
4088
4089	orig = data = RREG32(mmRLC_PG_CNTL);
4090	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
4091		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4092	else
4093		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4094	if (orig != data)
4095		WREG32(mmRLC_PG_CNTL, data);
4096}
4097
4098static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
4099					     bool enable)
4100{
4101	u32 data, orig;
4102
4103	orig = data = RREG32(mmRLC_PG_CNTL);
4104	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
4105		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4106	else
4107		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4108	if (orig != data)
4109		WREG32(mmRLC_PG_CNTL, data);
4110}
4111
4112#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
4113#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
4114
4115static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
4116{
4117	u32 data, orig;
4118	u32 i;
4119
4120	if (adev->gfx.rlc.cs_data) {
4121		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
4122		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
4123		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
4124		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
4125	} else {
4126		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
4127		for (i = 0; i < 3; i++)
4128			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
4129	}
4130	if (adev->gfx.rlc.reg_list) {
4131		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
4132		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
4133			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
4134	}
4135
4136	orig = data = RREG32(mmRLC_PG_CNTL);
4137	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
4138	if (orig != data)
4139		WREG32(mmRLC_PG_CNTL, data);
4140
4141	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
4142	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4143
4144	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
4145	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
4146	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4147	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
4148
4149	data = 0x10101010;
4150	WREG32(mmRLC_PG_DELAY, data);
4151
4152	data = RREG32(mmRLC_PG_DELAY_2);
4153	data &= ~0xff;
4154	data |= 0x3;
4155	WREG32(mmRLC_PG_DELAY_2, data);
4156
4157	data = RREG32(mmRLC_AUTO_PG_CTRL);
4158	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
4159	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
4160	WREG32(mmRLC_AUTO_PG_CTRL, data);
4161
4162}
4163
4164static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
4165{
4166	gfx_v7_0_enable_gfx_cgpg(adev, enable);
4167	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
4168	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
4169}
4170
4171static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
4172{
4173	u32 count = 0;
4174	const struct cs_section_def *sect = NULL;
4175	const struct cs_extent_def *ext = NULL;
4176
4177	if (adev->gfx.rlc.cs_data == NULL)
4178		return 0;
4179
4180	/* begin clear state */
4181	count += 2;
4182	/* context control state */
4183	count += 3;
4184
4185	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4186		for (ext = sect->section; ext->extent != NULL; ++ext) {
4187			if (sect->id == SECT_CONTEXT)
4188				count += 2 + ext->reg_count;
4189			else
4190				return 0;
4191		}
4192	}
4193	/* pa_sc_raster_config/pa_sc_raster_config1 */
4194	count += 4;
4195	/* end clear state */
4196	count += 2;
4197	/* clear state */
4198	count += 2;
4199
4200	return count;
4201}
4202
4203static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4204				    volatile u32 *buffer)
4205{
4206	u32 count = 0, i;
4207	const struct cs_section_def *sect = NULL;
4208	const struct cs_extent_def *ext = NULL;
4209
4210	if (adev->gfx.rlc.cs_data == NULL)
4211		return;
4212	if (buffer == NULL)
4213		return;
4214
4215	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4216	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4217
4218	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4219	buffer[count++] = cpu_to_le32(0x80000000);
4220	buffer[count++] = cpu_to_le32(0x80000000);
4221
4222	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4223		for (ext = sect->section; ext->extent != NULL; ++ext) {
4224			if (sect->id == SECT_CONTEXT) {
4225				buffer[count++] =
4226					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
4227				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4228				for (i = 0; i < ext->reg_count; i++)
4229					buffer[count++] = cpu_to_le32(ext->extent[i]);
4230			} else {
4231				return;
4232			}
4233		}
4234	}
4235
4236	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4237	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4238	switch (adev->asic_type) {
4239	case CHIP_BONAIRE:
4240		buffer[count++] = cpu_to_le32(0x16000012);
4241		buffer[count++] = cpu_to_le32(0x00000000);
4242		break;
4243	case CHIP_KAVERI:
4244		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4245		buffer[count++] = cpu_to_le32(0x00000000);
4246		break;
4247	case CHIP_KABINI:
4248	case CHIP_MULLINS:
4249		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4250		buffer[count++] = cpu_to_le32(0x00000000);
4251		break;
4252	case CHIP_HAWAII:
4253		buffer[count++] = cpu_to_le32(0x3a00161a);
4254		buffer[count++] = cpu_to_le32(0x0000002e);
4255		break;
4256	default:
4257		buffer[count++] = cpu_to_le32(0x00000000);
4258		buffer[count++] = cpu_to_le32(0x00000000);
4259		break;
4260	}
4261
4262	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4263	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
4264
4265	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4266	buffer[count++] = cpu_to_le32(0);
4267}
4268
4269static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4270{
4271	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4272			      AMD_PG_SUPPORT_GFX_SMG |
4273			      AMD_PG_SUPPORT_GFX_DMG |
4274			      AMD_PG_SUPPORT_CP |
4275			      AMD_PG_SUPPORT_GDS |
4276			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4277		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4278		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4279		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4280			gfx_v7_0_init_gfx_cgpg(adev);
4281			gfx_v7_0_enable_cp_pg(adev, true);
4282			gfx_v7_0_enable_gds_pg(adev, true);
4283		}
4284		gfx_v7_0_init_ao_cu_mask(adev);
4285		gfx_v7_0_update_gfx_pg(adev, true);
4286	}
4287}
4288
4289static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4290{
4291	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4292			      AMD_PG_SUPPORT_GFX_SMG |
4293			      AMD_PG_SUPPORT_GFX_DMG |
4294			      AMD_PG_SUPPORT_CP |
4295			      AMD_PG_SUPPORT_GDS |
4296			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4297		gfx_v7_0_update_gfx_pg(adev, false);
4298		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4299			gfx_v7_0_enable_cp_pg(adev, false);
4300			gfx_v7_0_enable_gds_pg(adev, false);
4301		}
4302	}
4303}
4304
4305/**
4306 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4307 *
4308 * @adev: amdgpu_device pointer
4309 *
4310 * Fetches a GPU clock counter snapshot (CIK).
4311 * Returns the 64 bit clock counter snapshot.
4312 */
4313static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4314{
4315	uint64_t clock;
4316
4317	mutex_lock(&adev->gfx.gpu_clock_mutex);
4318	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4319	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4320		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4321	mutex_unlock(&adev->gfx.gpu_clock_mutex);
4322	return clock;
4323}
4324
4325static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4326					  uint32_t vmid,
4327					  uint32_t gds_base, uint32_t gds_size,
4328					  uint32_t gws_base, uint32_t gws_size,
4329					  uint32_t oa_base, uint32_t oa_size)
4330{
4331	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
4332	gds_size = gds_size >> AMDGPU_GDS_SHIFT;
4333
4334	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
4335	gws_size = gws_size >> AMDGPU_GWS_SHIFT;
4336
4337	oa_base = oa_base >> AMDGPU_OA_SHIFT;
4338	oa_size = oa_size >> AMDGPU_OA_SHIFT;
4339
4340	/* GDS Base */
4341	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4342	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4343				WRITE_DATA_DST_SEL(0)));
4344	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4345	amdgpu_ring_write(ring, 0);
4346	amdgpu_ring_write(ring, gds_base);
4347
4348	/* GDS Size */
4349	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4350	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4351				WRITE_DATA_DST_SEL(0)));
4352	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4353	amdgpu_ring_write(ring, 0);
4354	amdgpu_ring_write(ring, gds_size);
4355
4356	/* GWS */
4357	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4358	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4359				WRITE_DATA_DST_SEL(0)));
4360	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4361	amdgpu_ring_write(ring, 0);
4362	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4363
4364	/* OA */
4365	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4366	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4367				WRITE_DATA_DST_SEL(0)));
4368	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4369	amdgpu_ring_write(ring, 0);
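	/* (1 << (oa_size + oa_base)) - (1 << oa_base) = oa_size consecutive bits starting at bit oa_base */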
4370	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4371}
4372
4373static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4374{
4375	WREG32(mmSQ_IND_INDEX,
4376		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4377		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4378		(address << SQ_IND_INDEX__INDEX__SHIFT) |
4379		(SQ_IND_INDEX__FORCE_READ_MASK));
4380	return RREG32(mmSQ_IND_DATA);
4381}
4382
4383static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4384			   uint32_t wave, uint32_t thread,
4385			   uint32_t regno, uint32_t num, uint32_t *out)
4386{
4387	WREG32(mmSQ_IND_INDEX,
4388		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4389		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4390		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
4391		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4392		(SQ_IND_INDEX__FORCE_READ_MASK) |
4393		(SQ_IND_INDEX__AUTO_INCR_MASK));
4394	while (num--)
4395		*(out++) = RREG32(mmSQ_IND_DATA);
4396}
4397
4398static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4399{
4400	/* type 0 wave data */
4401	dst[(*no_fields)++] = 0;
4402	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4403	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4404	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4405	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4406	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4407	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4408	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4409	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4410	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4411	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4412	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4413	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4414	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4415	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4416	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4417	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4418	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4419	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4420}
4421
4422static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
4423				     uint32_t wave, uint32_t start,
4424				     uint32_t size, uint32_t *dst)
4425{
4426	wave_read_regs(
4427		adev, simd, wave, 0,
4428		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4429}
4430
4431static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4432	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4433	.select_se_sh = &gfx_v7_0_select_se_sh,
4434	.read_wave_data = &gfx_v7_0_read_wave_data,
4435	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4436};
4437
4438static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4439	.enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
4440	.exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
4441};
4442
4443static int gfx_v7_0_early_init(void *handle)
4444{
4445	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4446
4447	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4448	adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
4449	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4450	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4451	gfx_v7_0_set_ring_funcs(adev);
4452	gfx_v7_0_set_irq_funcs(adev);
4453	gfx_v7_0_set_gds_init(adev);
4454
4455	return 0;
4456}
4457
4458static int gfx_v7_0_late_init(void *handle)
4459{
4460	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4461	int r;
4462
4463	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4464	if (r)
4465		return r;
4466
4467	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4468	if (r)
4469		return r;
4470
4471	return 0;
4472}
4473
4474static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4475{
4476	u32 gb_addr_config;
4477	u32 mc_shared_chmap, mc_arb_ramcfg;
4478	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4479	u32 tmp;
4480
4481	switch (adev->asic_type) {
4482	case CHIP_BONAIRE:
4483		adev->gfx.config.max_shader_engines = 2;
4484		adev->gfx.config.max_tile_pipes = 4;
4485		adev->gfx.config.max_cu_per_sh = 7;
4486		adev->gfx.config.max_sh_per_se = 1;
4487		adev->gfx.config.max_backends_per_se = 2;
4488		adev->gfx.config.max_texture_channel_caches = 4;
4489		adev->gfx.config.max_gprs = 256;
4490		adev->gfx.config.max_gs_threads = 32;
4491		adev->gfx.config.max_hw_contexts = 8;
4492
4493		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4494		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4495		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4496		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4497		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4498		break;
4499	case CHIP_HAWAII:
4500		adev->gfx.config.max_shader_engines = 4;
4501		adev->gfx.config.max_tile_pipes = 16;
4502		adev->gfx.config.max_cu_per_sh = 11;
4503		adev->gfx.config.max_sh_per_se = 1;
4504		adev->gfx.config.max_backends_per_se = 4;
4505		adev->gfx.config.max_texture_channel_caches = 16;
4506		adev->gfx.config.max_gprs = 256;
4507		adev->gfx.config.max_gs_threads = 32;
4508		adev->gfx.config.max_hw_contexts = 8;
4509
4510		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4511		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4512		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4513		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4514		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4515		break;
4516	case CHIP_KAVERI:
4517		adev->gfx.config.max_shader_engines = 1;
4518		adev->gfx.config.max_tile_pipes = 4;
4519		if ((adev->pdev->device == 0x1304) ||
4520		    (adev->pdev->device == 0x1305) ||
4521		    (adev->pdev->device == 0x130C) ||
4522		    (adev->pdev->device == 0x130F) ||
4523		    (adev->pdev->device == 0x1310) ||
4524		    (adev->pdev->device == 0x1311) ||
4525		    (adev->pdev->device == 0x131C)) {
4526			adev->gfx.config.max_cu_per_sh = 8;
4527			adev->gfx.config.max_backends_per_se = 2;
4528		} else if ((adev->pdev->device == 0x1309) ||
4529			   (adev->pdev->device == 0x130A) ||
4530			   (adev->pdev->device == 0x130D) ||
4531			   (adev->pdev->device == 0x1313) ||
4532			   (adev->pdev->device == 0x131D)) {
4533			adev->gfx.config.max_cu_per_sh = 6;
4534			adev->gfx.config.max_backends_per_se = 2;
4535		} else if ((adev->pdev->device == 0x1306) ||
4536			   (adev->pdev->device == 0x1307) ||
4537			   (adev->pdev->device == 0x130B) ||
4538			   (adev->pdev->device == 0x130E) ||
4539			   (adev->pdev->device == 0x1315) ||
4540			   (adev->pdev->device == 0x131B)) {
4541			adev->gfx.config.max_cu_per_sh = 4;
4542			adev->gfx.config.max_backends_per_se = 1;
4543		} else {
4544			adev->gfx.config.max_cu_per_sh = 3;
4545			adev->gfx.config.max_backends_per_se = 1;
4546		}
4547		adev->gfx.config.max_sh_per_se = 1;
4548		adev->gfx.config.max_texture_channel_caches = 4;
4549		adev->gfx.config.max_gprs = 256;
4550		adev->gfx.config.max_gs_threads = 16;
4551		adev->gfx.config.max_hw_contexts = 8;
4552
4553		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4554		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4555		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4556		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4557		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4558		break;
4559	case CHIP_KABINI:
4560	case CHIP_MULLINS:
4561	default:
4562		adev->gfx.config.max_shader_engines = 1;
4563		adev->gfx.config.max_tile_pipes = 2;
4564		adev->gfx.config.max_cu_per_sh = 2;
4565		adev->gfx.config.max_sh_per_se = 1;
4566		adev->gfx.config.max_backends_per_se = 1;
4567		adev->gfx.config.max_texture_channel_caches = 2;
4568		adev->gfx.config.max_gprs = 256;
4569		adev->gfx.config.max_gs_threads = 16;
4570		adev->gfx.config.max_hw_contexts = 8;
4571
4572		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4573		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4574		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4575		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4576		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4577		break;
4578	}
4579
4580	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
4581	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4582	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4583
4584	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4585	adev->gfx.config.mem_max_burst_length_bytes = 256;
4586	if (adev->flags & AMD_IS_APU) {
4587		/* Get memory bank mapping mode. */
4588		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4589		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4590		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4591
4592		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4593		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4594		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4595
4596		/* Validate settings in case only one DIMM installed. */
4597		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4598			dimm00_addr_map = 0;
4599		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4600			dimm01_addr_map = 0;
4601		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4602			dimm10_addr_map = 0;
4603		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4604			dimm11_addr_map = 0;
4605
4606		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4607		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
4608		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4609			adev->gfx.config.mem_row_size_in_kb = 2;
4610		else
4611			adev->gfx.config.mem_row_size_in_kb = 1;
4612	} else {
4613		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4614		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4615		if (adev->gfx.config.mem_row_size_in_kb > 4)
4616			adev->gfx.config.mem_row_size_in_kb = 4;
4617	}
4618	/* XXX use MC settings? */
4619	adev->gfx.config.shader_engine_tile_size = 32;
4620	adev->gfx.config.num_gpus = 1;
4621	adev->gfx.config.multi_gpu_tile_size = 64;
4622
4623	/* fix up row size */
4624	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4625	switch (adev->gfx.config.mem_row_size_in_kb) {
4626	case 1:
4627	default:
4628		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4629		break;
4630	case 2:
4631		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4632		break;
4633	case 4:
4634		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4635		break;
4636	}
4637	adev->gfx.config.gb_addr_config = gb_addr_config;
4638}
4639
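/*
 * sw_init: register the CP EOP and privileged register/instruction fault
 * interrupt sources, load the gfx microcode, allocate the RLC and MEC
 * buffers, initialize the gfx and compute rings, and reserve the GDS, GWS
 * and OA buffer objects for the gfx block.
 */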
4640static int gfx_v7_0_sw_init(void *handle)
4641{
4642	struct amdgpu_ring *ring;
4643	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4644	int i, r;
4645
4646	/* EOP Event */
4647	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
4648	if (r)
4649		return r;
4650
4651	/* Privileged reg */
4652	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
4653	if (r)
4654		return r;
4655
4656	/* Privileged inst */
4657	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
4658	if (r)
4659		return r;
4660
4661	gfx_v7_0_scratch_init(adev);
4662
4663	r = gfx_v7_0_init_microcode(adev);
4664	if (r) {
4665		DRM_ERROR("Failed to load gfx firmware!\n");
4666		return r;
4667	}
4668
4669	r = gfx_v7_0_rlc_init(adev);
4670	if (r) {
4671		DRM_ERROR("Failed to init rlc BOs!\n");
4672		return r;
4673	}
4674
4675	/* allocate mec buffers */
4676	r = gfx_v7_0_mec_init(adev);
4677	if (r) {
4678		DRM_ERROR("Failed to init MEC BOs!\n");
4679		return r;
4680	}
4681
4682	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4683		ring = &adev->gfx.gfx_ring[i];
4684		ring->ring_obj = NULL;
4685		sprintf(ring->name, "gfx");
4686		r = amdgpu_ring_init(adev, ring, 1024,
4687				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
4688		if (r)
4689			return r;
4690	}
4691
4692	/* set up the compute queues */
4693	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4694		unsigned irq_type;
4695
4696		/* max 32 queues per MEC */
4697		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
4698			DRM_ERROR("Too many (%d) compute rings!\n", i);
4699			break;
4700		}
4701		ring = &adev->gfx.compute_ring[i];
4702		ring->ring_obj = NULL;
4703		ring->use_doorbell = true;
4704		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
4705		ring->me = 1; /* first MEC */
4706		ring->pipe = i / 8;
4707		ring->queue = i % 8;
4708		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4709		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
4710		/* type-2 packets are deprecated on MEC, use type-3 instead */
4711		r = amdgpu_ring_init(adev, ring, 1024,
4712				     &adev->gfx.eop_irq, irq_type);
4713		if (r)
4714			return r;
4715	}
4716
4717	/* reserve GDS, GWS and OA resource for gfx */
4718	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
4719				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
4720				    &adev->gds.gds_gfx_bo, NULL, NULL);
4721	if (r)
4722		return r;
4723
4724	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
4725				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
4726				    &adev->gds.gws_gfx_bo, NULL, NULL);
4727	if (r)
4728		return r;
4729
4730	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
4731				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
4732				    &adev->gds.oa_gfx_bo, NULL, NULL);
4733	if (r)
4734		return r;
4735
4736	adev->gfx.ce_ram_size = 0x8000;
4737
4738	gfx_v7_0_gpu_early_init(adev);
4739
4740	return r;
4741}
4742
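/*
 * sw_fini: reverse of sw_init; free the GDS/GWS/OA buffer objects, tear
 * down the gfx and compute rings, and release the compute CP, RLC and MEC
 * buffers along with the loaded microcode.
 */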
4743static int gfx_v7_0_sw_fini(void *handle)
4744{
4745	int i;
4746	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4747
4748	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
4749	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
4750	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
4751
4752	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4753		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4754	for (i = 0; i < adev->gfx.num_compute_rings; i++)
4755		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4756
4757	gfx_v7_0_cp_compute_fini(adev);
4758	gfx_v7_0_rlc_fini(adev);
4759	gfx_v7_0_mec_fini(adev);
4760	gfx_v7_0_free_microcode(adev);
4761
4762	return 0;
4763}
4764
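/*
 * hw_init: program the GPU configuration (tiling and RB setup) via
 * gfx_v7_0_gpu_init(), then resume the RLC and the CP gfx/compute rings.
 */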
4765static int gfx_v7_0_hw_init(void *handle)
4766{
4767	int r;
4768	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4769
4770	gfx_v7_0_gpu_init(adev);
4771
4772	/* init rlc */
4773	r = gfx_v7_0_rlc_resume(adev);
4774	if (r)
4775		return r;
4776
4777	r = gfx_v7_0_cp_resume(adev);
4778	if (r)
4779		return r;
4780
4781	return r;
4782}
4783
4784static int gfx_v7_0_hw_fini(void *handle)
4785{
4786	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4787
4788	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4789	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4790	gfx_v7_0_cp_enable(adev, false);
4791	gfx_v7_0_rlc_stop(adev);
4792	gfx_v7_0_fini_pg(adev);
4793
4794	return 0;
4795}
4796
4797static int gfx_v7_0_suspend(void *handle)
4798{
4799	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4800
4801	return gfx_v7_0_hw_fini(adev);
4802}
4803
4804static int gfx_v7_0_resume(void *handle)
4805{
4806	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4807
4808	return gfx_v7_0_hw_init(adev);
4809}
4810
4811static bool gfx_v7_0_is_idle(void *handle)
4812{
4813	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4814
4815	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4816		return false;
4817	else
4818		return true;
4819}
4820
4821static int gfx_v7_0_wait_for_idle(void *handle)
4822{
4823	unsigned i;
4824	u32 tmp;
4825	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4826
4827	for (i = 0; i < adev->usec_timeout; i++) {
4828		/* read GRBM_STATUS */
4829		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4830
4831		if (!tmp)
4832			return 0;
4833		udelay(1);
4834	}
4835	return -ETIMEDOUT;
4836}
4837
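/*
 * soft_reset: inspect GRBM_STATUS/GRBM_STATUS2/SRBM_STATUS for busy blocks;
 * if anything is stuck, disable CG/PG, stop the RLC, halt the CP ME/MEC and
 * pulse the corresponding GRBM/SRBM soft-reset bits.
 */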
4838static int gfx_v7_0_soft_reset(void *handle)
4839{
4840	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4841	u32 tmp;
4842	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4843
4844	/* GRBM_STATUS */
4845	tmp = RREG32(mmGRBM_STATUS);
4846	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4847		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4848		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4849		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4850		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4851		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4852		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4853			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4854
4855	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4856		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4857		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4858	}
4859
4860	/* GRBM_STATUS2 */
4861	tmp = RREG32(mmGRBM_STATUS2);
4862	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4863		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4864
4865	/* SRBM_STATUS */
4866	tmp = RREG32(mmSRBM_STATUS);
4867	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4868		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4869
4870	if (grbm_soft_reset || srbm_soft_reset) {
4871		/* disable CG/PG */
4872		gfx_v7_0_fini_pg(adev);
4873		gfx_v7_0_update_cg(adev, false);
4874
4875		/* stop the rlc */
4876		gfx_v7_0_rlc_stop(adev);
4877
4878		/* Disable GFX parsing/prefetching */
4879		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4880
4881		/* Disable MEC parsing/prefetching */
4882		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4883
4884		if (grbm_soft_reset) {
4885			tmp = RREG32(mmGRBM_SOFT_RESET);
4886			tmp |= grbm_soft_reset;
4887			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4888			WREG32(mmGRBM_SOFT_RESET, tmp);
4889			tmp = RREG32(mmGRBM_SOFT_RESET);
4890
4891			udelay(50);
4892
4893			tmp &= ~grbm_soft_reset;
4894			WREG32(mmGRBM_SOFT_RESET, tmp);
4895			tmp = RREG32(mmGRBM_SOFT_RESET);
4896		}
4897
4898		if (srbm_soft_reset) {
4899			tmp = RREG32(mmSRBM_SOFT_RESET);
4900			tmp |= srbm_soft_reset;
4901			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4902			WREG32(mmSRBM_SOFT_RESET, tmp);
4903			tmp = RREG32(mmSRBM_SOFT_RESET);
4904
4905			udelay(50);
4906
4907			tmp &= ~srbm_soft_reset;
4908			WREG32(mmSRBM_SOFT_RESET, tmp);
4909			tmp = RREG32(mmSRBM_SOFT_RESET);
4910		}
4911		/* Wait a little for things to settle down */
4912		udelay(50);
4913	}
4914	return 0;
4915}
4916
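/* Toggle the end-of-pipe timestamp interrupt for the gfx ring in CP_INT_CNTL_RING0. */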
4917static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4918						 enum amdgpu_interrupt_state state)
4919{
4920	u32 cp_int_cntl;
4921
4922	switch (state) {
4923	case AMDGPU_IRQ_STATE_DISABLE:
4924		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4925		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4926		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4927		break;
4928	case AMDGPU_IRQ_STATE_ENABLE:
4929		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4930		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4931		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4932		break;
4933	default:
4934		break;
4935	}
4936}
4937
4938static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4939						     int me, int pipe,
4940						     enum amdgpu_interrupt_state state)
4941{
4942	u32 mec_int_cntl, mec_int_cntl_reg;
4943
4944	/*
4945	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4946	 * handles the setting of interrupts for this specific pipe. All other
4947	 * pipes' interrupts are set by amdkfd.
4948	 */
4949
4950	if (me == 1) {
4951		switch (pipe) {
4952		case 0:
4953			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4954			break;
4955		default:
4956			DRM_DEBUG("invalid pipe %d\n", pipe);
4957			return;
4958		}
4959	} else {
4960		DRM_DEBUG("invalid me %d\n", me);
4961		return;
4962	}
4963
4964	switch (state) {
4965	case AMDGPU_IRQ_STATE_DISABLE:
4966		mec_int_cntl = RREG32(mec_int_cntl_reg);
4967		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4968		WREG32(mec_int_cntl_reg, mec_int_cntl);
4969		break;
4970	case AMDGPU_IRQ_STATE_ENABLE:
4971		mec_int_cntl = RREG32(mec_int_cntl_reg);
4972		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4973		WREG32(mec_int_cntl_reg, mec_int_cntl);
4974		break;
4975	default:
4976		break;
4977	}
4978}
4979
4980static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4981					     struct amdgpu_irq_src *src,
4982					     unsigned type,
4983					     enum amdgpu_interrupt_state state)
4984{
4985	u32 cp_int_cntl;
4986
4987	switch (state) {
4988	case AMDGPU_IRQ_STATE_DISABLE:
4989		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4990		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4991		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4992		break;
4993	case AMDGPU_IRQ_STATE_ENABLE:
4994		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4995		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4996		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4997		break;
4998	default:
4999		break;
5000	}
5001
5002	return 0;
5003}
5004
5005static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5006					      struct amdgpu_irq_src *src,
5007					      unsigned type,
5008					      enum amdgpu_interrupt_state state)
5009{
5010	u32 cp_int_cntl;
5011
5012	switch (state) {
5013	case AMDGPU_IRQ_STATE_DISABLE:
5014		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
5015		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
5016		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
5017		break;
5018	case AMDGPU_IRQ_STATE_ENABLE:
5019		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
5020		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
5021		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
5022		break;
5023	default:
5024		break;
5025	}
5026
5027	return 0;
5028}
5029
5030static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5031					    struct amdgpu_irq_src *src,
5032					    unsigned type,
5033					    enum amdgpu_interrupt_state state)
5034{
5035	switch (type) {
5036	case AMDGPU_CP_IRQ_GFX_EOP:
5037		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
5038		break;
5039	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5040		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5041		break;
5042	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5043		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5044		break;
5045	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5046		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5047		break;
5048	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5049		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5050		break;
5051	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5052		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5053		break;
5054	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5055		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5056		break;
5057	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5058		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5059		break;
5060	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5061		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5062		break;
5063	default:
5064		break;
5065	}
5066	return 0;
5067}
5068
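/*
 * EOP interrupt handler: ring_id bits [3:2] give the ME and bits [1:0] the
 * pipe; ME 0 is the gfx ring, ME 1/2 are the compute MECs, so fence
 * processing is routed to the matching ring(s).
 */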
5069static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
5070			    struct amdgpu_irq_src *source,
5071			    struct amdgpu_iv_entry *entry)
5072{
5073	u8 me_id, pipe_id;
5074	struct amdgpu_ring *ring;
5075	int i;
5076
5077	DRM_DEBUG("IH: CP EOP\n");
5078	me_id = (entry->ring_id & 0x0c) >> 2;
5079	pipe_id = (entry->ring_id & 0x03) >> 0;
5080	switch (me_id) {
5081	case 0:
5082		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5083		break;
5084	case 1:
5085	case 2:
5086		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5087			ring = &adev->gfx.compute_ring[i];
5088			if ((ring->me == me_id) && (ring->pipe == pipe_id))
5089				amdgpu_fence_process(ring);
5090		}
5091		break;
5092	}
5093	return 0;
5094}
5095
5096static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
5097				 struct amdgpu_irq_src *source,
5098				 struct amdgpu_iv_entry *entry)
5099{
5100	DRM_ERROR("Illegal register access in command stream\n");
5101	schedule_work(&adev->reset_work);
5102	return 0;
5103}
5104
5105static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
5106				  struct amdgpu_irq_src *source,
5107				  struct amdgpu_iv_entry *entry)
5108{
5109	DRM_ERROR("Illegal instruction in command stream\n");
5110	// XXX soft reset the gfx block only
5111	schedule_work(&adev->reset_work);
5112	return 0;
5113}
5114
5115static int gfx_v7_0_set_clockgating_state(void *handle,
5116					  enum amd_clockgating_state state)
5117{
5118	bool gate = false;
5119	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5120
5121	if (state == AMD_CG_STATE_GATE)
5122		gate = true;
5123
5124	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
5125	/* order matters! */
5126	if (gate) {
5127		gfx_v7_0_enable_mgcg(adev, true);
5128		gfx_v7_0_enable_cgcg(adev, true);
5129	} else {
5130		gfx_v7_0_enable_cgcg(adev, false);
5131		gfx_v7_0_enable_mgcg(adev, false);
5132	}
5133	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
5134
5135	return 0;
5136}
5137
5138static int gfx_v7_0_set_powergating_state(void *handle,
5139					  enum amd_powergating_state state)
5140{
5141	bool gate = false;
5142	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5143
5144	if (state == AMD_PG_STATE_GATE)
5145		gate = true;
5146
5147	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
5148			      AMD_PG_SUPPORT_GFX_SMG |
5149			      AMD_PG_SUPPORT_GFX_DMG |
5150			      AMD_PG_SUPPORT_CP |
5151			      AMD_PG_SUPPORT_GDS |
5152			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
5153		gfx_v7_0_update_gfx_pg(adev, gate);
5154		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
5155			gfx_v7_0_enable_cp_pg(adev, gate);
5156			gfx_v7_0_enable_gds_pg(adev, gate);
5157		}
5158	}
5159
5160	return 0;
5161}
5162
5163static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5164	.name = "gfx_v7_0",
5165	.early_init = gfx_v7_0_early_init,
5166	.late_init = gfx_v7_0_late_init,
5167	.sw_init = gfx_v7_0_sw_init,
5168	.sw_fini = gfx_v7_0_sw_fini,
5169	.hw_init = gfx_v7_0_hw_init,
5170	.hw_fini = gfx_v7_0_hw_fini,
5171	.suspend = gfx_v7_0_suspend,
5172	.resume = gfx_v7_0_resume,
5173	.is_idle = gfx_v7_0_is_idle,
5174	.wait_for_idle = gfx_v7_0_wait_for_idle,
5175	.soft_reset = gfx_v7_0_soft_reset,
5176	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
5177	.set_powergating_state = gfx_v7_0_set_powergating_state,
5178};
5179
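/* emit_frame_size/emit_ib_size below are worst-case sizes in dwords, used to reserve ring space per submission. */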
5180static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5181	.type = AMDGPU_RING_TYPE_GFX,
5182	.align_mask = 0xff,
5183	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5184	.get_rptr = gfx_v7_0_ring_get_rptr,
5185	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5186	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5187	.emit_frame_size =
5188		20 + /* gfx_v7_0_ring_emit_gds_switch */
5189		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5190		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
5191		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
5192		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
5193		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
5194		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
5195	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
5196	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5197	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5198	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5199	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5200	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5201	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5202	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5203	.test_ring = gfx_v7_0_ring_test_ring,
5204	.test_ib = gfx_v7_0_ring_test_ib,
5205	.insert_nop = amdgpu_ring_insert_nop,
5206	.pad_ib = amdgpu_ring_generic_pad_ib,
5207	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5208};
5209
5210static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5211	.type = AMDGPU_RING_TYPE_COMPUTE,
5212	.align_mask = 0xff,
5213	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5214	.get_rptr = gfx_v7_0_ring_get_rptr,
5215	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
5216	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
5217	.emit_frame_size =
5218		20 + /* gfx_v7_0_ring_emit_gds_switch */
5219		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5220		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
5221		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5222		17 + /* gfx_v7_0_ring_emit_vm_flush */
5223		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5224	.emit_ib_size =	4, /* gfx_v7_0_ring_emit_ib_compute */
5225	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
5226	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
5227	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5228	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5229	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5230	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5231	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5232	.test_ring = gfx_v7_0_ring_test_ring,
5233	.test_ib = gfx_v7_0_ring_test_ib,
5234	.insert_nop = amdgpu_ring_insert_nop,
5235	.pad_ib = amdgpu_ring_generic_pad_ib,
5236};
5237
5238static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5239{
5240	int i;
5241
5242	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5243		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5244	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5245		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5246}
5247
5248static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5249	.set = gfx_v7_0_set_eop_interrupt_state,
5250	.process = gfx_v7_0_eop_irq,
5251};
5252
5253static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5254	.set = gfx_v7_0_set_priv_reg_fault_state,
5255	.process = gfx_v7_0_priv_reg_irq,
5256};
5257
5258static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5259	.set = gfx_v7_0_set_priv_inst_fault_state,
5260	.process = gfx_v7_0_priv_inst_irq,
5261};
5262
5263static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5264{
5265	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5266	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5267
5268	adev->gfx.priv_reg_irq.num_types = 1;
5269	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5270
5271	adev->gfx.priv_inst_irq.num_types = 1;
5272	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5273}
5274
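/*
 * GDS partitioning: read the total on-chip GDS size from GDS_VMID0_SIZE and
 * split GDS memory, GWS and OA between the gfx and compute (CS) partitions
 * depending on whether 64KB of GDS is present.
 */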
5275static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5276{
5277	/* init asic gds info */
5278	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5279	adev->gds.gws.total_size = 64;
5280	adev->gds.oa.total_size = 16;
5281
5282	if (adev->gds.mem.total_size == 64 * 1024) {
5283		adev->gds.mem.gfx_partition_size = 4096;
5284		adev->gds.mem.cs_partition_size = 4096;
5285
5286		adev->gds.gws.gfx_partition_size = 4;
5287		adev->gds.gws.cs_partition_size = 4;
5288
5289		adev->gds.oa.gfx_partition_size = 4;
5290		adev->gds.oa.cs_partition_size = 1;
5291	} else {
5292		adev->gds.mem.gfx_partition_size = 1024;
5293		adev->gds.mem.cs_partition_size = 1024;
5294
5295		adev->gds.gws.gfx_partition_size = 16;
5296		adev->gds.gws.cs_partition_size = 16;
5297
5298		adev->gds.oa.gfx_partition_size = 4;
5299		adev->gds.oa.cs_partition_size = 4;
5300	}
5301}
5302
5303
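/*
 * CU info: for each shader engine / shader array, apply any user-requested
 * CU disable mask, read the active CU bitmap, count the active CUs and mark
 * up to two CUs per SH as always-on in ao_cu_mask.
 */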
5304static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5305{
5306	int i, j, k, counter, active_cu_number = 0;
5307	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5308	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5309	unsigned disable_masks[4 * 2];
5310
5311	memset(cu_info, 0, sizeof(*cu_info));
5312
5313	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5314
5315	mutex_lock(&adev->grbm_idx_mutex);
5316	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5317		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5318			mask = 1;
5319			ao_bitmap = 0;
5320			counter = 0;
5321			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5322			if (i < 4 && j < 2)
5323				gfx_v7_0_set_user_cu_inactive_bitmap(
5324					adev, disable_masks[i * 2 + j]);
5325			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5326			cu_info->bitmap[i][j] = bitmap;
5327
5328			for (k = 0; k < 16; k++) {
5329				if (bitmap & mask) {
5330					if (counter < 2)
5331						ao_bitmap |= mask;
5332					counter++;
5333				}
5334				mask <<= 1;
5335			}
5336			active_cu_number += counter;
5337			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5338		}
5339	}
5340	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5341	mutex_unlock(&adev->grbm_idx_mutex);
5342
5343	cu_info->number = active_cu_number;
5344	cu_info->ao_cu_mask = ao_cu_mask;
5345}
5346
5347const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
5348{
5349	.type = AMD_IP_BLOCK_TYPE_GFX,
5350	.major = 7,
5351	.minor = 0,
5352	.rev = 0,
5353	.funcs = &gfx_v7_0_ip_funcs,
5354};
5355
5356const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
5357{
5358	.type = AMD_IP_BLOCK_TYPE_GFX,
5359	.major = 7,
5360	.minor = 1,
5361	.rev = 0,
5362	.funcs = &gfx_v7_0_ip_funcs,
5363};
5364
5365const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
5366{
5367	.type = AMD_IP_BLOCK_TYPE_GFX,
5368	.major = 7,
5369	.minor = 2,
5370	.rev = 0,
5371	.funcs = &gfx_v7_0_ip_funcs,
5372};
5373
5374const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
5375{
5376	.type = AMD_IP_BLOCK_TYPE_GFX,
5377	.major = 7,
5378	.minor = 3,
5379	.rev = 0,
5380	.funcs = &gfx_v7_0_ip_funcs,
5381};