Linux Audio

Check our new training course

Loading...
v4.10.11
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
 
  23#include <linux/firmware.h>
  24#include "drmP.h"
 
  25#include "amdgpu.h"
  26#include "amdgpu_ih.h"
  27#include "amdgpu_gfx.h"
  28#include "cikd.h"
  29#include "cik.h"
 
  30#include "atom.h"
  31#include "amdgpu_ucode.h"
  32#include "clearstate_ci.h"
  33
  34#include "dce/dce_8_0_d.h"
  35#include "dce/dce_8_0_sh_mask.h"
  36
  37#include "bif/bif_4_1_d.h"
  38#include "bif/bif_4_1_sh_mask.h"
  39
  40#include "gca/gfx_7_0_d.h"
  41#include "gca/gfx_7_2_enum.h"
  42#include "gca/gfx_7_2_sh_mask.h"
  43
  44#include "gmc/gmc_7_0_d.h"
  45#include "gmc/gmc_7_0_sh_mask.h"
  46
  47#include "oss/oss_2_0_d.h"
  48#include "oss/oss_2_0_sh_mask.h"
  49
 
 
  50#define GFX7_NUM_GFX_RINGS     1
  51#define GFX7_NUM_COMPUTE_RINGS 8
  52
  53static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  54static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  55static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
  56
  57MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
  58MODULE_FIRMWARE("radeon/bonaire_me.bin");
  59MODULE_FIRMWARE("radeon/bonaire_ce.bin");
  60MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
  61MODULE_FIRMWARE("radeon/bonaire_mec.bin");
  62
  63MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
  64MODULE_FIRMWARE("radeon/hawaii_me.bin");
  65MODULE_FIRMWARE("radeon/hawaii_ce.bin");
  66MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
  67MODULE_FIRMWARE("radeon/hawaii_mec.bin");
  68
  69MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
  70MODULE_FIRMWARE("radeon/kaveri_me.bin");
  71MODULE_FIRMWARE("radeon/kaveri_ce.bin");
  72MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
  73MODULE_FIRMWARE("radeon/kaveri_mec.bin");
  74MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
  75
  76MODULE_FIRMWARE("radeon/kabini_pfp.bin");
  77MODULE_FIRMWARE("radeon/kabini_me.bin");
  78MODULE_FIRMWARE("radeon/kabini_ce.bin");
  79MODULE_FIRMWARE("radeon/kabini_rlc.bin");
  80MODULE_FIRMWARE("radeon/kabini_mec.bin");
  81
  82MODULE_FIRMWARE("radeon/mullins_pfp.bin");
  83MODULE_FIRMWARE("radeon/mullins_me.bin");
  84MODULE_FIRMWARE("radeon/mullins_ce.bin");
  85MODULE_FIRMWARE("radeon/mullins_rlc.bin");
  86MODULE_FIRMWARE("radeon/mullins_mec.bin");
  87
  88static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
  89{
  90	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
  91	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
  92	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
  93	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
  94	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
  95	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
  96	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
  97	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
  98	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
  99	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 100	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 101	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 102	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 103	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 104	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 105	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 106};
 107
 108static const u32 spectre_rlc_save_restore_register_list[] =
 109{
 110	(0x0e00 << 16) | (0xc12c >> 2),
 111	0x00000000,
 112	(0x0e00 << 16) | (0xc140 >> 2),
 113	0x00000000,
 114	(0x0e00 << 16) | (0xc150 >> 2),
 115	0x00000000,
 116	(0x0e00 << 16) | (0xc15c >> 2),
 117	0x00000000,
 118	(0x0e00 << 16) | (0xc168 >> 2),
 119	0x00000000,
 120	(0x0e00 << 16) | (0xc170 >> 2),
 121	0x00000000,
 122	(0x0e00 << 16) | (0xc178 >> 2),
 123	0x00000000,
 124	(0x0e00 << 16) | (0xc204 >> 2),
 125	0x00000000,
 126	(0x0e00 << 16) | (0xc2b4 >> 2),
 127	0x00000000,
 128	(0x0e00 << 16) | (0xc2b8 >> 2),
 129	0x00000000,
 130	(0x0e00 << 16) | (0xc2bc >> 2),
 131	0x00000000,
 132	(0x0e00 << 16) | (0xc2c0 >> 2),
 133	0x00000000,
 134	(0x0e00 << 16) | (0x8228 >> 2),
 135	0x00000000,
 136	(0x0e00 << 16) | (0x829c >> 2),
 137	0x00000000,
 138	(0x0e00 << 16) | (0x869c >> 2),
 139	0x00000000,
 140	(0x0600 << 16) | (0x98f4 >> 2),
 141	0x00000000,
 142	(0x0e00 << 16) | (0x98f8 >> 2),
 143	0x00000000,
 144	(0x0e00 << 16) | (0x9900 >> 2),
 145	0x00000000,
 146	(0x0e00 << 16) | (0xc260 >> 2),
 147	0x00000000,
 148	(0x0e00 << 16) | (0x90e8 >> 2),
 149	0x00000000,
 150	(0x0e00 << 16) | (0x3c000 >> 2),
 151	0x00000000,
 152	(0x0e00 << 16) | (0x3c00c >> 2),
 153	0x00000000,
 154	(0x0e00 << 16) | (0x8c1c >> 2),
 155	0x00000000,
 156	(0x0e00 << 16) | (0x9700 >> 2),
 157	0x00000000,
 158	(0x0e00 << 16) | (0xcd20 >> 2),
 159	0x00000000,
 160	(0x4e00 << 16) | (0xcd20 >> 2),
 161	0x00000000,
 162	(0x5e00 << 16) | (0xcd20 >> 2),
 163	0x00000000,
 164	(0x6e00 << 16) | (0xcd20 >> 2),
 165	0x00000000,
 166	(0x7e00 << 16) | (0xcd20 >> 2),
 167	0x00000000,
 168	(0x8e00 << 16) | (0xcd20 >> 2),
 169	0x00000000,
 170	(0x9e00 << 16) | (0xcd20 >> 2),
 171	0x00000000,
 172	(0xae00 << 16) | (0xcd20 >> 2),
 173	0x00000000,
 174	(0xbe00 << 16) | (0xcd20 >> 2),
 175	0x00000000,
 176	(0x0e00 << 16) | (0x89bc >> 2),
 177	0x00000000,
 178	(0x0e00 << 16) | (0x8900 >> 2),
 179	0x00000000,
 180	0x3,
 181	(0x0e00 << 16) | (0xc130 >> 2),
 182	0x00000000,
 183	(0x0e00 << 16) | (0xc134 >> 2),
 184	0x00000000,
 185	(0x0e00 << 16) | (0xc1fc >> 2),
 186	0x00000000,
 187	(0x0e00 << 16) | (0xc208 >> 2),
 188	0x00000000,
 189	(0x0e00 << 16) | (0xc264 >> 2),
 190	0x00000000,
 191	(0x0e00 << 16) | (0xc268 >> 2),
 192	0x00000000,
 193	(0x0e00 << 16) | (0xc26c >> 2),
 194	0x00000000,
 195	(0x0e00 << 16) | (0xc270 >> 2),
 196	0x00000000,
 197	(0x0e00 << 16) | (0xc274 >> 2),
 198	0x00000000,
 199	(0x0e00 << 16) | (0xc278 >> 2),
 200	0x00000000,
 201	(0x0e00 << 16) | (0xc27c >> 2),
 202	0x00000000,
 203	(0x0e00 << 16) | (0xc280 >> 2),
 204	0x00000000,
 205	(0x0e00 << 16) | (0xc284 >> 2),
 206	0x00000000,
 207	(0x0e00 << 16) | (0xc288 >> 2),
 208	0x00000000,
 209	(0x0e00 << 16) | (0xc28c >> 2),
 210	0x00000000,
 211	(0x0e00 << 16) | (0xc290 >> 2),
 212	0x00000000,
 213	(0x0e00 << 16) | (0xc294 >> 2),
 214	0x00000000,
 215	(0x0e00 << 16) | (0xc298 >> 2),
 216	0x00000000,
 217	(0x0e00 << 16) | (0xc29c >> 2),
 218	0x00000000,
 219	(0x0e00 << 16) | (0xc2a0 >> 2),
 220	0x00000000,
 221	(0x0e00 << 16) | (0xc2a4 >> 2),
 222	0x00000000,
 223	(0x0e00 << 16) | (0xc2a8 >> 2),
 224	0x00000000,
 225	(0x0e00 << 16) | (0xc2ac  >> 2),
 226	0x00000000,
 227	(0x0e00 << 16) | (0xc2b0 >> 2),
 228	0x00000000,
 229	(0x0e00 << 16) | (0x301d0 >> 2),
 230	0x00000000,
 231	(0x0e00 << 16) | (0x30238 >> 2),
 232	0x00000000,
 233	(0x0e00 << 16) | (0x30250 >> 2),
 234	0x00000000,
 235	(0x0e00 << 16) | (0x30254 >> 2),
 236	0x00000000,
 237	(0x0e00 << 16) | (0x30258 >> 2),
 238	0x00000000,
 239	(0x0e00 << 16) | (0x3025c >> 2),
 240	0x00000000,
 241	(0x4e00 << 16) | (0xc900 >> 2),
 242	0x00000000,
 243	(0x5e00 << 16) | (0xc900 >> 2),
 244	0x00000000,
 245	(0x6e00 << 16) | (0xc900 >> 2),
 246	0x00000000,
 247	(0x7e00 << 16) | (0xc900 >> 2),
 248	0x00000000,
 249	(0x8e00 << 16) | (0xc900 >> 2),
 250	0x00000000,
 251	(0x9e00 << 16) | (0xc900 >> 2),
 252	0x00000000,
 253	(0xae00 << 16) | (0xc900 >> 2),
 254	0x00000000,
 255	(0xbe00 << 16) | (0xc900 >> 2),
 256	0x00000000,
 257	(0x4e00 << 16) | (0xc904 >> 2),
 258	0x00000000,
 259	(0x5e00 << 16) | (0xc904 >> 2),
 260	0x00000000,
 261	(0x6e00 << 16) | (0xc904 >> 2),
 262	0x00000000,
 263	(0x7e00 << 16) | (0xc904 >> 2),
 264	0x00000000,
 265	(0x8e00 << 16) | (0xc904 >> 2),
 266	0x00000000,
 267	(0x9e00 << 16) | (0xc904 >> 2),
 268	0x00000000,
 269	(0xae00 << 16) | (0xc904 >> 2),
 270	0x00000000,
 271	(0xbe00 << 16) | (0xc904 >> 2),
 272	0x00000000,
 273	(0x4e00 << 16) | (0xc908 >> 2),
 274	0x00000000,
 275	(0x5e00 << 16) | (0xc908 >> 2),
 276	0x00000000,
 277	(0x6e00 << 16) | (0xc908 >> 2),
 278	0x00000000,
 279	(0x7e00 << 16) | (0xc908 >> 2),
 280	0x00000000,
 281	(0x8e00 << 16) | (0xc908 >> 2),
 282	0x00000000,
 283	(0x9e00 << 16) | (0xc908 >> 2),
 284	0x00000000,
 285	(0xae00 << 16) | (0xc908 >> 2),
 286	0x00000000,
 287	(0xbe00 << 16) | (0xc908 >> 2),
 288	0x00000000,
 289	(0x4e00 << 16) | (0xc90c >> 2),
 290	0x00000000,
 291	(0x5e00 << 16) | (0xc90c >> 2),
 292	0x00000000,
 293	(0x6e00 << 16) | (0xc90c >> 2),
 294	0x00000000,
 295	(0x7e00 << 16) | (0xc90c >> 2),
 296	0x00000000,
 297	(0x8e00 << 16) | (0xc90c >> 2),
 298	0x00000000,
 299	(0x9e00 << 16) | (0xc90c >> 2),
 300	0x00000000,
 301	(0xae00 << 16) | (0xc90c >> 2),
 302	0x00000000,
 303	(0xbe00 << 16) | (0xc90c >> 2),
 304	0x00000000,
 305	(0x4e00 << 16) | (0xc910 >> 2),
 306	0x00000000,
 307	(0x5e00 << 16) | (0xc910 >> 2),
 308	0x00000000,
 309	(0x6e00 << 16) | (0xc910 >> 2),
 310	0x00000000,
 311	(0x7e00 << 16) | (0xc910 >> 2),
 312	0x00000000,
 313	(0x8e00 << 16) | (0xc910 >> 2),
 314	0x00000000,
 315	(0x9e00 << 16) | (0xc910 >> 2),
 316	0x00000000,
 317	(0xae00 << 16) | (0xc910 >> 2),
 318	0x00000000,
 319	(0xbe00 << 16) | (0xc910 >> 2),
 320	0x00000000,
 321	(0x0e00 << 16) | (0xc99c >> 2),
 322	0x00000000,
 323	(0x0e00 << 16) | (0x9834 >> 2),
 324	0x00000000,
 325	(0x0000 << 16) | (0x30f00 >> 2),
 326	0x00000000,
 327	(0x0001 << 16) | (0x30f00 >> 2),
 328	0x00000000,
 329	(0x0000 << 16) | (0x30f04 >> 2),
 330	0x00000000,
 331	(0x0001 << 16) | (0x30f04 >> 2),
 332	0x00000000,
 333	(0x0000 << 16) | (0x30f08 >> 2),
 334	0x00000000,
 335	(0x0001 << 16) | (0x30f08 >> 2),
 336	0x00000000,
 337	(0x0000 << 16) | (0x30f0c >> 2),
 338	0x00000000,
 339	(0x0001 << 16) | (0x30f0c >> 2),
 340	0x00000000,
 341	(0x0600 << 16) | (0x9b7c >> 2),
 342	0x00000000,
 343	(0x0e00 << 16) | (0x8a14 >> 2),
 344	0x00000000,
 345	(0x0e00 << 16) | (0x8a18 >> 2),
 346	0x00000000,
 347	(0x0600 << 16) | (0x30a00 >> 2),
 348	0x00000000,
 349	(0x0e00 << 16) | (0x8bf0 >> 2),
 350	0x00000000,
 351	(0x0e00 << 16) | (0x8bcc >> 2),
 352	0x00000000,
 353	(0x0e00 << 16) | (0x8b24 >> 2),
 354	0x00000000,
 355	(0x0e00 << 16) | (0x30a04 >> 2),
 356	0x00000000,
 357	(0x0600 << 16) | (0x30a10 >> 2),
 358	0x00000000,
 359	(0x0600 << 16) | (0x30a14 >> 2),
 360	0x00000000,
 361	(0x0600 << 16) | (0x30a18 >> 2),
 362	0x00000000,
 363	(0x0600 << 16) | (0x30a2c >> 2),
 364	0x00000000,
 365	(0x0e00 << 16) | (0xc700 >> 2),
 366	0x00000000,
 367	(0x0e00 << 16) | (0xc704 >> 2),
 368	0x00000000,
 369	(0x0e00 << 16) | (0xc708 >> 2),
 370	0x00000000,
 371	(0x0e00 << 16) | (0xc768 >> 2),
 372	0x00000000,
 373	(0x0400 << 16) | (0xc770 >> 2),
 374	0x00000000,
 375	(0x0400 << 16) | (0xc774 >> 2),
 376	0x00000000,
 377	(0x0400 << 16) | (0xc778 >> 2),
 378	0x00000000,
 379	(0x0400 << 16) | (0xc77c >> 2),
 380	0x00000000,
 381	(0x0400 << 16) | (0xc780 >> 2),
 382	0x00000000,
 383	(0x0400 << 16) | (0xc784 >> 2),
 384	0x00000000,
 385	(0x0400 << 16) | (0xc788 >> 2),
 386	0x00000000,
 387	(0x0400 << 16) | (0xc78c >> 2),
 388	0x00000000,
 389	(0x0400 << 16) | (0xc798 >> 2),
 390	0x00000000,
 391	(0x0400 << 16) | (0xc79c >> 2),
 392	0x00000000,
 393	(0x0400 << 16) | (0xc7a0 >> 2),
 394	0x00000000,
 395	(0x0400 << 16) | (0xc7a4 >> 2),
 396	0x00000000,
 397	(0x0400 << 16) | (0xc7a8 >> 2),
 398	0x00000000,
 399	(0x0400 << 16) | (0xc7ac >> 2),
 400	0x00000000,
 401	(0x0400 << 16) | (0xc7b0 >> 2),
 402	0x00000000,
 403	(0x0400 << 16) | (0xc7b4 >> 2),
 404	0x00000000,
 405	(0x0e00 << 16) | (0x9100 >> 2),
 406	0x00000000,
 407	(0x0e00 << 16) | (0x3c010 >> 2),
 408	0x00000000,
 409	(0x0e00 << 16) | (0x92a8 >> 2),
 410	0x00000000,
 411	(0x0e00 << 16) | (0x92ac >> 2),
 412	0x00000000,
 413	(0x0e00 << 16) | (0x92b4 >> 2),
 414	0x00000000,
 415	(0x0e00 << 16) | (0x92b8 >> 2),
 416	0x00000000,
 417	(0x0e00 << 16) | (0x92bc >> 2),
 418	0x00000000,
 419	(0x0e00 << 16) | (0x92c0 >> 2),
 420	0x00000000,
 421	(0x0e00 << 16) | (0x92c4 >> 2),
 422	0x00000000,
 423	(0x0e00 << 16) | (0x92c8 >> 2),
 424	0x00000000,
 425	(0x0e00 << 16) | (0x92cc >> 2),
 426	0x00000000,
 427	(0x0e00 << 16) | (0x92d0 >> 2),
 428	0x00000000,
 429	(0x0e00 << 16) | (0x8c00 >> 2),
 430	0x00000000,
 431	(0x0e00 << 16) | (0x8c04 >> 2),
 432	0x00000000,
 433	(0x0e00 << 16) | (0x8c20 >> 2),
 434	0x00000000,
 435	(0x0e00 << 16) | (0x8c38 >> 2),
 436	0x00000000,
 437	(0x0e00 << 16) | (0x8c3c >> 2),
 438	0x00000000,
 439	(0x0e00 << 16) | (0xae00 >> 2),
 440	0x00000000,
 441	(0x0e00 << 16) | (0x9604 >> 2),
 442	0x00000000,
 443	(0x0e00 << 16) | (0xac08 >> 2),
 444	0x00000000,
 445	(0x0e00 << 16) | (0xac0c >> 2),
 446	0x00000000,
 447	(0x0e00 << 16) | (0xac10 >> 2),
 448	0x00000000,
 449	(0x0e00 << 16) | (0xac14 >> 2),
 450	0x00000000,
 451	(0x0e00 << 16) | (0xac58 >> 2),
 452	0x00000000,
 453	(0x0e00 << 16) | (0xac68 >> 2),
 454	0x00000000,
 455	(0x0e00 << 16) | (0xac6c >> 2),
 456	0x00000000,
 457	(0x0e00 << 16) | (0xac70 >> 2),
 458	0x00000000,
 459	(0x0e00 << 16) | (0xac74 >> 2),
 460	0x00000000,
 461	(0x0e00 << 16) | (0xac78 >> 2),
 462	0x00000000,
 463	(0x0e00 << 16) | (0xac7c >> 2),
 464	0x00000000,
 465	(0x0e00 << 16) | (0xac80 >> 2),
 466	0x00000000,
 467	(0x0e00 << 16) | (0xac84 >> 2),
 468	0x00000000,
 469	(0x0e00 << 16) | (0xac88 >> 2),
 470	0x00000000,
 471	(0x0e00 << 16) | (0xac8c >> 2),
 472	0x00000000,
 473	(0x0e00 << 16) | (0x970c >> 2),
 474	0x00000000,
 475	(0x0e00 << 16) | (0x9714 >> 2),
 476	0x00000000,
 477	(0x0e00 << 16) | (0x9718 >> 2),
 478	0x00000000,
 479	(0x0e00 << 16) | (0x971c >> 2),
 480	0x00000000,
 481	(0x0e00 << 16) | (0x31068 >> 2),
 482	0x00000000,
 483	(0x4e00 << 16) | (0x31068 >> 2),
 484	0x00000000,
 485	(0x5e00 << 16) | (0x31068 >> 2),
 486	0x00000000,
 487	(0x6e00 << 16) | (0x31068 >> 2),
 488	0x00000000,
 489	(0x7e00 << 16) | (0x31068 >> 2),
 490	0x00000000,
 491	(0x8e00 << 16) | (0x31068 >> 2),
 492	0x00000000,
 493	(0x9e00 << 16) | (0x31068 >> 2),
 494	0x00000000,
 495	(0xae00 << 16) | (0x31068 >> 2),
 496	0x00000000,
 497	(0xbe00 << 16) | (0x31068 >> 2),
 498	0x00000000,
 499	(0x0e00 << 16) | (0xcd10 >> 2),
 500	0x00000000,
 501	(0x0e00 << 16) | (0xcd14 >> 2),
 502	0x00000000,
 503	(0x0e00 << 16) | (0x88b0 >> 2),
 504	0x00000000,
 505	(0x0e00 << 16) | (0x88b4 >> 2),
 506	0x00000000,
 507	(0x0e00 << 16) | (0x88b8 >> 2),
 508	0x00000000,
 509	(0x0e00 << 16) | (0x88bc >> 2),
 510	0x00000000,
 511	(0x0400 << 16) | (0x89c0 >> 2),
 512	0x00000000,
 513	(0x0e00 << 16) | (0x88c4 >> 2),
 514	0x00000000,
 515	(0x0e00 << 16) | (0x88c8 >> 2),
 516	0x00000000,
 517	(0x0e00 << 16) | (0x88d0 >> 2),
 518	0x00000000,
 519	(0x0e00 << 16) | (0x88d4 >> 2),
 520	0x00000000,
 521	(0x0e00 << 16) | (0x88d8 >> 2),
 522	0x00000000,
 523	(0x0e00 << 16) | (0x8980 >> 2),
 524	0x00000000,
 525	(0x0e00 << 16) | (0x30938 >> 2),
 526	0x00000000,
 527	(0x0e00 << 16) | (0x3093c >> 2),
 528	0x00000000,
 529	(0x0e00 << 16) | (0x30940 >> 2),
 530	0x00000000,
 531	(0x0e00 << 16) | (0x89a0 >> 2),
 532	0x00000000,
 533	(0x0e00 << 16) | (0x30900 >> 2),
 534	0x00000000,
 535	(0x0e00 << 16) | (0x30904 >> 2),
 536	0x00000000,
 537	(0x0e00 << 16) | (0x89b4 >> 2),
 538	0x00000000,
 539	(0x0e00 << 16) | (0x3c210 >> 2),
 540	0x00000000,
 541	(0x0e00 << 16) | (0x3c214 >> 2),
 542	0x00000000,
 543	(0x0e00 << 16) | (0x3c218 >> 2),
 544	0x00000000,
 545	(0x0e00 << 16) | (0x8904 >> 2),
 546	0x00000000,
 547	0x5,
 548	(0x0e00 << 16) | (0x8c28 >> 2),
 549	(0x0e00 << 16) | (0x8c2c >> 2),
 550	(0x0e00 << 16) | (0x8c30 >> 2),
 551	(0x0e00 << 16) | (0x8c34 >> 2),
 552	(0x0e00 << 16) | (0x9600 >> 2),
 553};
 554
 555static const u32 kalindi_rlc_save_restore_register_list[] =
 556{
 557	(0x0e00 << 16) | (0xc12c >> 2),
 558	0x00000000,
 559	(0x0e00 << 16) | (0xc140 >> 2),
 560	0x00000000,
 561	(0x0e00 << 16) | (0xc150 >> 2),
 562	0x00000000,
 563	(0x0e00 << 16) | (0xc15c >> 2),
 564	0x00000000,
 565	(0x0e00 << 16) | (0xc168 >> 2),
 566	0x00000000,
 567	(0x0e00 << 16) | (0xc170 >> 2),
 568	0x00000000,
 569	(0x0e00 << 16) | (0xc204 >> 2),
 570	0x00000000,
 571	(0x0e00 << 16) | (0xc2b4 >> 2),
 572	0x00000000,
 573	(0x0e00 << 16) | (0xc2b8 >> 2),
 574	0x00000000,
 575	(0x0e00 << 16) | (0xc2bc >> 2),
 576	0x00000000,
 577	(0x0e00 << 16) | (0xc2c0 >> 2),
 578	0x00000000,
 579	(0x0e00 << 16) | (0x8228 >> 2),
 580	0x00000000,
 581	(0x0e00 << 16) | (0x829c >> 2),
 582	0x00000000,
 583	(0x0e00 << 16) | (0x869c >> 2),
 584	0x00000000,
 585	(0x0600 << 16) | (0x98f4 >> 2),
 586	0x00000000,
 587	(0x0e00 << 16) | (0x98f8 >> 2),
 588	0x00000000,
 589	(0x0e00 << 16) | (0x9900 >> 2),
 590	0x00000000,
 591	(0x0e00 << 16) | (0xc260 >> 2),
 592	0x00000000,
 593	(0x0e00 << 16) | (0x90e8 >> 2),
 594	0x00000000,
 595	(0x0e00 << 16) | (0x3c000 >> 2),
 596	0x00000000,
 597	(0x0e00 << 16) | (0x3c00c >> 2),
 598	0x00000000,
 599	(0x0e00 << 16) | (0x8c1c >> 2),
 600	0x00000000,
 601	(0x0e00 << 16) | (0x9700 >> 2),
 602	0x00000000,
 603	(0x0e00 << 16) | (0xcd20 >> 2),
 604	0x00000000,
 605	(0x4e00 << 16) | (0xcd20 >> 2),
 606	0x00000000,
 607	(0x5e00 << 16) | (0xcd20 >> 2),
 608	0x00000000,
 609	(0x6e00 << 16) | (0xcd20 >> 2),
 610	0x00000000,
 611	(0x7e00 << 16) | (0xcd20 >> 2),
 612	0x00000000,
 613	(0x0e00 << 16) | (0x89bc >> 2),
 614	0x00000000,
 615	(0x0e00 << 16) | (0x8900 >> 2),
 616	0x00000000,
 617	0x3,
 618	(0x0e00 << 16) | (0xc130 >> 2),
 619	0x00000000,
 620	(0x0e00 << 16) | (0xc134 >> 2),
 621	0x00000000,
 622	(0x0e00 << 16) | (0xc1fc >> 2),
 623	0x00000000,
 624	(0x0e00 << 16) | (0xc208 >> 2),
 625	0x00000000,
 626	(0x0e00 << 16) | (0xc264 >> 2),
 627	0x00000000,
 628	(0x0e00 << 16) | (0xc268 >> 2),
 629	0x00000000,
 630	(0x0e00 << 16) | (0xc26c >> 2),
 631	0x00000000,
 632	(0x0e00 << 16) | (0xc270 >> 2),
 633	0x00000000,
 634	(0x0e00 << 16) | (0xc274 >> 2),
 635	0x00000000,
 636	(0x0e00 << 16) | (0xc28c >> 2),
 637	0x00000000,
 638	(0x0e00 << 16) | (0xc290 >> 2),
 639	0x00000000,
 640	(0x0e00 << 16) | (0xc294 >> 2),
 641	0x00000000,
 642	(0x0e00 << 16) | (0xc298 >> 2),
 643	0x00000000,
 644	(0x0e00 << 16) | (0xc2a0 >> 2),
 645	0x00000000,
 646	(0x0e00 << 16) | (0xc2a4 >> 2),
 647	0x00000000,
 648	(0x0e00 << 16) | (0xc2a8 >> 2),
 649	0x00000000,
 650	(0x0e00 << 16) | (0xc2ac >> 2),
 651	0x00000000,
 652	(0x0e00 << 16) | (0x301d0 >> 2),
 653	0x00000000,
 654	(0x0e00 << 16) | (0x30238 >> 2),
 655	0x00000000,
 656	(0x0e00 << 16) | (0x30250 >> 2),
 657	0x00000000,
 658	(0x0e00 << 16) | (0x30254 >> 2),
 659	0x00000000,
 660	(0x0e00 << 16) | (0x30258 >> 2),
 661	0x00000000,
 662	(0x0e00 << 16) | (0x3025c >> 2),
 663	0x00000000,
 664	(0x4e00 << 16) | (0xc900 >> 2),
 665	0x00000000,
 666	(0x5e00 << 16) | (0xc900 >> 2),
 667	0x00000000,
 668	(0x6e00 << 16) | (0xc900 >> 2),
 669	0x00000000,
 670	(0x7e00 << 16) | (0xc900 >> 2),
 671	0x00000000,
 672	(0x4e00 << 16) | (0xc904 >> 2),
 673	0x00000000,
 674	(0x5e00 << 16) | (0xc904 >> 2),
 675	0x00000000,
 676	(0x6e00 << 16) | (0xc904 >> 2),
 677	0x00000000,
 678	(0x7e00 << 16) | (0xc904 >> 2),
 679	0x00000000,
 680	(0x4e00 << 16) | (0xc908 >> 2),
 681	0x00000000,
 682	(0x5e00 << 16) | (0xc908 >> 2),
 683	0x00000000,
 684	(0x6e00 << 16) | (0xc908 >> 2),
 685	0x00000000,
 686	(0x7e00 << 16) | (0xc908 >> 2),
 687	0x00000000,
 688	(0x4e00 << 16) | (0xc90c >> 2),
 689	0x00000000,
 690	(0x5e00 << 16) | (0xc90c >> 2),
 691	0x00000000,
 692	(0x6e00 << 16) | (0xc90c >> 2),
 693	0x00000000,
 694	(0x7e00 << 16) | (0xc90c >> 2),
 695	0x00000000,
 696	(0x4e00 << 16) | (0xc910 >> 2),
 697	0x00000000,
 698	(0x5e00 << 16) | (0xc910 >> 2),
 699	0x00000000,
 700	(0x6e00 << 16) | (0xc910 >> 2),
 701	0x00000000,
 702	(0x7e00 << 16) | (0xc910 >> 2),
 703	0x00000000,
 704	(0x0e00 << 16) | (0xc99c >> 2),
 705	0x00000000,
 706	(0x0e00 << 16) | (0x9834 >> 2),
 707	0x00000000,
 708	(0x0000 << 16) | (0x30f00 >> 2),
 709	0x00000000,
 710	(0x0000 << 16) | (0x30f04 >> 2),
 711	0x00000000,
 712	(0x0000 << 16) | (0x30f08 >> 2),
 713	0x00000000,
 714	(0x0000 << 16) | (0x30f0c >> 2),
 715	0x00000000,
 716	(0x0600 << 16) | (0x9b7c >> 2),
 717	0x00000000,
 718	(0x0e00 << 16) | (0x8a14 >> 2),
 719	0x00000000,
 720	(0x0e00 << 16) | (0x8a18 >> 2),
 721	0x00000000,
 722	(0x0600 << 16) | (0x30a00 >> 2),
 723	0x00000000,
 724	(0x0e00 << 16) | (0x8bf0 >> 2),
 725	0x00000000,
 726	(0x0e00 << 16) | (0x8bcc >> 2),
 727	0x00000000,
 728	(0x0e00 << 16) | (0x8b24 >> 2),
 729	0x00000000,
 730	(0x0e00 << 16) | (0x30a04 >> 2),
 731	0x00000000,
 732	(0x0600 << 16) | (0x30a10 >> 2),
 733	0x00000000,
 734	(0x0600 << 16) | (0x30a14 >> 2),
 735	0x00000000,
 736	(0x0600 << 16) | (0x30a18 >> 2),
 737	0x00000000,
 738	(0x0600 << 16) | (0x30a2c >> 2),
 739	0x00000000,
 740	(0x0e00 << 16) | (0xc700 >> 2),
 741	0x00000000,
 742	(0x0e00 << 16) | (0xc704 >> 2),
 743	0x00000000,
 744	(0x0e00 << 16) | (0xc708 >> 2),
 745	0x00000000,
 746	(0x0e00 << 16) | (0xc768 >> 2),
 747	0x00000000,
 748	(0x0400 << 16) | (0xc770 >> 2),
 749	0x00000000,
 750	(0x0400 << 16) | (0xc774 >> 2),
 751	0x00000000,
 752	(0x0400 << 16) | (0xc798 >> 2),
 753	0x00000000,
 754	(0x0400 << 16) | (0xc79c >> 2),
 755	0x00000000,
 756	(0x0e00 << 16) | (0x9100 >> 2),
 757	0x00000000,
 758	(0x0e00 << 16) | (0x3c010 >> 2),
 759	0x00000000,
 760	(0x0e00 << 16) | (0x8c00 >> 2),
 761	0x00000000,
 762	(0x0e00 << 16) | (0x8c04 >> 2),
 763	0x00000000,
 764	(0x0e00 << 16) | (0x8c20 >> 2),
 765	0x00000000,
 766	(0x0e00 << 16) | (0x8c38 >> 2),
 767	0x00000000,
 768	(0x0e00 << 16) | (0x8c3c >> 2),
 769	0x00000000,
 770	(0x0e00 << 16) | (0xae00 >> 2),
 771	0x00000000,
 772	(0x0e00 << 16) | (0x9604 >> 2),
 773	0x00000000,
 774	(0x0e00 << 16) | (0xac08 >> 2),
 775	0x00000000,
 776	(0x0e00 << 16) | (0xac0c >> 2),
 777	0x00000000,
 778	(0x0e00 << 16) | (0xac10 >> 2),
 779	0x00000000,
 780	(0x0e00 << 16) | (0xac14 >> 2),
 781	0x00000000,
 782	(0x0e00 << 16) | (0xac58 >> 2),
 783	0x00000000,
 784	(0x0e00 << 16) | (0xac68 >> 2),
 785	0x00000000,
 786	(0x0e00 << 16) | (0xac6c >> 2),
 787	0x00000000,
 788	(0x0e00 << 16) | (0xac70 >> 2),
 789	0x00000000,
 790	(0x0e00 << 16) | (0xac74 >> 2),
 791	0x00000000,
 792	(0x0e00 << 16) | (0xac78 >> 2),
 793	0x00000000,
 794	(0x0e00 << 16) | (0xac7c >> 2),
 795	0x00000000,
 796	(0x0e00 << 16) | (0xac80 >> 2),
 797	0x00000000,
 798	(0x0e00 << 16) | (0xac84 >> 2),
 799	0x00000000,
 800	(0x0e00 << 16) | (0xac88 >> 2),
 801	0x00000000,
 802	(0x0e00 << 16) | (0xac8c >> 2),
 803	0x00000000,
 804	(0x0e00 << 16) | (0x970c >> 2),
 805	0x00000000,
 806	(0x0e00 << 16) | (0x9714 >> 2),
 807	0x00000000,
 808	(0x0e00 << 16) | (0x9718 >> 2),
 809	0x00000000,
 810	(0x0e00 << 16) | (0x971c >> 2),
 811	0x00000000,
 812	(0x0e00 << 16) | (0x31068 >> 2),
 813	0x00000000,
 814	(0x4e00 << 16) | (0x31068 >> 2),
 815	0x00000000,
 816	(0x5e00 << 16) | (0x31068 >> 2),
 817	0x00000000,
 818	(0x6e00 << 16) | (0x31068 >> 2),
 819	0x00000000,
 820	(0x7e00 << 16) | (0x31068 >> 2),
 821	0x00000000,
 822	(0x0e00 << 16) | (0xcd10 >> 2),
 823	0x00000000,
 824	(0x0e00 << 16) | (0xcd14 >> 2),
 825	0x00000000,
 826	(0x0e00 << 16) | (0x88b0 >> 2),
 827	0x00000000,
 828	(0x0e00 << 16) | (0x88b4 >> 2),
 829	0x00000000,
 830	(0x0e00 << 16) | (0x88b8 >> 2),
 831	0x00000000,
 832	(0x0e00 << 16) | (0x88bc >> 2),
 833	0x00000000,
 834	(0x0400 << 16) | (0x89c0 >> 2),
 835	0x00000000,
 836	(0x0e00 << 16) | (0x88c4 >> 2),
 837	0x00000000,
 838	(0x0e00 << 16) | (0x88c8 >> 2),
 839	0x00000000,
 840	(0x0e00 << 16) | (0x88d0 >> 2),
 841	0x00000000,
 842	(0x0e00 << 16) | (0x88d4 >> 2),
 843	0x00000000,
 844	(0x0e00 << 16) | (0x88d8 >> 2),
 845	0x00000000,
 846	(0x0e00 << 16) | (0x8980 >> 2),
 847	0x00000000,
 848	(0x0e00 << 16) | (0x30938 >> 2),
 849	0x00000000,
 850	(0x0e00 << 16) | (0x3093c >> 2),
 851	0x00000000,
 852	(0x0e00 << 16) | (0x30940 >> 2),
 853	0x00000000,
 854	(0x0e00 << 16) | (0x89a0 >> 2),
 855	0x00000000,
 856	(0x0e00 << 16) | (0x30900 >> 2),
 857	0x00000000,
 858	(0x0e00 << 16) | (0x30904 >> 2),
 859	0x00000000,
 860	(0x0e00 << 16) | (0x89b4 >> 2),
 861	0x00000000,
 862	(0x0e00 << 16) | (0x3e1fc >> 2),
 863	0x00000000,
 864	(0x0e00 << 16) | (0x3c210 >> 2),
 865	0x00000000,
 866	(0x0e00 << 16) | (0x3c214 >> 2),
 867	0x00000000,
 868	(0x0e00 << 16) | (0x3c218 >> 2),
 869	0x00000000,
 870	(0x0e00 << 16) | (0x8904 >> 2),
 871	0x00000000,
 872	0x5,
 873	(0x0e00 << 16) | (0x8c28 >> 2),
 874	(0x0e00 << 16) | (0x8c2c >> 2),
 875	(0x0e00 << 16) | (0x8c30 >> 2),
 876	(0x0e00 << 16) | (0x8c34 >> 2),
 877	(0x0e00 << 16) | (0x9600 >> 2),
 878};
 879
 880static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 881static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
 882static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 883static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 884static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 885
 
 
 
 
 
 
 
 
 
 
 886/*
 887 * Core functions
 888 */
 889/**
 890 * gfx_v7_0_init_microcode - load ucode images from disk
 891 *
 892 * @adev: amdgpu_device pointer
 893 *
 894 * Use the firmware interface to load the ucode images into
 895 * the driver (not loaded into hw).
 896 * Returns 0 on success, error on failure.
 897 */
 898static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 899{
 900	const char *chip_name;
 901	char fw_name[30];
 902	int err;
 903
 904	DRM_DEBUG("\n");
 905
 906	switch (adev->asic_type) {
 907	case CHIP_BONAIRE:
 908		chip_name = "bonaire";
 909		break;
 910	case CHIP_HAWAII:
 911		chip_name = "hawaii";
 912		break;
 913	case CHIP_KAVERI:
 914		chip_name = "kaveri";
 915		break;
 916	case CHIP_KABINI:
 917		chip_name = "kabini";
 918		break;
 919	case CHIP_MULLINS:
 920		chip_name = "mullins";
 921		break;
 922	default: BUG();
 
 923	}
 924
 925	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
 926	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
 927	if (err)
 928		goto out;
 929	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
 930	if (err)
 931		goto out;
 932
 933	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
 934	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
 935	if (err)
 936		goto out;
 937	err = amdgpu_ucode_validate(adev->gfx.me_fw);
 938	if (err)
 939		goto out;
 940
 941	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
 942	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
 943	if (err)
 944		goto out;
 945	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
 946	if (err)
 947		goto out;
 948
 949	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
 950	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 951	if (err)
 952		goto out;
 953	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
 954	if (err)
 955		goto out;
 956
 957	if (adev->asic_type == CHIP_KAVERI) {
 958		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
 959		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
 960		if (err)
 961			goto out;
 962		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
 963		if (err)
 964			goto out;
 965	}
 966
 967	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
 968	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 969	if (err)
 970		goto out;
 971	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
 972
 973out:
 974	if (err) {
 975		printk(KERN_ERR
 976		       "gfx7: Failed to load firmware \"%s\"\n",
 977		       fw_name);
 978		release_firmware(adev->gfx.pfp_fw);
 979		adev->gfx.pfp_fw = NULL;
 980		release_firmware(adev->gfx.me_fw);
 981		adev->gfx.me_fw = NULL;
 982		release_firmware(adev->gfx.ce_fw);
 983		adev->gfx.ce_fw = NULL;
 984		release_firmware(adev->gfx.mec_fw);
 985		adev->gfx.mec_fw = NULL;
 986		release_firmware(adev->gfx.mec2_fw);
 987		adev->gfx.mec2_fw = NULL;
 988		release_firmware(adev->gfx.rlc_fw);
 989		adev->gfx.rlc_fw = NULL;
 990	}
 991	return err;
 992}
 993
 994static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
 995{
 996	release_firmware(adev->gfx.pfp_fw);
 997	adev->gfx.pfp_fw = NULL;
 998	release_firmware(adev->gfx.me_fw);
 999	adev->gfx.me_fw = NULL;
1000	release_firmware(adev->gfx.ce_fw);
1001	adev->gfx.ce_fw = NULL;
1002	release_firmware(adev->gfx.mec_fw);
1003	adev->gfx.mec_fw = NULL;
1004	release_firmware(adev->gfx.mec2_fw);
1005	adev->gfx.mec2_fw = NULL;
1006	release_firmware(adev->gfx.rlc_fw);
1007	adev->gfx.rlc_fw = NULL;
1008}
1009
1010/**
1011 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
1012 *
1013 * @adev: amdgpu_device pointer
1014 *
1015 * Starting with SI, the tiling setup is done globally in a
1016 * set of 32 tiling modes.  Rather than selecting each set of
1017 * parameters per surface as on older asics, we just select
1018 * which index in the tiling table we want to use, and the
1019 * surface uses those parameters (CIK).
1020 */
1021static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
1022{
1023	const u32 num_tile_mode_states =
1024			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1025	const u32 num_secondary_tile_mode_states =
1026			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1027	u32 reg_offset, split_equal_to_row_size;
1028	uint32_t *tile, *macrotile;
1029
1030	tile = adev->gfx.config.tile_mode_array;
1031	macrotile = adev->gfx.config.macrotile_mode_array;
1032
1033	switch (adev->gfx.config.mem_row_size_in_kb) {
1034	case 1:
1035		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
1036		break;
1037	case 2:
1038	default:
1039		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
1040		break;
1041	case 4:
1042		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1043		break;
1044	}
1045
1046	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1047		tile[reg_offset] = 0;
1048	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1049		macrotile[reg_offset] = 0;
1050
1051	switch (adev->asic_type) {
1052	case CHIP_BONAIRE:
1053		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1054			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1055			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1056			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1057		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1058			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1059			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1060			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1061		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1062			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1063			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1064			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1065		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1066			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1067			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1068			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1069		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1070			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1071			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1072			   TILE_SPLIT(split_equal_to_row_size));
1073		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1074			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1075			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1076		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1077			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1078			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1079			   TILE_SPLIT(split_equal_to_row_size));
1080		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1081		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1082			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
1083		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1084			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1085			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1086		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1087			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1088			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1089			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1090		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1091			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1092			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1093			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1094		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1095		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1096			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1097			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1098		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1099			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1100			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1101			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1102		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1103			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1104			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1105			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1106		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1107			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1108			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1109			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1110		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1111		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1112			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1113			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1114			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1115		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1116			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1117			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1118		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1119			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1120			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1121			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1122		tile[21] =  (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1123			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1124			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1125			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1126		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1127			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1128			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1129			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1130		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1131		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1132			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1133			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1134			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1135		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1136			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1137			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1138			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1139		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1140			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1141			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1142			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1143		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1144			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1145			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1146		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1147			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1148			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1149			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1150		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1151			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1152			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1153			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1154		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1155
1156		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1157				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1158				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1159				NUM_BANKS(ADDR_SURF_16_BANK));
1160		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1161				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1162				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1163				NUM_BANKS(ADDR_SURF_16_BANK));
1164		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1165				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1166				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1167				NUM_BANKS(ADDR_SURF_16_BANK));
1168		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1169				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1170				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1171				NUM_BANKS(ADDR_SURF_16_BANK));
1172		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1173				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1174				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1175				NUM_BANKS(ADDR_SURF_16_BANK));
1176		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1177				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1178				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1179				NUM_BANKS(ADDR_SURF_8_BANK));
1180		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1181				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1182				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1183				NUM_BANKS(ADDR_SURF_4_BANK));
1184		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1185				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1186				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1187				NUM_BANKS(ADDR_SURF_16_BANK));
1188		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1189				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1190				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1191				NUM_BANKS(ADDR_SURF_16_BANK));
1192		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1193				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1194				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1195				NUM_BANKS(ADDR_SURF_16_BANK));
1196		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1197				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1198				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1199				NUM_BANKS(ADDR_SURF_16_BANK));
1200		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1201				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1202				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1203				NUM_BANKS(ADDR_SURF_16_BANK));
1204		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1205				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1206				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1207				NUM_BANKS(ADDR_SURF_8_BANK));
1208		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1209				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1210				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1211				NUM_BANKS(ADDR_SURF_4_BANK));
1212
1213		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1214			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1215		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1216			if (reg_offset != 7)
1217				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1218		break;
1219	case CHIP_HAWAII:
1220		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1221			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1222			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1223			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1224		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1225			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1226			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1227			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1228		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1229			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1230			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1231			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1232		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1233			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1234			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1235			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1236		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1237			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1238			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1239			   TILE_SPLIT(split_equal_to_row_size));
1240		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1241			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1242			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1243			   TILE_SPLIT(split_equal_to_row_size));
1244		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1245			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1246			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1247			   TILE_SPLIT(split_equal_to_row_size));
1248		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1249			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1250			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1251			   TILE_SPLIT(split_equal_to_row_size));
1252		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1253			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1254		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1255			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1256			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1257		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1258			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1259			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1260			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1261		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1262			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1263			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1264			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1265		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1266			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1267			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1268			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1269		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1270			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1271			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1272		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1273			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1274			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1275			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1276		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1277			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1278			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1279			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1280		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1281			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1282			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1283			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1284		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1285			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1286			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1287			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1288		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1289			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1290			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1291			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1292		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1293			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1294			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1295		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1296			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1297			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1298			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1299		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1300			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1301			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1302			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1303		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1304			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1305			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1306			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1307		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1308			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1309			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1310			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1311		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1312			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1313			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1314			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1315		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1316			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1317			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1318			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1319		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1320			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1321			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1322			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1323		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1324			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1325			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1326		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1327			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1328			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1329			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1330		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1331			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1332			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1333			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1334		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1335			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1336			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1337			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1338
1339		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1340				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1341				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1342				NUM_BANKS(ADDR_SURF_16_BANK));
1343		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1344				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1345				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1346				NUM_BANKS(ADDR_SURF_16_BANK));
1347		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1348				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1349				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1350				NUM_BANKS(ADDR_SURF_16_BANK));
1351		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1352				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1353				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1354				NUM_BANKS(ADDR_SURF_16_BANK));
1355		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1356				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1357				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1358				NUM_BANKS(ADDR_SURF_8_BANK));
1359		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1360				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1361				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1362				NUM_BANKS(ADDR_SURF_4_BANK));
1363		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1364				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1365				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1366				NUM_BANKS(ADDR_SURF_4_BANK));
1367		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1368				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1369				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1370				NUM_BANKS(ADDR_SURF_16_BANK));
1371		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1372				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1373				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1374				NUM_BANKS(ADDR_SURF_16_BANK));
1375		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1376				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1377				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1378				NUM_BANKS(ADDR_SURF_16_BANK));
1379		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1380				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1381				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1382				NUM_BANKS(ADDR_SURF_8_BANK));
1383		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1384				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1385				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1386				NUM_BANKS(ADDR_SURF_16_BANK));
1387		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1388				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1389				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1390				NUM_BANKS(ADDR_SURF_8_BANK));
1391		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1392				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1393				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1394				NUM_BANKS(ADDR_SURF_4_BANK));
1395
1396		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1397			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1398		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1399			if (reg_offset != 7)
1400				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1401		break;
1402	case CHIP_KABINI:
1403	case CHIP_KAVERI:
1404	case CHIP_MULLINS:
1405	default:
1406		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1407			   PIPE_CONFIG(ADDR_SURF_P2) |
1408			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1409			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1410		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1411			   PIPE_CONFIG(ADDR_SURF_P2) |
1412			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1413			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1414		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1415			   PIPE_CONFIG(ADDR_SURF_P2) |
1416			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1417			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1418		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1419			   PIPE_CONFIG(ADDR_SURF_P2) |
1420			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1421			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1422		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1423			   PIPE_CONFIG(ADDR_SURF_P2) |
1424			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1425			   TILE_SPLIT(split_equal_to_row_size));
1426		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1427			   PIPE_CONFIG(ADDR_SURF_P2) |
1428			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1429		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1430			   PIPE_CONFIG(ADDR_SURF_P2) |
1431			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1432			   TILE_SPLIT(split_equal_to_row_size));
1433		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1434		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1435			   PIPE_CONFIG(ADDR_SURF_P2));
1436		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1437			   PIPE_CONFIG(ADDR_SURF_P2) |
1438			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1439		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1440			    PIPE_CONFIG(ADDR_SURF_P2) |
1441			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1442			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1443		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1444			    PIPE_CONFIG(ADDR_SURF_P2) |
1445			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1446			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1447		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1448		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1449			    PIPE_CONFIG(ADDR_SURF_P2) |
1450			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1451		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1452			    PIPE_CONFIG(ADDR_SURF_P2) |
1453			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1454			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1455		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1456			    PIPE_CONFIG(ADDR_SURF_P2) |
1457			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1458			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1459		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1460			    PIPE_CONFIG(ADDR_SURF_P2) |
1461			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1462			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1463		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1464		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1465			    PIPE_CONFIG(ADDR_SURF_P2) |
1466			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1467			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1468		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1469			    PIPE_CONFIG(ADDR_SURF_P2) |
1470			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1471		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1472			    PIPE_CONFIG(ADDR_SURF_P2) |
1473			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1474			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1475		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1476			    PIPE_CONFIG(ADDR_SURF_P2) |
1477			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1478			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1479		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1480			    PIPE_CONFIG(ADDR_SURF_P2) |
1481			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1482			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1483		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1484		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1485			    PIPE_CONFIG(ADDR_SURF_P2) |
1486			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1487			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1488		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1489			    PIPE_CONFIG(ADDR_SURF_P2) |
1490			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1491			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1492		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1493			    PIPE_CONFIG(ADDR_SURF_P2) |
1494			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1495			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1496		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1497			    PIPE_CONFIG(ADDR_SURF_P2) |
1498			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1499		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1500			    PIPE_CONFIG(ADDR_SURF_P2) |
1501			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1502			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1503		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1504			    PIPE_CONFIG(ADDR_SURF_P2) |
1505			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1506			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1507		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1508
1509		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1510				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1511				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1512				NUM_BANKS(ADDR_SURF_8_BANK));
1513		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1514				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1515				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1516				NUM_BANKS(ADDR_SURF_8_BANK));
1517		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1518				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1519				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1520				NUM_BANKS(ADDR_SURF_8_BANK));
1521		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1522				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1523				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1524				NUM_BANKS(ADDR_SURF_8_BANK));
1525		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1526				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1527				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1528				NUM_BANKS(ADDR_SURF_8_BANK));
1529		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1530				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1531				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1532				NUM_BANKS(ADDR_SURF_8_BANK));
1533		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1534				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1535				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1536				NUM_BANKS(ADDR_SURF_8_BANK));
1537		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1538				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1539				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1540				NUM_BANKS(ADDR_SURF_16_BANK));
1541		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1542				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1543				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1544				NUM_BANKS(ADDR_SURF_16_BANK));
1545		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1546				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1547				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1548				NUM_BANKS(ADDR_SURF_16_BANK));
1549		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1550				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1551				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1552				NUM_BANKS(ADDR_SURF_16_BANK));
1553		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1554				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1555				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1556				NUM_BANKS(ADDR_SURF_16_BANK));
1557		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1558				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1559				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1560				NUM_BANKS(ADDR_SURF_16_BANK));
1561		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1562				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1563				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1564				NUM_BANKS(ADDR_SURF_8_BANK));
1565
1566		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1567			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1568		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1569			if (reg_offset != 7)
1570				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1571		break;
1572	}
1573}
1574
1575/**
1576 * gfx_v7_0_select_se_sh - select which SE, SH to address
1577 *
1578 * @adev: amdgpu_device pointer
1579 * @se_num: shader engine to address
1580 * @sh_num: sh block to address
1581 *
1582 * Select which SE, SH combinations to address. Certain
1583 * registers are instanced per SE or SH.  0xffffffff means
1584 * broadcast to all SEs or SHs (CIK).
1585 */
1586static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1587				  u32 se_num, u32 sh_num, u32 instance)
 
1588{
1589	u32 data;
1590
1591	if (instance == 0xffffffff)
1592		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1593	else
1594		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1595
1596	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1597		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1598			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
1599	else if (se_num == 0xffffffff)
1600		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
1601			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
1602	else if (sh_num == 0xffffffff)
1603		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1604			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1605	else
1606		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
1607			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1608	WREG32(mmGRBM_GFX_INDEX, data);
1609}
1610
1611/**
1612 * gfx_v7_0_create_bitmask - create a bitmask
1613 *
1614 * @bit_width: length of the mask
1615 *
1616 * create a variable length bit mask (CIK).
1617 * Returns the bitmask.
1618 */
1619static u32 gfx_v7_0_create_bitmask(u32 bit_width)
1620{
1621	return (u32)((1ULL << bit_width) - 1);
1622}
1623
1624/**
1625 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1626 *
1627 * @adev: amdgpu_device pointer
1628 *
1629 * Calculates the bitmask of enabled RBs (CIK).
1630 * Returns the enabled RB bitmask.
1631 */
1632static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1633{
1634	u32 data, mask;
1635
1636	data = RREG32(mmCC_RB_BACKEND_DISABLE);
1637	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1638
1639	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1640	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1641
1642	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
1643				       adev->gfx.config.max_sh_per_se);
1644
1645	return (~data) & mask;
1646}
1647
1648static void
1649gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1650{
1651	switch (adev->asic_type) {
1652	case CHIP_BONAIRE:
1653		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1654			  SE_XSEL(1) | SE_YSEL(1);
1655		*rconf1 |= 0x0;
1656		break;
1657	case CHIP_HAWAII:
1658		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1659			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1660			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1661			  SE_YSEL(3);
1662		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1663			   SE_PAIR_YSEL(2);
1664		break;
1665	case CHIP_KAVERI:
1666		*rconf |= RB_MAP_PKR0(2);
1667		*rconf1 |= 0x0;
1668		break;
1669	case CHIP_KABINI:
1670	case CHIP_MULLINS:
1671		*rconf |= 0x0;
1672		*rconf1 |= 0x0;
1673		break;
1674	default:
1675		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1676		break;
1677	}
1678}
1679
1680static void
1681gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1682					u32 raster_config, u32 raster_config_1,
1683					unsigned rb_mask, unsigned num_rb)
1684{
1685	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1686	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1687	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1688	unsigned rb_per_se = num_rb / num_se;
1689	unsigned se_mask[4];
1690	unsigned se;
1691
1692	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1693	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1694	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1695	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1696
1697	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1698	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1699	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1700
1701	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1702			     (!se_mask[2] && !se_mask[3]))) {
1703		raster_config_1 &= ~SE_PAIR_MAP_MASK;
1704
1705		if (!se_mask[0] && !se_mask[1]) {
1706			raster_config_1 |=
1707				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1708		} else {
1709			raster_config_1 |=
1710				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1711		}
1712	}
1713
1714	for (se = 0; se < num_se; se++) {
1715		unsigned raster_config_se = raster_config;
1716		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1717		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1718		int idx = (se / 2) * 2;
1719
1720		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1721			raster_config_se &= ~SE_MAP_MASK;
1722
1723			if (!se_mask[idx]) {
1724				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1725			} else {
1726				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1727			}
1728		}
1729
1730		pkr0_mask &= rb_mask;
1731		pkr1_mask &= rb_mask;
1732		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1733			raster_config_se &= ~PKR_MAP_MASK;
1734
1735			if (!pkr0_mask) {
1736				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1737			} else {
1738				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1739			}
1740		}
1741
1742		if (rb_per_se >= 2) {
1743			unsigned rb0_mask = 1 << (se * rb_per_se);
1744			unsigned rb1_mask = rb0_mask << 1;
1745
1746			rb0_mask &= rb_mask;
1747			rb1_mask &= rb_mask;
1748			if (!rb0_mask || !rb1_mask) {
1749				raster_config_se &= ~RB_MAP_PKR0_MASK;
1750
1751				if (!rb0_mask) {
1752					raster_config_se |=
1753						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1754				} else {
1755					raster_config_se |=
1756						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1757				}
1758			}
1759
1760			if (rb_per_se > 2) {
1761				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1762				rb1_mask = rb0_mask << 1;
1763				rb0_mask &= rb_mask;
1764				rb1_mask &= rb_mask;
1765				if (!rb0_mask || !rb1_mask) {
1766					raster_config_se &= ~RB_MAP_PKR1_MASK;
1767
1768					if (!rb0_mask) {
1769						raster_config_se |=
1770							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1771					} else {
1772						raster_config_se |=
1773							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1774					}
1775				}
1776			}
1777		}
1778
1779		/* GRBM_GFX_INDEX has a different offset on CI+ */
1780		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1781		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1782		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1783	}
1784
1785	/* GRBM_GFX_INDEX has a different offset on CI+ */
1786	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1787}
1788
1789/**
1790 * gfx_v7_0_setup_rb - setup the RBs on the asic
1791 *
1792 * @adev: amdgpu_device pointer
1793 * @se_num: number of SEs (shader engines) for the asic
1794 * @sh_per_se: number of SH blocks per SE for the asic
1795 *
1796 * Configures per-SE/SH RB registers (CIK).
1797 */
1798static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1799{
1800	int i, j;
1801	u32 data;
1802	u32 raster_config = 0, raster_config_1 = 0;
1803	u32 active_rbs = 0;
1804	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1805					adev->gfx.config.max_sh_per_se;
1806	unsigned num_rb_pipes;
1807
1808	mutex_lock(&adev->grbm_idx_mutex);
1809	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1810		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1811			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1812			data = gfx_v7_0_get_rb_active_bitmap(adev);
1813			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1814					       rb_bitmap_width_per_sh);
1815		}
1816	}
1817	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1818
1819	adev->gfx.config.backend_enable_mask = active_rbs;
1820	adev->gfx.config.num_rbs = hweight32(active_rbs);
1821
1822	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1823			     adev->gfx.config.max_shader_engines, 16);
1824
1825	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1826
1827	if (!adev->gfx.config.backend_enable_mask ||
1828			adev->gfx.config.num_rbs >= num_rb_pipes) {
1829		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1830		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1831	} else {
1832		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1833							adev->gfx.config.backend_enable_mask,
1834							num_rb_pipes);
1835	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1836	mutex_unlock(&adev->grbm_idx_mutex);
1837}
1838
 
1839/**
1840 * gmc_v7_0_init_compute_vmid - gart enable
1841 *
1842 * @rdev: amdgpu_device pointer
1843 *
1844 * Initialize compute vmid sh_mem registers
1845 *
1846 */
1847#define DEFAULT_SH_MEM_BASES	(0x6000)
1848#define FIRST_COMPUTE_VMID	(8)
1849#define LAST_COMPUTE_VMID	(16)
1850static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1851{
1852	int i;
1853	uint32_t sh_mem_config;
1854	uint32_t sh_mem_bases;
1855
1856	/*
1857	 * Configure apertures:
1858	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1859	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1860	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1861	*/
1862	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1863	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1864			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1865	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
1866	mutex_lock(&adev->srbm_mutex);
1867	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1868		cik_srbm_select(adev, 0, 0, 0, i);
1869		/* CP and shaders */
1870		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
1871		WREG32(mmSH_MEM_APE1_BASE, 1);
1872		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1873		WREG32(mmSH_MEM_BASES, sh_mem_bases);
1874	}
1875	cik_srbm_select(adev, 0, 0, 0, 0);
1876	mutex_unlock(&adev->srbm_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1877}
1878
1879/**
1880 * gfx_v7_0_gpu_init - setup the 3D engine
1881 *
1882 * @adev: amdgpu_device pointer
1883 *
1884 * Configures the 3D engine and tiling configuration
1885 * registers so that the 3D engine is usable.
1886 */
1887static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
1888{
1889	u32 tmp, sh_mem_cfg;
 
1890	int i;
1891
1892	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1893
1894	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1895	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1896	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1897
1898	gfx_v7_0_tiling_mode_table_init(adev);
1899
1900	gfx_v7_0_setup_rb(adev);
1901	gfx_v7_0_get_cu_info(adev);
 
1902
1903	/* set HW defaults for 3D engine */
1904	WREG32(mmCP_MEQ_THRESHOLDS,
1905	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1906	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1907
1908	mutex_lock(&adev->grbm_idx_mutex);
1909	/*
1910	 * making sure that the following register writes will be broadcasted
1911	 * to all the shaders
1912	 */
1913	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1914
1915	/* XXX SH_MEM regs */
1916	/* where to put LDS, scratch, GPUVM in FSA64 space */
1917	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1918				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 
 
 
 
 
 
 
 
 
 
 
 
 
1919
1920	mutex_lock(&adev->srbm_mutex);
1921	for (i = 0; i < 16; i++) {
 
 
 
 
1922		cik_srbm_select(adev, 0, 0, 0, i);
1923		/* CP and shaders */
1924		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1925		WREG32(mmSH_MEM_APE1_BASE, 1);
1926		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1927		WREG32(mmSH_MEM_BASES, 0);
1928	}
1929	cik_srbm_select(adev, 0, 0, 0, 0);
1930	mutex_unlock(&adev->srbm_mutex);
1931
1932	gmc_v7_0_init_compute_vmid(adev);
 
1933
1934	WREG32(mmSX_DEBUG_1, 0x20);
1935
1936	WREG32(mmTA_CNTL_AUX, 0x00010000);
1937
1938	tmp = RREG32(mmSPI_CONFIG_CNTL);
1939	tmp |= 0x03000000;
1940	WREG32(mmSPI_CONFIG_CNTL, tmp);
1941
1942	WREG32(mmSQ_CONFIG, 1);
1943
1944	WREG32(mmDB_DEBUG, 0);
1945
1946	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
1947	tmp |= 0x00000400;
1948	WREG32(mmDB_DEBUG2, tmp);
1949
1950	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
1951	tmp |= 0x00020200;
1952	WREG32(mmDB_DEBUG3, tmp);
1953
1954	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
1955	tmp |= 0x00018208;
1956	WREG32(mmCB_HW_CONTROL, tmp);
1957
1958	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1959
1960	WREG32(mmPA_SC_FIFO_SIZE,
1961		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1962		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1963		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1964		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1965
1966	WREG32(mmVGT_NUM_INSTANCES, 1);
1967
1968	WREG32(mmCP_PERFMON_CNTL, 0);
1969
1970	WREG32(mmSQ_CONFIG, 0);
1971
1972	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
1973		((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
1974		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
1975
1976	WREG32(mmVGT_CACHE_INVALIDATION,
1977		(VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
1978		(ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
1979
1980	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
1981	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
1982
1983	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
1984			(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
1985	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
1986	mutex_unlock(&adev->grbm_idx_mutex);
1987
1988	udelay(50);
1989}
 
 
 
 
1990
1991/*
1992 * GPU scratch registers helpers function.
1993 */
1994/**
1995 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
1996 *
1997 * @adev: amdgpu_device pointer
1998 *
1999 * Set up the number and offset of the CP scratch registers.
2000 * NOTE: use of CP scratch registers is a legacy inferface and
2001 * is not used by default on newer asics (r6xx+).  On newer asics,
2002 * memory buffers are used for fences rather than scratch regs.
2003 */
2004static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
2005{
2006	int i;
2007
2008	adev->gfx.scratch.num_reg = 7;
2009	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
2010	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
2011		adev->gfx.scratch.free[i] = true;
2012		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
2013	}
2014}
2015
2016/**
2017 * gfx_v7_0_ring_test_ring - basic gfx ring test
2018 *
2019 * @adev: amdgpu_device pointer
2020 * @ring: amdgpu_ring structure holding ring information
2021 *
2022 * Allocate a scratch register and write to it using the gfx ring (CIK).
2023 * Provides a basic gfx ring test to verify that the ring is working.
2024 * Used by gfx_v7_0_cp_gfx_resume();
2025 * Returns 0 on success, error on failure.
2026 */
2027static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2028{
2029	struct amdgpu_device *adev = ring->adev;
2030	uint32_t scratch;
2031	uint32_t tmp = 0;
2032	unsigned i;
2033	int r;
2034
2035	r = amdgpu_gfx_scratch_get(adev, &scratch);
2036	if (r) {
2037		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
2038		return r;
2039	}
2040	WREG32(scratch, 0xCAFEDEAD);
2041	r = amdgpu_ring_alloc(ring, 3);
2042	if (r) {
2043		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
2044		amdgpu_gfx_scratch_free(adev, scratch);
2045		return r;
2046	}
2047	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2048	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2049	amdgpu_ring_write(ring, 0xDEADBEEF);
2050	amdgpu_ring_commit(ring);
2051
2052	for (i = 0; i < adev->usec_timeout; i++) {
2053		tmp = RREG32(scratch);
2054		if (tmp == 0xDEADBEEF)
2055			break;
2056		DRM_UDELAY(1);
2057	}
2058	if (i < adev->usec_timeout) {
2059		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2060	} else {
2061		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2062			  ring->idx, scratch, tmp);
2063		r = -EINVAL;
2064	}
2065	amdgpu_gfx_scratch_free(adev, scratch);
 
2066	return r;
2067}
2068
2069/**
2070 * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp
2071 *
2072 * @adev: amdgpu_device pointer
2073 * @ridx: amdgpu ring index
2074 *
2075 * Emits an hdp flush on the cp.
2076 */
2077static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2078{
2079	u32 ref_and_mask;
2080	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2081
2082	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2083		switch (ring->me) {
2084		case 1:
2085			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2086			break;
2087		case 2:
2088			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2089			break;
2090		default:
2091			return;
2092		}
2093	} else {
2094		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2095	}
2096
2097	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2098	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2099				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
2100				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
2101	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2102	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2103	amdgpu_ring_write(ring, ref_and_mask);
2104	amdgpu_ring_write(ring, ref_and_mask);
2105	amdgpu_ring_write(ring, 0x20); /* poll interval */
2106}
2107
2108static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2109{
2110	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2111	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2112		EVENT_INDEX(4));
2113
2114	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2115	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2116		EVENT_INDEX(0));
2117}
2118
2119
2120/**
2121 * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
2122 *
2123 * @adev: amdgpu_device pointer
2124 * @ridx: amdgpu ring index
2125 *
2126 * Emits an hdp invalidate on the cp.
2127 */
2128static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
2129{
2130	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2131	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2132				 WRITE_DATA_DST_SEL(0) |
2133				 WR_CONFIRM));
2134	amdgpu_ring_write(ring, mmHDP_DEBUG0);
2135	amdgpu_ring_write(ring, 0);
2136	amdgpu_ring_write(ring, 1);
2137}
2138
2139/**
2140 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2141 *
2142 * @adev: amdgpu_device pointer
2143 * @fence: amdgpu fence object
 
 
2144 *
2145 * Emits a fence sequnce number on the gfx ring and flushes
2146 * GPU caches.
2147 */
2148static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2149					 u64 seq, unsigned flags)
2150{
2151	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2152	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
 
 
2153	/* Workaround for cache flush problems. First send a dummy EOP
2154	 * event down the pipe with seq one below.
2155	 */
2156	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2157	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2158				 EOP_TC_ACTION_EN |
2159				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2160				 EVENT_INDEX(5)));
2161	amdgpu_ring_write(ring, addr & 0xfffffffc);
2162	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2163				DATA_SEL(1) | INT_SEL(0));
2164	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2165	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2166
2167	/* Then send the real EOP event down the pipe. */
2168	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2169	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2170				 EOP_TC_ACTION_EN |
2171				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2172				 EVENT_INDEX(5)));
 
2173	amdgpu_ring_write(ring, addr & 0xfffffffc);
2174	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2175				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2176	amdgpu_ring_write(ring, lower_32_bits(seq));
2177	amdgpu_ring_write(ring, upper_32_bits(seq));
2178}
2179
2180/**
2181 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2182 *
2183 * @adev: amdgpu_device pointer
2184 * @fence: amdgpu fence object
 
 
2185 *
2186 * Emits a fence sequnce number on the compute ring and flushes
2187 * GPU caches.
2188 */
2189static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2190					     u64 addr, u64 seq,
2191					     unsigned flags)
2192{
2193	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2194	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2195
2196	/* RELEASE_MEM - flush caches, send int */
2197	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2198	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2199				 EOP_TC_ACTION_EN |
2200				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2201				 EVENT_INDEX(5)));
2202	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2203	amdgpu_ring_write(ring, addr & 0xfffffffc);
2204	amdgpu_ring_write(ring, upper_32_bits(addr));
2205	amdgpu_ring_write(ring, lower_32_bits(seq));
2206	amdgpu_ring_write(ring, upper_32_bits(seq));
2207}
2208
2209/*
2210 * IB stuff
2211 */
2212/**
2213 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
2214 *
2215 * @ring: amdgpu_ring structure holding ring information
 
2216 * @ib: amdgpu indirect buffer object
 
2217 *
2218 * Emits an DE (drawing engine) or CE (constant engine) IB
2219 * on the gfx ring.  IBs are usually generated by userspace
2220 * acceleration drivers and submitted to the kernel for
2221 * sheduling on the ring.  This function schedules the IB
2222 * on the gfx ring for execution by the GPU.
2223 */
2224static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2225				      struct amdgpu_ib *ib,
2226				      unsigned vm_id, bool ctx_switch)
 
2227{
 
2228	u32 header, control = 0;
2229
2230	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
2231	if (ctx_switch) {
2232		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2233		amdgpu_ring_write(ring, 0);
2234	}
2235
2236	if (ib->flags & AMDGPU_IB_FLAG_CE)
2237		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2238	else
2239		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2240
2241	control |= ib->length_dw | (vm_id << 24);
2242
2243	amdgpu_ring_write(ring, header);
2244	amdgpu_ring_write(ring,
2245#ifdef __BIG_ENDIAN
2246			  (2 << 0) |
2247#endif
2248			  (ib->gpu_addr & 0xFFFFFFFC));
2249	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2250	amdgpu_ring_write(ring, control);
2251}
2252
2253static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 
2254					  struct amdgpu_ib *ib,
2255					  unsigned vm_id, bool ctx_switch)
2256{
2257	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2258
2259	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2260	amdgpu_ring_write(ring,
2261#ifdef __BIG_ENDIAN
2262					  (2 << 0) |
2263#endif
2264					  (ib->gpu_addr & 0xFFFFFFFC));
2265	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2266	amdgpu_ring_write(ring, control);
2267}
2268
2269static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2270{
2271	uint32_t dw2 = 0;
2272
2273	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
2274	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2275		gfx_v7_0_ring_emit_vgt_flush(ring);
2276		/* set load_global_config & load_global_uconfig */
2277		dw2 |= 0x8001;
2278		/* set load_cs_sh_regs */
2279		dw2 |= 0x01000000;
2280		/* set load_per_context_state & load_gfx_sh_regs */
2281		dw2 |= 0x10002;
2282	}
2283
2284	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2285	amdgpu_ring_write(ring, dw2);
2286	amdgpu_ring_write(ring, 0);
2287}
2288
2289/**
2290 * gfx_v7_0_ring_test_ib - basic ring IB test
2291 *
2292 * @ring: amdgpu_ring structure holding ring information
 
2293 *
2294 * Allocate an IB and execute it on the gfx ring (CIK).
2295 * Provides a basic gfx ring test to verify that IBs are working.
2296 * Returns 0 on success, error on failure.
2297 */
2298static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2299{
2300	struct amdgpu_device *adev = ring->adev;
2301	struct amdgpu_ib ib;
2302	struct dma_fence *f = NULL;
2303	uint32_t scratch;
2304	uint32_t tmp = 0;
2305	long r;
2306
2307	r = amdgpu_gfx_scratch_get(adev, &scratch);
2308	if (r) {
2309		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
2310		return r;
2311	}
2312	WREG32(scratch, 0xCAFEDEAD);
2313	memset(&ib, 0, sizeof(ib));
2314	r = amdgpu_ib_get(adev, NULL, 256, &ib);
2315	if (r) {
2316		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
2317		goto err1;
2318	}
2319	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2320	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2321	ib.ptr[2] = 0xDEADBEEF;
2322	ib.length_dw = 3;
2323
2324	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
2325	if (r)
2326		goto err2;
2327
2328	r = dma_fence_wait_timeout(f, false, timeout);
2329	if (r == 0) {
2330		DRM_ERROR("amdgpu: IB test timed out\n");
2331		r = -ETIMEDOUT;
2332		goto err2;
2333	} else if (r < 0) {
2334		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
2335		goto err2;
2336	}
2337	tmp = RREG32(scratch);
2338	if (tmp == 0xDEADBEEF) {
2339		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
2340		r = 0;
2341	} else {
2342		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
2343			  scratch, tmp);
2344		r = -EINVAL;
2345	}
2346
2347err2:
2348	amdgpu_ib_free(adev, &ib, NULL);
2349	dma_fence_put(f);
2350err1:
2351	amdgpu_gfx_scratch_free(adev, scratch);
2352	return r;
2353}
2354
2355/*
2356 * CP.
2357 * On CIK, gfx and compute now have independant command processors.
2358 *
2359 * GFX
2360 * Gfx consists of a single ring and can process both gfx jobs and
2361 * compute jobs.  The gfx CP consists of three microengines (ME):
2362 * PFP - Pre-Fetch Parser
2363 * ME - Micro Engine
2364 * CE - Constant Engine
2365 * The PFP and ME make up what is considered the Drawing Engine (DE).
2366 * The CE is an asynchronous engine used for updating buffer desciptors
2367 * used by the DE so that they can be loaded into cache in parallel
2368 * while the DE is processing state update packets.
2369 *
2370 * Compute
2371 * The compute CP consists of two microengines (ME):
2372 * MEC1 - Compute MicroEngine 1
2373 * MEC2 - Compute MicroEngine 2
2374 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2375 * The queues are exposed to userspace and are programmed directly
2376 * by the compute runtime.
2377 */
2378/**
2379 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2380 *
2381 * @adev: amdgpu_device pointer
2382 * @enable: enable or disable the MEs
2383 *
2384 * Halts or unhalts the gfx MEs.
2385 */
2386static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2387{
2388	int i;
2389
2390	if (enable) {
2391		WREG32(mmCP_ME_CNTL, 0);
2392	} else {
2393		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
2394		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2395			adev->gfx.gfx_ring[i].ready = false;
2396	}
2397	udelay(50);
2398}
2399
2400/**
2401 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2402 *
2403 * @adev: amdgpu_device pointer
2404 *
2405 * Loads the gfx PFP, ME, and CE ucode.
2406 * Returns 0 for success, -EINVAL if the ucode is not available.
2407 */
2408static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2409{
2410	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2411	const struct gfx_firmware_header_v1_0 *ce_hdr;
2412	const struct gfx_firmware_header_v1_0 *me_hdr;
2413	const __le32 *fw_data;
2414	unsigned i, fw_size;
2415
2416	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2417		return -EINVAL;
2418
2419	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2420	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2421	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2422
2423	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2424	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2425	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2426	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2427	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2428	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2429	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2430	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2431	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2432
2433	gfx_v7_0_cp_gfx_enable(adev, false);
2434
2435	/* PFP */
2436	fw_data = (const __le32 *)
2437		(adev->gfx.pfp_fw->data +
2438		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2439	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2440	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2441	for (i = 0; i < fw_size; i++)
2442		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2443	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2444
2445	/* CE */
2446	fw_data = (const __le32 *)
2447		(adev->gfx.ce_fw->data +
2448		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2449	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2450	WREG32(mmCP_CE_UCODE_ADDR, 0);
2451	for (i = 0; i < fw_size; i++)
2452		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2453	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2454
2455	/* ME */
2456	fw_data = (const __le32 *)
2457		(adev->gfx.me_fw->data +
2458		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2459	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2460	WREG32(mmCP_ME_RAM_WADDR, 0);
2461	for (i = 0; i < fw_size; i++)
2462		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2463	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2464
2465	return 0;
2466}
2467
2468/**
2469 * gfx_v7_0_cp_gfx_start - start the gfx ring
2470 *
2471 * @adev: amdgpu_device pointer
2472 *
2473 * Enables the ring and loads the clear state context and other
2474 * packets required to init the ring.
2475 * Returns 0 for success, error for failure.
2476 */
2477static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2478{
2479	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2480	const struct cs_section_def *sect = NULL;
2481	const struct cs_extent_def *ext = NULL;
2482	int r, i;
2483
2484	/* init the CP */
2485	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2486	WREG32(mmCP_ENDIAN_SWAP, 0);
2487	WREG32(mmCP_DEVICE_ID, 1);
2488
2489	gfx_v7_0_cp_gfx_enable(adev, true);
2490
2491	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2492	if (r) {
2493		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2494		return r;
2495	}
2496
2497	/* init the CE partitions.  CE only used for gfx on CIK */
2498	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2499	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2500	amdgpu_ring_write(ring, 0x8000);
2501	amdgpu_ring_write(ring, 0x8000);
2502
2503	/* clear state buffer */
2504	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2505	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2506
2507	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2508	amdgpu_ring_write(ring, 0x80000000);
2509	amdgpu_ring_write(ring, 0x80000000);
2510
2511	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2512		for (ext = sect->section; ext->extent != NULL; ++ext) {
2513			if (sect->id == SECT_CONTEXT) {
2514				amdgpu_ring_write(ring,
2515						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2516				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2517				for (i = 0; i < ext->reg_count; i++)
2518					amdgpu_ring_write(ring, ext->extent[i]);
2519			}
2520		}
2521	}
2522
2523	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2524	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2525	switch (adev->asic_type) {
2526	case CHIP_BONAIRE:
2527		amdgpu_ring_write(ring, 0x16000012);
2528		amdgpu_ring_write(ring, 0x00000000);
2529		break;
2530	case CHIP_KAVERI:
2531		amdgpu_ring_write(ring, 0x00000000); /* XXX */
2532		amdgpu_ring_write(ring, 0x00000000);
2533		break;
2534	case CHIP_KABINI:
2535	case CHIP_MULLINS:
2536		amdgpu_ring_write(ring, 0x00000000); /* XXX */
2537		amdgpu_ring_write(ring, 0x00000000);
2538		break;
2539	case CHIP_HAWAII:
2540		amdgpu_ring_write(ring, 0x3a00161a);
2541		amdgpu_ring_write(ring, 0x0000002e);
2542		break;
2543	default:
2544		amdgpu_ring_write(ring, 0x00000000);
2545		amdgpu_ring_write(ring, 0x00000000);
2546		break;
2547	}
2548
2549	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2550	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2551
2552	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2553	amdgpu_ring_write(ring, 0);
2554
2555	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2556	amdgpu_ring_write(ring, 0x00000316);
2557	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2558	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2559
2560	amdgpu_ring_commit(ring);
2561
2562	return 0;
2563}
2564
2565/**
2566 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2567 *
2568 * @adev: amdgpu_device pointer
2569 *
2570 * Program the location and size of the gfx ring buffer
2571 * and test it to make sure it's working.
2572 * Returns 0 for success, error for failure.
2573 */
2574static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2575{
2576	struct amdgpu_ring *ring;
2577	u32 tmp;
2578	u32 rb_bufsz;
2579	u64 rb_addr, rptr_addr;
2580	int r;
2581
2582	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2583	if (adev->asic_type != CHIP_HAWAII)
2584		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2585
2586	/* Set the write pointer delay */
2587	WREG32(mmCP_RB_WPTR_DELAY, 0);
2588
2589	/* set the RB to use vmid 0 */
2590	WREG32(mmCP_RB_VMID, 0);
2591
2592	WREG32(mmSCRATCH_ADDR, 0);
2593
2594	/* ring 0 - compute and gfx */
2595	/* Set ring buffer size */
2596	ring = &adev->gfx.gfx_ring[0];
2597	rb_bufsz = order_base_2(ring->ring_size / 8);
2598	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2599#ifdef __BIG_ENDIAN
2600	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2601#endif
2602	WREG32(mmCP_RB0_CNTL, tmp);
2603
2604	/* Initialize the ring buffer's read and write pointers */
2605	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2606	ring->wptr = 0;
2607	WREG32(mmCP_RB0_WPTR, ring->wptr);
2608
2609	/* set the wb address wether it's enabled or not */
2610	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2611	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2612	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2613
2614	/* scratch register shadowing is no longer supported */
2615	WREG32(mmSCRATCH_UMSK, 0);
2616
2617	mdelay(1);
2618	WREG32(mmCP_RB0_CNTL, tmp);
2619
2620	rb_addr = ring->gpu_addr >> 8;
2621	WREG32(mmCP_RB0_BASE, rb_addr);
2622	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2623
2624	/* start the ring */
2625	gfx_v7_0_cp_gfx_start(adev);
2626	ring->ready = true;
2627	r = amdgpu_ring_test_ring(ring);
2628	if (r) {
2629		ring->ready = false;
2630		return r;
2631	}
2632
2633	return 0;
2634}
2635
2636static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2637{
2638	return ring->adev->wb.wb[ring->rptr_offs];
2639}
2640
2641static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2642{
2643	struct amdgpu_device *adev = ring->adev;
2644
2645	return RREG32(mmCP_RB0_WPTR);
2646}
2647
2648static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2649{
2650	struct amdgpu_device *adev = ring->adev;
2651
2652	WREG32(mmCP_RB0_WPTR, ring->wptr);
2653	(void)RREG32(mmCP_RB0_WPTR);
2654}
2655
2656static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2657{
2658	/* XXX check if swapping is necessary on BE */
2659	return ring->adev->wb.wb[ring->wptr_offs];
2660}
2661
2662static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2663{
2664	struct amdgpu_device *adev = ring->adev;
2665
2666	/* XXX check if swapping is necessary on BE */
2667	adev->wb.wb[ring->wptr_offs] = ring->wptr;
2668	WDOORBELL32(ring->doorbell_index, ring->wptr);
2669}
2670
2671/**
2672 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2673 *
2674 * @adev: amdgpu_device pointer
2675 * @enable: enable or disable the MEs
2676 *
2677 * Halts or unhalts the compute MEs.
2678 */
2679static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2680{
2681	int i;
2682
2683	if (enable) {
2684		WREG32(mmCP_MEC_CNTL, 0);
2685	} else {
2686		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2687		for (i = 0; i < adev->gfx.num_compute_rings; i++)
2688			adev->gfx.compute_ring[i].ready = false;
2689	}
2690	udelay(50);
2691}
2692
2693/**
2694 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2695 *
2696 * @adev: amdgpu_device pointer
2697 *
2698 * Loads the compute MEC1&2 ucode.
2699 * Returns 0 for success, -EINVAL if the ucode is not available.
2700 */
2701static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2702{
2703	const struct gfx_firmware_header_v1_0 *mec_hdr;
2704	const __le32 *fw_data;
2705	unsigned i, fw_size;
2706
2707	if (!adev->gfx.mec_fw)
2708		return -EINVAL;
2709
2710	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2711	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2712	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2713	adev->gfx.mec_feature_version = le32_to_cpu(
2714					mec_hdr->ucode_feature_version);
2715
2716	gfx_v7_0_cp_compute_enable(adev, false);
2717
2718	/* MEC1 */
2719	fw_data = (const __le32 *)
2720		(adev->gfx.mec_fw->data +
2721		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2722	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2723	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2724	for (i = 0; i < fw_size; i++)
2725		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2726	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2727
2728	if (adev->asic_type == CHIP_KAVERI) {
2729		const struct gfx_firmware_header_v1_0 *mec2_hdr;
2730
2731		if (!adev->gfx.mec2_fw)
2732			return -EINVAL;
2733
2734		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2735		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2736		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2737		adev->gfx.mec2_feature_version = le32_to_cpu(
2738				mec2_hdr->ucode_feature_version);
2739
2740		/* MEC2 */
2741		fw_data = (const __le32 *)
2742			(adev->gfx.mec2_fw->data +
2743			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2744		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2745		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2746		for (i = 0; i < fw_size; i++)
2747			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2748		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2749	}
2750
2751	return 0;
2752}
2753
2754/**
2755 * gfx_v7_0_cp_compute_fini - stop the compute queues
2756 *
2757 * @adev: amdgpu_device pointer
2758 *
2759 * Stop the compute queues and tear down the driver queue
2760 * info.
2761 */
2762static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2763{
2764	int i, r;
2765
2766	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2767		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2768
2769		if (ring->mqd_obj) {
2770			r = amdgpu_bo_reserve(ring->mqd_obj, false);
2771			if (unlikely(r != 0))
2772				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
2773
2774			amdgpu_bo_unpin(ring->mqd_obj);
2775			amdgpu_bo_unreserve(ring->mqd_obj);
2776
2777			amdgpu_bo_unref(&ring->mqd_obj);
2778			ring->mqd_obj = NULL;
2779		}
2780	}
2781}
2782
2783static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2784{
2785	int r;
2786
2787	if (adev->gfx.mec.hpd_eop_obj) {
2788		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2789		if (unlikely(r != 0))
2790			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
2791		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
2792		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2793
2794		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
2795		adev->gfx.mec.hpd_eop_obj = NULL;
2796	}
2797}
2798
2799#define MEC_HPD_SIZE 2048
2800
2801static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2802{
2803	int r;
2804	u32 *hpd;
 
2805
2806	/*
2807	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
2808	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
2809	 * Nonetheless, we assign only 1 pipe because all other pipes will
2810	 * be handled by KFD
2811	 */
2812	adev->gfx.mec.num_mec = 1;
2813	adev->gfx.mec.num_pipe = 1;
2814	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
2815
2816	if (adev->gfx.mec.hpd_eop_obj == NULL) {
2817		r = amdgpu_bo_create(adev,
2818				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
2819				     PAGE_SIZE, true,
2820				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2821				     &adev->gfx.mec.hpd_eop_obj);
2822		if (r) {
2823			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
2824			return r;
2825		}
2826	}
2827
2828	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2829	if (unlikely(r != 0)) {
2830		gfx_v7_0_mec_fini(adev);
2831		return r;
2832	}
2833	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
2834			  &adev->gfx.mec.hpd_eop_gpu_addr);
2835	if (r) {
2836		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
2837		gfx_v7_0_mec_fini(adev);
2838		return r;
2839	}
2840	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
2841	if (r) {
2842		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
2843		gfx_v7_0_mec_fini(adev);
2844		return r;
2845	}
2846
2847	/* clear memory.  Not sure if this is required or not */
2848	memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
2849
2850	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2851	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2852
2853	return 0;
2854}
2855
2856struct hqd_registers
 
2857{
2858	u32 cp_mqd_base_addr;
2859	u32 cp_mqd_base_addr_hi;
2860	u32 cp_hqd_active;
2861	u32 cp_hqd_vmid;
2862	u32 cp_hqd_persistent_state;
2863	u32 cp_hqd_pipe_priority;
2864	u32 cp_hqd_queue_priority;
2865	u32 cp_hqd_quantum;
2866	u32 cp_hqd_pq_base;
2867	u32 cp_hqd_pq_base_hi;
2868	u32 cp_hqd_pq_rptr;
2869	u32 cp_hqd_pq_rptr_report_addr;
2870	u32 cp_hqd_pq_rptr_report_addr_hi;
2871	u32 cp_hqd_pq_wptr_poll_addr;
2872	u32 cp_hqd_pq_wptr_poll_addr_hi;
2873	u32 cp_hqd_pq_doorbell_control;
2874	u32 cp_hqd_pq_wptr;
2875	u32 cp_hqd_pq_control;
2876	u32 cp_hqd_ib_base_addr;
2877	u32 cp_hqd_ib_base_addr_hi;
2878	u32 cp_hqd_ib_rptr;
2879	u32 cp_hqd_ib_control;
2880	u32 cp_hqd_iq_timer;
2881	u32 cp_hqd_iq_rptr;
2882	u32 cp_hqd_dequeue_request;
2883	u32 cp_hqd_dma_offload;
2884	u32 cp_hqd_sema_cmd;
2885	u32 cp_hqd_msg_type;
2886	u32 cp_hqd_atomic0_preop_lo;
2887	u32 cp_hqd_atomic0_preop_hi;
2888	u32 cp_hqd_atomic1_preop_lo;
2889	u32 cp_hqd_atomic1_preop_hi;
2890	u32 cp_hqd_hq_scheduler0;
2891	u32 cp_hqd_hq_scheduler1;
2892	u32 cp_mqd_control;
2893};
2894
2895struct bonaire_mqd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2896{
2897	u32 header;
2898	u32 dispatch_initiator;
2899	u32 dimensions[3];
2900	u32 start_idx[3];
2901	u32 num_threads[3];
2902	u32 pipeline_stat_enable;
2903	u32 perf_counter_enable;
2904	u32 pgm[2];
2905	u32 tba[2];
2906	u32 tma[2];
2907	u32 pgm_rsrc[2];
2908	u32 vmid;
2909	u32 resource_limits;
2910	u32 static_thread_mgmt01[2];
2911	u32 tmp_ring_size;
2912	u32 static_thread_mgmt23[2];
2913	u32 restart[3];
2914	u32 thread_trace_enable;
2915	u32 reserved1;
2916	u32 user_data[16];
2917	u32 vgtcs_invoke_count[2];
2918	struct hqd_registers queue_state;
2919	u32 dequeue_cntr;
2920	u32 interrupt_queue[64];
2921};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2922
2923/**
2924 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
2925 *
2926 * @adev: amdgpu_device pointer
2927 *
2928 * Program the compute queues and test them to make sure they
2929 * are working.
2930 * Returns 0 for success, error for failure.
2931 */
2932static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
2933{
2934	int r, i, j;
2935	u32 tmp;
2936	bool use_doorbell = true;
2937	u64 hqd_gpu_addr;
2938	u64 mqd_gpu_addr;
2939	u64 eop_gpu_addr;
2940	u64 wb_gpu_addr;
2941	u32 *buf;
2942	struct bonaire_mqd *mqd;
2943	struct amdgpu_ring *ring;
2944
2945	/* fix up chicken bits */
2946	tmp = RREG32(mmCP_CPF_DEBUG);
2947	tmp |= (1 << 23);
2948	WREG32(mmCP_CPF_DEBUG, tmp);
2949
2950	/* init the pipes */
2951	mutex_lock(&adev->srbm_mutex);
2952	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
2953		int me = (i < 4) ? 1 : 2;
2954		int pipe = (i < 4) ? i : (i - 4);
2955
2956		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
2957
2958		cik_srbm_select(adev, me, pipe, 0, 0);
2959
2960		/* write the EOP addr */
2961		WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2962		WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2963
2964		/* set the VMID assigned */
2965		WREG32(mmCP_HPD_EOP_VMID, 0);
2966
2967		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2968		tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2969		tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2970		tmp |= order_base_2(MEC_HPD_SIZE / 8);
2971		WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2972	}
2973	cik_srbm_select(adev, 0, 0, 0, 0);
2974	mutex_unlock(&adev->srbm_mutex);
2975
2976	/* init the queues.  Just two for now. */
2977	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2978		ring = &adev->gfx.compute_ring[i];
2979
2980		if (ring->mqd_obj == NULL) {
2981			r = amdgpu_bo_create(adev,
2982					     sizeof(struct bonaire_mqd),
2983					     PAGE_SIZE, true,
2984					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2985					     &ring->mqd_obj);
2986			if (r) {
2987				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2988				return r;
2989			}
2990		}
2991
2992		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2993		if (unlikely(r != 0)) {
2994			gfx_v7_0_cp_compute_fini(adev);
2995			return r;
2996		}
2997		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
2998				  &mqd_gpu_addr);
2999		if (r) {
3000			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
3001			gfx_v7_0_cp_compute_fini(adev);
3002			return r;
3003		}
3004		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
3005		if (r) {
3006			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
3007			gfx_v7_0_cp_compute_fini(adev);
3008			return r;
3009		}
3010
3011		/* init the mqd struct */
3012		memset(buf, 0, sizeof(struct bonaire_mqd));
3013
3014		mqd = (struct bonaire_mqd *)buf;
3015		mqd->header = 0xC0310800;
3016		mqd->static_thread_mgmt01[0] = 0xffffffff;
3017		mqd->static_thread_mgmt01[1] = 0xffffffff;
3018		mqd->static_thread_mgmt23[0] = 0xffffffff;
3019		mqd->static_thread_mgmt23[1] = 0xffffffff;
3020
3021		mutex_lock(&adev->srbm_mutex);
3022		cik_srbm_select(adev, ring->me,
3023				ring->pipe,
3024				ring->queue, 0);
3025
3026		/* disable wptr polling */
3027		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3028		tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
3029		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3030
3031		/* enable doorbell? */
3032		mqd->queue_state.cp_hqd_pq_doorbell_control =
3033			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3034		if (use_doorbell)
3035			mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3036		else
3037			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3038		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3039		       mqd->queue_state.cp_hqd_pq_doorbell_control);
3040
3041		/* disable the queue if it's active */
3042		mqd->queue_state.cp_hqd_dequeue_request = 0;
3043		mqd->queue_state.cp_hqd_pq_rptr = 0;
3044		mqd->queue_state.cp_hqd_pq_wptr= 0;
3045		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
3046			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
3047			for (j = 0; j < adev->usec_timeout; j++) {
3048				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
3049					break;
3050				udelay(1);
3051			}
3052			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
3053			WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
3054			WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3055		}
3056
3057		/* set the pointer to the MQD */
3058		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
3059		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3060		WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
3061		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
3062		/* set MQD vmid to 0 */
3063		mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
3064		mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
3065		WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
3066
3067		/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3068		hqd_gpu_addr = ring->gpu_addr >> 8;
3069		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
3070		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3071		WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
3072		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
3073
3074		/* set up the HQD, this is similar to CP_RB0_CNTL */
3075		mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
3076		mqd->queue_state.cp_hqd_pq_control &=
3077			~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
3078					CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
3079
3080		mqd->queue_state.cp_hqd_pq_control |=
3081			order_base_2(ring->ring_size / 8);
3082		mqd->queue_state.cp_hqd_pq_control |=
3083			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
3084#ifdef __BIG_ENDIAN
3085		mqd->queue_state.cp_hqd_pq_control |=
3086			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
3087#endif
3088		mqd->queue_state.cp_hqd_pq_control &=
3089			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
3090				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
3091				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
3092		mqd->queue_state.cp_hqd_pq_control |=
3093			CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
3094			CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
3095		WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
3096
3097		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3098		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3099		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3100		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3101		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
3102		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3103		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3104
3105		/* set the wb address wether it's enabled or not */
3106		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3107		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
3108		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
3109			upper_32_bits(wb_gpu_addr) & 0xffff;
3110		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3111		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
3112		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3113		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
3114
3115		/* enable the doorbell if requested */
3116		if (use_doorbell) {
3117			mqd->queue_state.cp_hqd_pq_doorbell_control =
3118				RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3119			mqd->queue_state.cp_hqd_pq_doorbell_control &=
3120				~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
3121			mqd->queue_state.cp_hqd_pq_doorbell_control |=
3122				(ring->doorbell_index <<
3123				 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
3124			mqd->queue_state.cp_hqd_pq_doorbell_control |=
3125				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3126			mqd->queue_state.cp_hqd_pq_doorbell_control &=
3127				~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
3128				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
3129
3130		} else {
3131			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
3132		}
3133		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3134		       mqd->queue_state.cp_hqd_pq_doorbell_control);
3135
3136		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3137		ring->wptr = 0;
3138		mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
3139		WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3140		mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3141
3142		/* set the vmid for the queue */
3143		mqd->queue_state.cp_hqd_vmid = 0;
3144		WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
3145
3146		/* activate the queue */
3147		mqd->queue_state.cp_hqd_active = 1;
3148		WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3149
3150		cik_srbm_select(adev, 0, 0, 0, 0);
3151		mutex_unlock(&adev->srbm_mutex);
3152
3153		amdgpu_bo_kunmap(ring->mqd_obj);
3154		amdgpu_bo_unreserve(ring->mqd_obj);
3155
3156		ring->ready = true;
3157	}
3158
3159	gfx_v7_0_cp_compute_enable(adev, true);
3160
3161	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3162		ring = &adev->gfx.compute_ring[i];
3163
3164		r = amdgpu_ring_test_ring(ring);
3165		if (r)
3166			ring->ready = false;
3167	}
3168
3169	return 0;
3170}
3171
3172static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3173{
3174	gfx_v7_0_cp_gfx_enable(adev, enable);
3175	gfx_v7_0_cp_compute_enable(adev, enable);
3176}
3177
3178static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3179{
3180	int r;
3181
3182	r = gfx_v7_0_cp_gfx_load_microcode(adev);
3183	if (r)
3184		return r;
3185	r = gfx_v7_0_cp_compute_load_microcode(adev);
3186	if (r)
3187		return r;
3188
3189	return 0;
3190}
3191
3192static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3193					       bool enable)
3194{
3195	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3196
3197	if (enable)
3198		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3199				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3200	else
3201		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3202				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3203	WREG32(mmCP_INT_CNTL_RING0, tmp);
3204}
3205
3206static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3207{
3208	int r;
3209
3210	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3211
3212	r = gfx_v7_0_cp_load_microcode(adev);
3213	if (r)
3214		return r;
3215
3216	r = gfx_v7_0_cp_gfx_resume(adev);
3217	if (r)
3218		return r;
3219	r = gfx_v7_0_cp_compute_resume(adev);
3220	if (r)
3221		return r;
3222
3223	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3224
3225	return 0;
3226}
3227
3228/**
3229 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3230 *
3231 * @ring: the ring to emmit the commands to
3232 *
3233 * Sync the command pipeline with the PFP. E.g. wait for everything
3234 * to be completed.
3235 */
3236static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3237{
3238	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3239	uint32_t seq = ring->fence_drv.sync_seq;
3240	uint64_t addr = ring->fence_drv.gpu_addr;
3241
3242	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3243	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3244				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3245				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
3246	amdgpu_ring_write(ring, addr & 0xfffffffc);
3247	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3248	amdgpu_ring_write(ring, seq);
3249	amdgpu_ring_write(ring, 0xffffffff);
3250	amdgpu_ring_write(ring, 4); /* poll interval */
3251
3252	if (usepfp) {
3253		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
3254		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3255		amdgpu_ring_write(ring, 0);
3256		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3257		amdgpu_ring_write(ring, 0);
3258	}
3259}
3260
3261/*
3262 * vm
3263 * VMID 0 is the physical GPU addresses as used by the kernel.
3264 * VMIDs 1-15 are used for userspace clients and are handled
3265 * by the amdgpu vm/hsa code.
3266 */
3267/**
3268 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3269 *
3270 * @adev: amdgpu_device pointer
 
 
3271 *
3272 * Update the page table base and flush the VM TLB
3273 * using the CP (CIK).
3274 */
3275static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3276					unsigned vm_id, uint64_t pd_addr)
3277{
3278	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3279
3280	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3281	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3282				 WRITE_DATA_DST_SEL(0)));
3283	if (vm_id < 8) {
3284		amdgpu_ring_write(ring,
3285				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
3286	} else {
3287		amdgpu_ring_write(ring,
3288				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
3289	}
3290	amdgpu_ring_write(ring, 0);
3291	amdgpu_ring_write(ring, pd_addr >> 12);
3292
3293	/* bits 0-15 are the VM contexts0-15 */
3294	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3295	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3296				 WRITE_DATA_DST_SEL(0)));
3297	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3298	amdgpu_ring_write(ring, 0);
3299	amdgpu_ring_write(ring, 1 << vm_id);
3300
3301	/* wait for the invalidate to complete */
3302	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3303	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3304				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
3305				 WAIT_REG_MEM_ENGINE(0))); /* me */
3306	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3307	amdgpu_ring_write(ring, 0);
3308	amdgpu_ring_write(ring, 0); /* ref */
3309	amdgpu_ring_write(ring, 0); /* mask */
3310	amdgpu_ring_write(ring, 0x20); /* poll interval */
3311
3312	/* compute doesn't have PFP */
3313	if (usepfp) {
3314		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3315		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3316		amdgpu_ring_write(ring, 0x0);
3317
3318		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
3319		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3320		amdgpu_ring_write(ring, 0);
3321		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3322		amdgpu_ring_write(ring, 0);
3323	}
3324}
3325
 
 
 
 
 
 
 
 
 
 
 
 
 
3326/*
3327 * RLC
3328 * The RLC is a multi-purpose microengine that handles a
3329 * variety of functions.
3330 */
3331static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
3332{
3333	int r;
3334
3335	/* save restore block */
3336	if (adev->gfx.rlc.save_restore_obj) {
3337		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3338		if (unlikely(r != 0))
3339			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
3340		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
3341		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3342
3343		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
3344		adev->gfx.rlc.save_restore_obj = NULL;
3345	}
3346
3347	/* clear state block */
3348	if (adev->gfx.rlc.clear_state_obj) {
3349		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3350		if (unlikely(r != 0))
3351			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
3352		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
3353		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3354
3355		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
3356		adev->gfx.rlc.clear_state_obj = NULL;
3357	}
3358
3359	/* clear state block */
3360	if (adev->gfx.rlc.cp_table_obj) {
3361		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3362		if (unlikely(r != 0))
3363			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3364		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
3365		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3366
3367		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
3368		adev->gfx.rlc.cp_table_obj = NULL;
3369	}
3370}
3371
3372static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3373{
3374	const u32 *src_ptr;
3375	volatile u32 *dst_ptr;
3376	u32 dws, i;
3377	const struct cs_section_def *cs_data;
3378	int r;
3379
3380	/* allocate rlc buffers */
3381	if (adev->flags & AMD_IS_APU) {
3382		if (adev->asic_type == CHIP_KAVERI) {
3383			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3384			adev->gfx.rlc.reg_list_size =
3385				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3386		} else {
3387			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3388			adev->gfx.rlc.reg_list_size =
3389				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3390		}
3391	}
3392	adev->gfx.rlc.cs_data = ci_cs_data;
3393	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3394	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3395
3396	src_ptr = adev->gfx.rlc.reg_list;
3397	dws = adev->gfx.rlc.reg_list_size;
3398	dws += (5 * 16) + 48 + 48 + 64;
3399
3400	cs_data = adev->gfx.rlc.cs_data;
3401
3402	if (src_ptr) {
3403		/* save restore block */
3404		if (adev->gfx.rlc.save_restore_obj == NULL) {
3405			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3406					     AMDGPU_GEM_DOMAIN_VRAM,
3407					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3408					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3409					     NULL, NULL,
3410					     &adev->gfx.rlc.save_restore_obj);
3411			if (r) {
3412				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3413				return r;
3414			}
3415		}
3416
3417		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3418		if (unlikely(r != 0)) {
3419			gfx_v7_0_rlc_fini(adev);
3420			return r;
3421		}
3422		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
3423				  &adev->gfx.rlc.save_restore_gpu_addr);
3424		if (r) {
3425			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3426			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
3427			gfx_v7_0_rlc_fini(adev);
3428			return r;
3429		}
3430
3431		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
3432		if (r) {
3433			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
3434			gfx_v7_0_rlc_fini(adev);
3435			return r;
3436		}
3437		/* write the sr buffer */
3438		dst_ptr = adev->gfx.rlc.sr_ptr;
3439		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3440			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
3441		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
3442		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3443	}
3444
3445	if (cs_data) {
3446		/* clear state block */
3447		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
3448
3449		if (adev->gfx.rlc.clear_state_obj == NULL) {
3450			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3451					     AMDGPU_GEM_DOMAIN_VRAM,
3452					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3453					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3454					     NULL, NULL,
3455					     &adev->gfx.rlc.clear_state_obj);
3456			if (r) {
3457				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3458				gfx_v7_0_rlc_fini(adev);
3459				return r;
3460			}
3461		}
3462		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3463		if (unlikely(r != 0)) {
3464			gfx_v7_0_rlc_fini(adev);
3465			return r;
3466		}
3467		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
3468				  &adev->gfx.rlc.clear_state_gpu_addr);
3469		if (r) {
3470			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3471			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
3472			gfx_v7_0_rlc_fini(adev);
3473			return r;
3474		}
3475
3476		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
3477		if (r) {
3478			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
3479			gfx_v7_0_rlc_fini(adev);
3480			return r;
3481		}
3482		/* set up the cs buffer */
3483		dst_ptr = adev->gfx.rlc.cs_ptr;
3484		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
3485		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
3486		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3487	}
3488
3489	if (adev->gfx.rlc.cp_table_size) {
3490		if (adev->gfx.rlc.cp_table_obj == NULL) {
3491			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3492					     AMDGPU_GEM_DOMAIN_VRAM,
3493					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
3494					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
3495					     NULL, NULL,
3496					     &adev->gfx.rlc.cp_table_obj);
3497			if (r) {
3498				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3499				gfx_v7_0_rlc_fini(adev);
3500				return r;
3501			}
3502		}
3503
3504		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3505		if (unlikely(r != 0)) {
3506			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3507			gfx_v7_0_rlc_fini(adev);
3508			return r;
3509		}
3510		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
3511				  &adev->gfx.rlc.cp_table_gpu_addr);
3512		if (r) {
3513			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3514			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
3515			gfx_v7_0_rlc_fini(adev);
3516			return r;
3517		}
3518		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
3519		if (r) {
3520			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
3521			gfx_v7_0_rlc_fini(adev);
3522			return r;
3523		}
3524
3525		gfx_v7_0_init_cp_pg_table(adev);
3526
3527		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
3528		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3529
3530	}
3531
 
 
 
 
3532	return 0;
3533}
3534
3535static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3536{
3537	u32 tmp;
3538
3539	tmp = RREG32(mmRLC_LB_CNTL);
3540	if (enable)
3541		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3542	else
3543		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3544	WREG32(mmRLC_LB_CNTL, tmp);
3545}
3546
3547static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3548{
3549	u32 i, j, k;
3550	u32 mask;
3551
3552	mutex_lock(&adev->grbm_idx_mutex);
3553	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3554		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3555			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3556			for (k = 0; k < adev->usec_timeout; k++) {
3557				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3558					break;
3559				udelay(1);
3560			}
3561		}
3562	}
3563	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3564	mutex_unlock(&adev->grbm_idx_mutex);
3565
3566	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3567		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3568		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3569		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3570	for (k = 0; k < adev->usec_timeout; k++) {
3571		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3572			break;
3573		udelay(1);
3574	}
3575}
3576
3577static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3578{
3579	u32 tmp;
3580
3581	tmp = RREG32(mmRLC_CNTL);
3582	if (tmp != rlc)
3583		WREG32(mmRLC_CNTL, rlc);
3584}
3585
3586static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3587{
3588	u32 data, orig;
3589
3590	orig = data = RREG32(mmRLC_CNTL);
3591
3592	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3593		u32 i;
3594
3595		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3596		WREG32(mmRLC_CNTL, data);
3597
3598		for (i = 0; i < adev->usec_timeout; i++) {
3599			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3600				break;
3601			udelay(1);
3602		}
3603
3604		gfx_v7_0_wait_for_rlc_serdes(adev);
3605	}
3606
3607	return orig;
3608}
3609
3610static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 
 
 
 
 
3611{
3612	u32 tmp, i, mask;
3613
3614	tmp = 0x1 | (1 << 1);
3615	WREG32(mmRLC_GPR_REG2, tmp);
3616
3617	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3618		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3619	for (i = 0; i < adev->usec_timeout; i++) {
3620		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3621			break;
3622		udelay(1);
3623	}
3624
3625	for (i = 0; i < adev->usec_timeout; i++) {
3626		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3627			break;
3628		udelay(1);
3629	}
3630}
3631
3632static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3633{
3634	u32 tmp;
3635
3636	tmp = 0x1 | (0 << 1);
3637	WREG32(mmRLC_GPR_REG2, tmp);
3638}
3639
3640/**
3641 * gfx_v7_0_rlc_stop - stop the RLC ME
3642 *
3643 * @adev: amdgpu_device pointer
3644 *
3645 * Halt the RLC ME (MicroEngine) (CIK).
3646 */
3647static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3648{
3649	WREG32(mmRLC_CNTL, 0);
3650
3651	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3652
3653	gfx_v7_0_wait_for_rlc_serdes(adev);
3654}
3655
3656/**
3657 * gfx_v7_0_rlc_start - start the RLC ME
3658 *
3659 * @adev: amdgpu_device pointer
3660 *
3661 * Unhalt the RLC ME (MicroEngine) (CIK).
3662 */
3663static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3664{
3665	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3666
3667	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3668
3669	udelay(50);
3670}
3671
3672static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3673{
3674	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3675
3676	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3677	WREG32(mmGRBM_SOFT_RESET, tmp);
3678	udelay(50);
3679	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3680	WREG32(mmGRBM_SOFT_RESET, tmp);
3681	udelay(50);
3682}
3683
3684/**
3685 * gfx_v7_0_rlc_resume - setup the RLC hw
3686 *
3687 * @adev: amdgpu_device pointer
3688 *
3689 * Initialize the RLC registers, load the ucode,
3690 * and start the RLC (CIK).
3691 * Returns 0 for success, -EINVAL if the ucode is not available.
3692 */
3693static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3694{
3695	const struct rlc_firmware_header_v1_0 *hdr;
3696	const __le32 *fw_data;
3697	unsigned i, fw_size;
3698	u32 tmp;
3699
3700	if (!adev->gfx.rlc_fw)
3701		return -EINVAL;
3702
3703	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3704	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3705	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3706	adev->gfx.rlc_feature_version = le32_to_cpu(
3707					hdr->ucode_feature_version);
3708
3709	gfx_v7_0_rlc_stop(adev);
3710
3711	/* disable CG */
3712	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3713	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3714
3715	gfx_v7_0_rlc_reset(adev);
3716
3717	gfx_v7_0_init_pg(adev);
3718
3719	WREG32(mmRLC_LB_CNTR_INIT, 0);
3720	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3721
3722	mutex_lock(&adev->grbm_idx_mutex);
3723	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3724	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3725	WREG32(mmRLC_LB_PARAMS, 0x00600408);
3726	WREG32(mmRLC_LB_CNTL, 0x80000004);
3727	mutex_unlock(&adev->grbm_idx_mutex);
3728
3729	WREG32(mmRLC_MC_CNTL, 0);
3730	WREG32(mmRLC_UCODE_CNTL, 0);
3731
3732	fw_data = (const __le32 *)
3733		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3734	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3735	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3736	for (i = 0; i < fw_size; i++)
3737		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3738	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3739
3740	/* XXX - find out what chips support lbpw */
3741	gfx_v7_0_enable_lbpw(adev, false);
3742
3743	if (adev->asic_type == CHIP_BONAIRE)
3744		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3745
3746	gfx_v7_0_rlc_start(adev);
3747
3748	return 0;
3749}
3750
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3751static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3752{
3753	u32 data, orig, tmp, tmp2;
3754
3755	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3756
3757	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3758		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3759
3760		tmp = gfx_v7_0_halt_rlc(adev);
3761
3762		mutex_lock(&adev->grbm_idx_mutex);
3763		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3764		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3765		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3766		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3767			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3768			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3769		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3770		mutex_unlock(&adev->grbm_idx_mutex);
3771
3772		gfx_v7_0_update_rlc(adev, tmp);
3773
3774		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
 
 
 
3775	} else {
3776		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3777
3778		RREG32(mmCB_CGTT_SCLK_CTRL);
3779		RREG32(mmCB_CGTT_SCLK_CTRL);
3780		RREG32(mmCB_CGTT_SCLK_CTRL);
3781		RREG32(mmCB_CGTT_SCLK_CTRL);
3782
3783		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3784	}
3785
3786	if (orig != data)
3787		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3788
 
 
3789}
3790
3791static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3792{
3793	u32 data, orig, tmp = 0;
3794
3795	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3796		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3797			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3798				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3799				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3800				if (orig != data)
3801					WREG32(mmCP_MEM_SLP_CNTL, data);
3802			}
3803		}
3804
3805		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3806		data |= 0x00000001;
3807		data &= 0xfffffffd;
3808		if (orig != data)
3809			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3810
3811		tmp = gfx_v7_0_halt_rlc(adev);
3812
3813		mutex_lock(&adev->grbm_idx_mutex);
3814		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3815		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3816		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3817		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3818			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3819		WREG32(mmRLC_SERDES_WR_CTRL, data);
3820		mutex_unlock(&adev->grbm_idx_mutex);
3821
3822		gfx_v7_0_update_rlc(adev, tmp);
3823
3824		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3825			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3826			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3827			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3828			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3829			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3830			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3831			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3832				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3833			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3834			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3835			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3836			if (orig != data)
3837				WREG32(mmCGTS_SM_CTRL_REG, data);
3838		}
3839	} else {
3840		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3841		data |= 0x00000003;
3842		if (orig != data)
3843			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3844
3845		data = RREG32(mmRLC_MEM_SLP_CNTL);
3846		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3847			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3848			WREG32(mmRLC_MEM_SLP_CNTL, data);
3849		}
3850
3851		data = RREG32(mmCP_MEM_SLP_CNTL);
3852		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3853			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3854			WREG32(mmCP_MEM_SLP_CNTL, data);
3855		}
3856
3857		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3858		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3859		if (orig != data)
3860			WREG32(mmCGTS_SM_CTRL_REG, data);
3861
3862		tmp = gfx_v7_0_halt_rlc(adev);
3863
3864		mutex_lock(&adev->grbm_idx_mutex);
3865		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3866		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3867		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3868		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3869		WREG32(mmRLC_SERDES_WR_CTRL, data);
3870		mutex_unlock(&adev->grbm_idx_mutex);
3871
3872		gfx_v7_0_update_rlc(adev, tmp);
3873	}
3874}
3875
3876static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3877			       bool enable)
3878{
3879	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3880	/* order matters! */
3881	if (enable) {
3882		gfx_v7_0_enable_mgcg(adev, true);
3883		gfx_v7_0_enable_cgcg(adev, true);
3884	} else {
3885		gfx_v7_0_enable_cgcg(adev, false);
3886		gfx_v7_0_enable_mgcg(adev, false);
3887	}
3888	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3889}
3890
3891static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3892						bool enable)
3893{
3894	u32 data, orig;
3895
3896	orig = data = RREG32(mmRLC_PG_CNTL);
3897	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3898		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3899	else
3900		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3901	if (orig != data)
3902		WREG32(mmRLC_PG_CNTL, data);
3903}
3904
3905static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3906						bool enable)
3907{
3908	u32 data, orig;
3909
3910	orig = data = RREG32(mmRLC_PG_CNTL);
3911	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3912		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3913	else
3914		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3915	if (orig != data)
3916		WREG32(mmRLC_PG_CNTL, data);
3917}
3918
3919static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3920{
3921	u32 data, orig;
3922
3923	orig = data = RREG32(mmRLC_PG_CNTL);
3924	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3925		data &= ~0x8000;
3926	else
3927		data |= 0x8000;
3928	if (orig != data)
3929		WREG32(mmRLC_PG_CNTL, data);
3930}
3931
3932static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3933{
3934	u32 data, orig;
3935
3936	orig = data = RREG32(mmRLC_PG_CNTL);
3937	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3938		data &= ~0x2000;
3939	else
3940		data |= 0x2000;
3941	if (orig != data)
3942		WREG32(mmRLC_PG_CNTL, data);
3943}
3944
3945static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
3946{
3947	const __le32 *fw_data;
3948	volatile u32 *dst_ptr;
3949	int me, i, max_me = 4;
3950	u32 bo_offset = 0;
3951	u32 table_offset, table_size;
3952
3953	if (adev->asic_type == CHIP_KAVERI)
3954		max_me = 5;
3955
3956	if (adev->gfx.rlc.cp_table_ptr == NULL)
3957		return;
3958
3959	/* write the cp table buffer */
3960	dst_ptr = adev->gfx.rlc.cp_table_ptr;
3961	for (me = 0; me < max_me; me++) {
3962		if (me == 0) {
3963			const struct gfx_firmware_header_v1_0 *hdr =
3964				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
3965			fw_data = (const __le32 *)
3966				(adev->gfx.ce_fw->data +
3967				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3968			table_offset = le32_to_cpu(hdr->jt_offset);
3969			table_size = le32_to_cpu(hdr->jt_size);
3970		} else if (me == 1) {
3971			const struct gfx_firmware_header_v1_0 *hdr =
3972				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
3973			fw_data = (const __le32 *)
3974				(adev->gfx.pfp_fw->data +
3975				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3976			table_offset = le32_to_cpu(hdr->jt_offset);
3977			table_size = le32_to_cpu(hdr->jt_size);
3978		} else if (me == 2) {
3979			const struct gfx_firmware_header_v1_0 *hdr =
3980				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
3981			fw_data = (const __le32 *)
3982				(adev->gfx.me_fw->data +
3983				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3984			table_offset = le32_to_cpu(hdr->jt_offset);
3985			table_size = le32_to_cpu(hdr->jt_size);
3986		} else if (me == 3) {
3987			const struct gfx_firmware_header_v1_0 *hdr =
3988				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3989			fw_data = (const __le32 *)
3990				(adev->gfx.mec_fw->data +
3991				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3992			table_offset = le32_to_cpu(hdr->jt_offset);
3993			table_size = le32_to_cpu(hdr->jt_size);
3994		} else {
3995			const struct gfx_firmware_header_v1_0 *hdr =
3996				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3997			fw_data = (const __le32 *)
3998				(adev->gfx.mec2_fw->data +
3999				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
4000			table_offset = le32_to_cpu(hdr->jt_offset);
4001			table_size = le32_to_cpu(hdr->jt_size);
4002		}
4003
4004		for (i = 0; i < table_size; i ++) {
4005			dst_ptr[bo_offset + i] =
4006				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
4007		}
4008
4009		bo_offset += table_size;
4010	}
4011}
4012
4013static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
4014				     bool enable)
4015{
4016	u32 data, orig;
4017
4018	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
4019		orig = data = RREG32(mmRLC_PG_CNTL);
4020		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4021		if (orig != data)
4022			WREG32(mmRLC_PG_CNTL, data);
4023
4024		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
4025		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
4026		if (orig != data)
4027			WREG32(mmRLC_AUTO_PG_CTRL, data);
4028	} else {
4029		orig = data = RREG32(mmRLC_PG_CNTL);
4030		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4031		if (orig != data)
4032			WREG32(mmRLC_PG_CNTL, data);
4033
4034		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
4035		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
4036		if (orig != data)
4037			WREG32(mmRLC_AUTO_PG_CTRL, data);
4038
4039		data = RREG32(mmDB_RENDER_CONTROL);
4040	}
4041}
4042
4043static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4044						 u32 bitmap)
4045{
4046	u32 data;
4047
4048	if (!bitmap)
4049		return;
4050
4051	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4052	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4053
4054	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
4055}
4056
4057static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4058{
4059	u32 data, mask;
4060
4061	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
4062	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
4063
4064	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4065	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4066
4067	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
4068
4069	return (~data) & mask;
4070}
4071
4072static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
4073{
4074	u32 tmp;
4075
4076	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4077
4078	tmp = RREG32(mmRLC_MAX_PG_CU);
4079	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
4080	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
4081	WREG32(mmRLC_MAX_PG_CU, tmp);
4082}
4083
4084static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
4085					    bool enable)
4086{
4087	u32 data, orig;
4088
4089	orig = data = RREG32(mmRLC_PG_CNTL);
4090	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
4091		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4092	else
4093		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4094	if (orig != data)
4095		WREG32(mmRLC_PG_CNTL, data);
4096}
4097
4098static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
4099					     bool enable)
4100{
4101	u32 data, orig;
4102
4103	orig = data = RREG32(mmRLC_PG_CNTL);
4104	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
4105		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4106	else
4107		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4108	if (orig != data)
4109		WREG32(mmRLC_PG_CNTL, data);
4110}
4111
4112#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
4113#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
4114
4115static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
4116{
4117	u32 data, orig;
4118	u32 i;
4119
4120	if (adev->gfx.rlc.cs_data) {
4121		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
4122		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
4123		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
4124		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
4125	} else {
4126		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
4127		for (i = 0; i < 3; i++)
4128			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
4129	}
4130	if (adev->gfx.rlc.reg_list) {
4131		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
4132		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
4133			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
4134	}
4135
4136	orig = data = RREG32(mmRLC_PG_CNTL);
4137	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
4138	if (orig != data)
4139		WREG32(mmRLC_PG_CNTL, data);
4140
4141	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
4142	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4143
4144	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
4145	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
4146	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4147	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
4148
4149	data = 0x10101010;
4150	WREG32(mmRLC_PG_DELAY, data);
4151
4152	data = RREG32(mmRLC_PG_DELAY_2);
4153	data &= ~0xff;
4154	data |= 0x3;
4155	WREG32(mmRLC_PG_DELAY_2, data);
4156
4157	data = RREG32(mmRLC_AUTO_PG_CTRL);
4158	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
4159	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
4160	WREG32(mmRLC_AUTO_PG_CTRL, data);
4161
4162}
4163
4164static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
4165{
4166	gfx_v7_0_enable_gfx_cgpg(adev, enable);
4167	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
4168	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
4169}
4170
4171static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
4172{
4173	u32 count = 0;
4174	const struct cs_section_def *sect = NULL;
4175	const struct cs_extent_def *ext = NULL;
4176
4177	if (adev->gfx.rlc.cs_data == NULL)
4178		return 0;
4179
4180	/* begin clear state */
4181	count += 2;
4182	/* context control state */
4183	count += 3;
4184
4185	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4186		for (ext = sect->section; ext->extent != NULL; ++ext) {
4187			if (sect->id == SECT_CONTEXT)
4188				count += 2 + ext->reg_count;
4189			else
4190				return 0;
4191		}
4192	}
4193	/* pa_sc_raster_config/pa_sc_raster_config1 */
4194	count += 4;
4195	/* end clear state */
4196	count += 2;
4197	/* clear state */
4198	count += 2;
4199
4200	return count;
4201}
4202
4203static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4204				    volatile u32 *buffer)
4205{
4206	u32 count = 0, i;
4207	const struct cs_section_def *sect = NULL;
4208	const struct cs_extent_def *ext = NULL;
4209
4210	if (adev->gfx.rlc.cs_data == NULL)
4211		return;
4212	if (buffer == NULL)
4213		return;
4214
4215	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4216	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4217
4218	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4219	buffer[count++] = cpu_to_le32(0x80000000);
4220	buffer[count++] = cpu_to_le32(0x80000000);
4221
4222	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4223		for (ext = sect->section; ext->extent != NULL; ++ext) {
4224			if (sect->id == SECT_CONTEXT) {
4225				buffer[count++] =
4226					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
4227				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4228				for (i = 0; i < ext->reg_count; i++)
4229					buffer[count++] = cpu_to_le32(ext->extent[i]);
4230			} else {
4231				return;
4232			}
4233		}
4234	}
4235
4236	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4237	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4238	switch (adev->asic_type) {
4239	case CHIP_BONAIRE:
4240		buffer[count++] = cpu_to_le32(0x16000012);
4241		buffer[count++] = cpu_to_le32(0x00000000);
4242		break;
4243	case CHIP_KAVERI:
4244		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4245		buffer[count++] = cpu_to_le32(0x00000000);
4246		break;
4247	case CHIP_KABINI:
4248	case CHIP_MULLINS:
4249		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4250		buffer[count++] = cpu_to_le32(0x00000000);
4251		break;
4252	case CHIP_HAWAII:
4253		buffer[count++] = cpu_to_le32(0x3a00161a);
4254		buffer[count++] = cpu_to_le32(0x0000002e);
4255		break;
4256	default:
4257		buffer[count++] = cpu_to_le32(0x00000000);
4258		buffer[count++] = cpu_to_le32(0x00000000);
4259		break;
4260	}
4261
4262	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4263	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
4264
4265	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4266	buffer[count++] = cpu_to_le32(0);
4267}
4268
4269static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4270{
4271	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4272			      AMD_PG_SUPPORT_GFX_SMG |
4273			      AMD_PG_SUPPORT_GFX_DMG |
4274			      AMD_PG_SUPPORT_CP |
4275			      AMD_PG_SUPPORT_GDS |
4276			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4277		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4278		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4279		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4280			gfx_v7_0_init_gfx_cgpg(adev);
4281			gfx_v7_0_enable_cp_pg(adev, true);
4282			gfx_v7_0_enable_gds_pg(adev, true);
4283		}
4284		gfx_v7_0_init_ao_cu_mask(adev);
4285		gfx_v7_0_update_gfx_pg(adev, true);
4286	}
4287}
4288
4289static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4290{
4291	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4292			      AMD_PG_SUPPORT_GFX_SMG |
4293			      AMD_PG_SUPPORT_GFX_DMG |
4294			      AMD_PG_SUPPORT_CP |
4295			      AMD_PG_SUPPORT_GDS |
4296			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4297		gfx_v7_0_update_gfx_pg(adev, false);
4298		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4299			gfx_v7_0_enable_cp_pg(adev, false);
4300			gfx_v7_0_enable_gds_pg(adev, false);
4301		}
4302	}
4303}
4304
4305/**
4306 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4307 *
4308 * @adev: amdgpu_device pointer
4309 *
4310 * Fetches a GPU clock counter snapshot (SI).
4311 * Returns the 64 bit clock counter snapshot.
4312 */
4313static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4314{
4315	uint64_t clock;
4316
4317	mutex_lock(&adev->gfx.gpu_clock_mutex);
4318	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4319	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4320		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4321	mutex_unlock(&adev->gfx.gpu_clock_mutex);
4322	return clock;
4323}
4324
4325static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4326					  uint32_t vmid,
4327					  uint32_t gds_base, uint32_t gds_size,
4328					  uint32_t gws_base, uint32_t gws_size,
4329					  uint32_t oa_base, uint32_t oa_size)
4330{
4331	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
4332	gds_size = gds_size >> AMDGPU_GDS_SHIFT;
4333
4334	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
4335	gws_size = gws_size >> AMDGPU_GWS_SHIFT;
4336
4337	oa_base = oa_base >> AMDGPU_OA_SHIFT;
4338	oa_size = oa_size >> AMDGPU_OA_SHIFT;
4339
4340	/* GDS Base */
4341	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4342	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4343				WRITE_DATA_DST_SEL(0)));
4344	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4345	amdgpu_ring_write(ring, 0);
4346	amdgpu_ring_write(ring, gds_base);
4347
4348	/* GDS Size */
4349	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4350	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4351				WRITE_DATA_DST_SEL(0)));
4352	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4353	amdgpu_ring_write(ring, 0);
4354	amdgpu_ring_write(ring, gds_size);
4355
4356	/* GWS */
4357	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4358	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4359				WRITE_DATA_DST_SEL(0)));
4360	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4361	amdgpu_ring_write(ring, 0);
4362	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4363
4364	/* OA */
4365	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4366	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4367				WRITE_DATA_DST_SEL(0)));
4368	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4369	amdgpu_ring_write(ring, 0);
4370	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4371}
4372
 
 
 
 
 
 
 
 
 
 
 
 
4373static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4374{
4375	WREG32(mmSQ_IND_INDEX,
4376		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4377		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4378		(address << SQ_IND_INDEX__INDEX__SHIFT) |
4379		(SQ_IND_INDEX__FORCE_READ_MASK));
4380	return RREG32(mmSQ_IND_DATA);
4381}
4382
4383static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4384			   uint32_t wave, uint32_t thread,
4385			   uint32_t regno, uint32_t num, uint32_t *out)
4386{
4387	WREG32(mmSQ_IND_INDEX,
4388		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4389		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4390		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
4391		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4392		(SQ_IND_INDEX__FORCE_READ_MASK) |
4393		(SQ_IND_INDEX__AUTO_INCR_MASK));
4394	while (num--)
4395		*(out++) = RREG32(mmSQ_IND_DATA);
4396}
4397
4398static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4399{
4400	/* type 0 wave data */
4401	dst[(*no_fields)++] = 0;
4402	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4403	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4404	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4405	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4406	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4407	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4408	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4409	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4410	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4411	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4412	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4413	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4414	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4415	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4416	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4417	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4418	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4419	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
 
4420}
4421
4422static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
4423				     uint32_t wave, uint32_t start,
4424				     uint32_t size, uint32_t *dst)
4425{
4426	wave_read_regs(
4427		adev, simd, wave, 0,
4428		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4429}
4430
 
 
 
 
 
 
4431static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4432	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4433	.select_se_sh = &gfx_v7_0_select_se_sh,
4434	.read_wave_data = &gfx_v7_0_read_wave_data,
4435	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
 
4436};
4437
4438static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4439	.enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
4440	.exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
 
 
 
 
 
 
 
 
 
 
4441};
4442
4443static int gfx_v7_0_early_init(void *handle)
4444{
4445	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4446
 
4447	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4448	adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
 
4449	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4450	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4451	gfx_v7_0_set_ring_funcs(adev);
4452	gfx_v7_0_set_irq_funcs(adev);
4453	gfx_v7_0_set_gds_init(adev);
4454
4455	return 0;
4456}
4457
4458static int gfx_v7_0_late_init(void *handle)
4459{
4460	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4461	int r;
4462
4463	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4464	if (r)
4465		return r;
4466
4467	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4468	if (r)
4469		return r;
4470
4471	return 0;
4472}
4473
4474static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4475{
4476	u32 gb_addr_config;
4477	u32 mc_shared_chmap, mc_arb_ramcfg;
4478	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4479	u32 tmp;
4480
4481	switch (adev->asic_type) {
4482	case CHIP_BONAIRE:
4483		adev->gfx.config.max_shader_engines = 2;
4484		adev->gfx.config.max_tile_pipes = 4;
4485		adev->gfx.config.max_cu_per_sh = 7;
4486		adev->gfx.config.max_sh_per_se = 1;
4487		adev->gfx.config.max_backends_per_se = 2;
4488		adev->gfx.config.max_texture_channel_caches = 4;
4489		adev->gfx.config.max_gprs = 256;
4490		adev->gfx.config.max_gs_threads = 32;
4491		adev->gfx.config.max_hw_contexts = 8;
4492
4493		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4494		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4495		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4496		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4497		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4498		break;
4499	case CHIP_HAWAII:
4500		adev->gfx.config.max_shader_engines = 4;
4501		adev->gfx.config.max_tile_pipes = 16;
4502		adev->gfx.config.max_cu_per_sh = 11;
4503		adev->gfx.config.max_sh_per_se = 1;
4504		adev->gfx.config.max_backends_per_se = 4;
4505		adev->gfx.config.max_texture_channel_caches = 16;
4506		adev->gfx.config.max_gprs = 256;
4507		adev->gfx.config.max_gs_threads = 32;
4508		adev->gfx.config.max_hw_contexts = 8;
4509
4510		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4511		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4512		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4513		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4514		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4515		break;
4516	case CHIP_KAVERI:
4517		adev->gfx.config.max_shader_engines = 1;
4518		adev->gfx.config.max_tile_pipes = 4;
4519		if ((adev->pdev->device == 0x1304) ||
4520		    (adev->pdev->device == 0x1305) ||
4521		    (adev->pdev->device == 0x130C) ||
4522		    (adev->pdev->device == 0x130F) ||
4523		    (adev->pdev->device == 0x1310) ||
4524		    (adev->pdev->device == 0x1311) ||
4525		    (adev->pdev->device == 0x131C)) {
4526			adev->gfx.config.max_cu_per_sh = 8;
4527			adev->gfx.config.max_backends_per_se = 2;
4528		} else if ((adev->pdev->device == 0x1309) ||
4529			   (adev->pdev->device == 0x130A) ||
4530			   (adev->pdev->device == 0x130D) ||
4531			   (adev->pdev->device == 0x1313) ||
4532			   (adev->pdev->device == 0x131D)) {
4533			adev->gfx.config.max_cu_per_sh = 6;
4534			adev->gfx.config.max_backends_per_se = 2;
4535		} else if ((adev->pdev->device == 0x1306) ||
4536			   (adev->pdev->device == 0x1307) ||
4537			   (adev->pdev->device == 0x130B) ||
4538			   (adev->pdev->device == 0x130E) ||
4539			   (adev->pdev->device == 0x1315) ||
4540			   (adev->pdev->device == 0x131B)) {
4541			adev->gfx.config.max_cu_per_sh = 4;
4542			adev->gfx.config.max_backends_per_se = 1;
4543		} else {
4544			adev->gfx.config.max_cu_per_sh = 3;
4545			adev->gfx.config.max_backends_per_se = 1;
4546		}
4547		adev->gfx.config.max_sh_per_se = 1;
4548		adev->gfx.config.max_texture_channel_caches = 4;
4549		adev->gfx.config.max_gprs = 256;
4550		adev->gfx.config.max_gs_threads = 16;
4551		adev->gfx.config.max_hw_contexts = 8;
4552
4553		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4554		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4555		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4556		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4557		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4558		break;
4559	case CHIP_KABINI:
4560	case CHIP_MULLINS:
4561	default:
4562		adev->gfx.config.max_shader_engines = 1;
4563		adev->gfx.config.max_tile_pipes = 2;
4564		adev->gfx.config.max_cu_per_sh = 2;
4565		adev->gfx.config.max_sh_per_se = 1;
4566		adev->gfx.config.max_backends_per_se = 1;
4567		adev->gfx.config.max_texture_channel_caches = 2;
4568		adev->gfx.config.max_gprs = 256;
4569		adev->gfx.config.max_gs_threads = 16;
4570		adev->gfx.config.max_hw_contexts = 8;
4571
4572		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4573		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4574		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4575		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4576		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4577		break;
4578	}
4579
4580	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
4581	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4582	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4583
 
 
 
 
 
4584	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4585	adev->gfx.config.mem_max_burst_length_bytes = 256;
4586	if (adev->flags & AMD_IS_APU) {
4587		/* Get memory bank mapping mode. */
4588		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4589		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4590		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4591
4592		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4593		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4594		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4595
4596		/* Validate settings in case only one DIMM installed. */
4597		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4598			dimm00_addr_map = 0;
4599		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4600			dimm01_addr_map = 0;
4601		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4602			dimm10_addr_map = 0;
4603		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4604			dimm11_addr_map = 0;
4605
4606		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4607		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
4608		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4609			adev->gfx.config.mem_row_size_in_kb = 2;
4610		else
4611			adev->gfx.config.mem_row_size_in_kb = 1;
4612	} else {
4613		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4614		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4615		if (adev->gfx.config.mem_row_size_in_kb > 4)
4616			adev->gfx.config.mem_row_size_in_kb = 4;
4617	}
4618	/* XXX use MC settings? */
4619	adev->gfx.config.shader_engine_tile_size = 32;
4620	adev->gfx.config.num_gpus = 1;
4621	adev->gfx.config.multi_gpu_tile_size = 64;
4622
4623	/* fix up row size */
4624	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4625	switch (adev->gfx.config.mem_row_size_in_kb) {
4626	case 1:
4627	default:
4628		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4629		break;
4630	case 2:
4631		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4632		break;
4633	case 4:
4634		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4635		break;
4636	}
4637	adev->gfx.config.gb_addr_config = gb_addr_config;
4638}
4639
4640static int gfx_v7_0_sw_init(void *handle)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4641{
4642	struct amdgpu_ring *ring;
4643	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4644	int i, r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4645
4646	/* EOP Event */
4647	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
4648	if (r)
4649		return r;
4650
4651	/* Privileged reg */
4652	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
 
4653	if (r)
4654		return r;
4655
4656	/* Privileged inst */
4657	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
 
4658	if (r)
4659		return r;
4660
4661	gfx_v7_0_scratch_init(adev);
4662
4663	r = gfx_v7_0_init_microcode(adev);
4664	if (r) {
4665		DRM_ERROR("Failed to load gfx firmware!\n");
4666		return r;
4667	}
4668
4669	r = gfx_v7_0_rlc_init(adev);
4670	if (r) {
4671		DRM_ERROR("Failed to init rlc BOs!\n");
4672		return r;
4673	}
4674
4675	/* allocate mec buffers */
4676	r = gfx_v7_0_mec_init(adev);
4677	if (r) {
4678		DRM_ERROR("Failed to init MEC BOs!\n");
4679		return r;
4680	}
4681
4682	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4683		ring = &adev->gfx.gfx_ring[i];
4684		ring->ring_obj = NULL;
4685		sprintf(ring->name, "gfx");
4686		r = amdgpu_ring_init(adev, ring, 1024,
4687				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 
 
4688		if (r)
4689			return r;
4690	}
4691
4692	/* set up the compute queues */
4693	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4694		unsigned irq_type;
 
 
 
 
 
 
 
 
 
 
 
4695
4696		/* max 32 queues per MEC */
4697		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
4698			DRM_ERROR("Too many (%d) compute rings!\n", i);
4699			break;
4700		}
4701		ring = &adev->gfx.compute_ring[i];
4702		ring->ring_obj = NULL;
4703		ring->use_doorbell = true;
4704		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
4705		ring->me = 1; /* first MEC */
4706		ring->pipe = i / 8;
4707		ring->queue = i % 8;
4708		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4709		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
4710		/* type-2 packets are deprecated on MEC, use type-3 instead */
4711		r = amdgpu_ring_init(adev, ring, 1024,
4712				     &adev->gfx.eop_irq, irq_type);
4713		if (r)
4714			return r;
4715	}
4716
4717	/* reserve GDS, GWS and OA resource for gfx */
4718	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
4719				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
4720				    &adev->gds.gds_gfx_bo, NULL, NULL);
4721	if (r)
4722		return r;
4723
4724	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
4725				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
4726				    &adev->gds.gws_gfx_bo, NULL, NULL);
4727	if (r)
4728		return r;
4729
4730	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
4731				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
4732				    &adev->gds.oa_gfx_bo, NULL, NULL);
4733	if (r)
4734		return r;
4735
4736	adev->gfx.ce_ram_size = 0x8000;
4737
4738	gfx_v7_0_gpu_early_init(adev);
4739
4740	return r;
4741}
4742
4743static int gfx_v7_0_sw_fini(void *handle)
4744{
 
4745	int i;
4746	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4747
4748	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
4749	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
4750	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
4751
4752	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4753		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4754	for (i = 0; i < adev->gfx.num_compute_rings; i++)
4755		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4756
4757	gfx_v7_0_cp_compute_fini(adev);
4758	gfx_v7_0_rlc_fini(adev);
4759	gfx_v7_0_mec_fini(adev);
 
 
 
 
 
 
 
 
4760	gfx_v7_0_free_microcode(adev);
4761
4762	return 0;
4763}
4764
4765static int gfx_v7_0_hw_init(void *handle)
4766{
4767	int r;
4768	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4769
4770	gfx_v7_0_gpu_init(adev);
4771
 
 
4772	/* init rlc */
4773	r = gfx_v7_0_rlc_resume(adev);
4774	if (r)
4775		return r;
4776
4777	r = gfx_v7_0_cp_resume(adev);
4778	if (r)
4779		return r;
4780
4781	return r;
4782}
4783
4784static int gfx_v7_0_hw_fini(void *handle)
4785{
4786	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4787
4788	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4789	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4790	gfx_v7_0_cp_enable(adev, false);
4791	gfx_v7_0_rlc_stop(adev);
4792	gfx_v7_0_fini_pg(adev);
4793
4794	return 0;
4795}
4796
4797static int gfx_v7_0_suspend(void *handle)
4798{
4799	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4800
4801	return gfx_v7_0_hw_fini(adev);
4802}
4803
4804static int gfx_v7_0_resume(void *handle)
4805{
4806	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4807
4808	return gfx_v7_0_hw_init(adev);
4809}
4810
4811static bool gfx_v7_0_is_idle(void *handle)
4812{
4813	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4814
4815	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4816		return false;
4817	else
4818		return true;
4819}
4820
4821static int gfx_v7_0_wait_for_idle(void *handle)
4822{
4823	unsigned i;
4824	u32 tmp;
4825	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4826
4827	for (i = 0; i < adev->usec_timeout; i++) {
4828		/* read MC_STATUS */
4829		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4830
4831		if (!tmp)
4832			return 0;
4833		udelay(1);
4834	}
4835	return -ETIMEDOUT;
4836}
4837
4838static int gfx_v7_0_soft_reset(void *handle)
4839{
4840	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4841	u32 tmp;
4842	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4843
4844	/* GRBM_STATUS */
4845	tmp = RREG32(mmGRBM_STATUS);
4846	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4847		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4848		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4849		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4850		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4851		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4852		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4853			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4854
4855	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4856		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4857		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4858	}
4859
4860	/* GRBM_STATUS2 */
4861	tmp = RREG32(mmGRBM_STATUS2);
4862	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4863		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4864
4865	/* SRBM_STATUS */
4866	tmp = RREG32(mmSRBM_STATUS);
4867	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4868		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4869
4870	if (grbm_soft_reset || srbm_soft_reset) {
4871		/* disable CG/PG */
4872		gfx_v7_0_fini_pg(adev);
4873		gfx_v7_0_update_cg(adev, false);
4874
4875		/* stop the rlc */
4876		gfx_v7_0_rlc_stop(adev);
4877
4878		/* Disable GFX parsing/prefetching */
4879		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4880
4881		/* Disable MEC parsing/prefetching */
4882		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4883
4884		if (grbm_soft_reset) {
4885			tmp = RREG32(mmGRBM_SOFT_RESET);
4886			tmp |= grbm_soft_reset;
4887			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4888			WREG32(mmGRBM_SOFT_RESET, tmp);
4889			tmp = RREG32(mmGRBM_SOFT_RESET);
4890
4891			udelay(50);
4892
4893			tmp &= ~grbm_soft_reset;
4894			WREG32(mmGRBM_SOFT_RESET, tmp);
4895			tmp = RREG32(mmGRBM_SOFT_RESET);
4896		}
4897
4898		if (srbm_soft_reset) {
4899			tmp = RREG32(mmSRBM_SOFT_RESET);
4900			tmp |= srbm_soft_reset;
4901			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4902			WREG32(mmSRBM_SOFT_RESET, tmp);
4903			tmp = RREG32(mmSRBM_SOFT_RESET);
4904
4905			udelay(50);
4906
4907			tmp &= ~srbm_soft_reset;
4908			WREG32(mmSRBM_SOFT_RESET, tmp);
4909			tmp = RREG32(mmSRBM_SOFT_RESET);
4910		}
4911		/* Wait a little for things to settle down */
4912		udelay(50);
4913	}
4914	return 0;
4915}
4916
4917static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4918						 enum amdgpu_interrupt_state state)
4919{
4920	u32 cp_int_cntl;
4921
4922	switch (state) {
4923	case AMDGPU_IRQ_STATE_DISABLE:
4924		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4925		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4926		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4927		break;
4928	case AMDGPU_IRQ_STATE_ENABLE:
4929		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4930		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4931		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4932		break;
4933	default:
4934		break;
4935	}
4936}
4937
4938static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4939						     int me, int pipe,
4940						     enum amdgpu_interrupt_state state)
4941{
4942	u32 mec_int_cntl, mec_int_cntl_reg;
4943
4944	/*
4945	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4946	 * handles the setting of interrupts for this specific pipe. All other
4947	 * pipes' interrupts are set by amdkfd.
4948	 */
4949
4950	if (me == 1) {
4951		switch (pipe) {
4952		case 0:
4953			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4954			break;
 
 
 
 
 
 
 
 
 
4955		default:
4956			DRM_DEBUG("invalid pipe %d\n", pipe);
4957			return;
4958		}
4959	} else {
4960		DRM_DEBUG("invalid me %d\n", me);
4961		return;
4962	}
4963
4964	switch (state) {
4965	case AMDGPU_IRQ_STATE_DISABLE:
4966		mec_int_cntl = RREG32(mec_int_cntl_reg);
4967		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4968		WREG32(mec_int_cntl_reg, mec_int_cntl);
4969		break;
4970	case AMDGPU_IRQ_STATE_ENABLE:
4971		mec_int_cntl = RREG32(mec_int_cntl_reg);
4972		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4973		WREG32(mec_int_cntl_reg, mec_int_cntl);
4974		break;
4975	default:
4976		break;
4977	}
4978}
4979
4980static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4981					     struct amdgpu_irq_src *src,
4982					     unsigned type,
4983					     enum amdgpu_interrupt_state state)
4984{
4985	u32 cp_int_cntl;
4986
4987	switch (state) {
4988	case AMDGPU_IRQ_STATE_DISABLE:
4989		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4990		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4991		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4992		break;
4993	case AMDGPU_IRQ_STATE_ENABLE:
4994		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4995		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4996		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4997		break;
4998	default:
4999		break;
5000	}
5001
5002	return 0;
5003}
5004
5005static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5006					      struct amdgpu_irq_src *src,
5007					      unsigned type,
5008					      enum amdgpu_interrupt_state state)
5009{
5010	u32 cp_int_cntl;
5011
5012	switch (state) {
5013	case AMDGPU_IRQ_STATE_DISABLE:
5014		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
5015		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
5016		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
5017		break;
5018	case AMDGPU_IRQ_STATE_ENABLE:
5019		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
5020		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
5021		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
5022		break;
5023	default:
5024		break;
5025	}
5026
5027	return 0;
5028}
5029
5030static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5031					    struct amdgpu_irq_src *src,
5032					    unsigned type,
5033					    enum amdgpu_interrupt_state state)
5034{
5035	switch (type) {
5036	case AMDGPU_CP_IRQ_GFX_EOP:
5037		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
5038		break;
5039	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5040		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5041		break;
5042	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5043		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5044		break;
5045	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5046		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5047		break;
5048	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5049		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5050		break;
5051	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5052		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5053		break;
5054	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5055		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5056		break;
5057	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5058		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5059		break;
5060	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5061		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5062		break;
5063	default:
5064		break;
5065	}
5066	return 0;
5067}
5068
5069static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
5070			    struct amdgpu_irq_src *source,
5071			    struct amdgpu_iv_entry *entry)
5072{
5073	u8 me_id, pipe_id;
5074	struct amdgpu_ring *ring;
5075	int i;
5076
5077	DRM_DEBUG("IH: CP EOP\n");
5078	me_id = (entry->ring_id & 0x0c) >> 2;
5079	pipe_id = (entry->ring_id & 0x03) >> 0;
5080	switch (me_id) {
5081	case 0:
5082		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5083		break;
5084	case 1:
5085	case 2:
5086		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5087			ring = &adev->gfx.compute_ring[i];
5088			if ((ring->me == me_id) && (ring->pipe == pipe_id))
5089				amdgpu_fence_process(ring);
5090		}
5091		break;
5092	}
5093	return 0;
5094}
5095
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5096static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
5097				 struct amdgpu_irq_src *source,
5098				 struct amdgpu_iv_entry *entry)
5099{
5100	DRM_ERROR("Illegal register access in command stream\n");
5101	schedule_work(&adev->reset_work);
5102	return 0;
5103}
5104
5105static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
5106				  struct amdgpu_irq_src *source,
5107				  struct amdgpu_iv_entry *entry)
5108{
5109	DRM_ERROR("Illegal instruction in command stream\n");
5110	// XXX soft reset the gfx block only
5111	schedule_work(&adev->reset_work);
5112	return 0;
5113}
5114
5115static int gfx_v7_0_set_clockgating_state(void *handle,
5116					  enum amd_clockgating_state state)
5117{
5118	bool gate = false;
5119	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5120
5121	if (state == AMD_CG_STATE_GATE)
5122		gate = true;
5123
5124	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
5125	/* order matters! */
5126	if (gate) {
5127		gfx_v7_0_enable_mgcg(adev, true);
5128		gfx_v7_0_enable_cgcg(adev, true);
5129	} else {
5130		gfx_v7_0_enable_cgcg(adev, false);
5131		gfx_v7_0_enable_mgcg(adev, false);
5132	}
5133	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
5134
5135	return 0;
5136}
5137
5138static int gfx_v7_0_set_powergating_state(void *handle,
5139					  enum amd_powergating_state state)
5140{
5141	bool gate = false;
5142	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5143
5144	if (state == AMD_PG_STATE_GATE)
5145		gate = true;
5146
5147	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
5148			      AMD_PG_SUPPORT_GFX_SMG |
5149			      AMD_PG_SUPPORT_GFX_DMG |
5150			      AMD_PG_SUPPORT_CP |
5151			      AMD_PG_SUPPORT_GDS |
5152			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
5153		gfx_v7_0_update_gfx_pg(adev, gate);
5154		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
5155			gfx_v7_0_enable_cp_pg(adev, gate);
5156			gfx_v7_0_enable_gds_pg(adev, gate);
5157		}
5158	}
5159
5160	return 0;
5161}
5162
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5163static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5164	.name = "gfx_v7_0",
5165	.early_init = gfx_v7_0_early_init,
5166	.late_init = gfx_v7_0_late_init,
5167	.sw_init = gfx_v7_0_sw_init,
5168	.sw_fini = gfx_v7_0_sw_fini,
5169	.hw_init = gfx_v7_0_hw_init,
5170	.hw_fini = gfx_v7_0_hw_fini,
5171	.suspend = gfx_v7_0_suspend,
5172	.resume = gfx_v7_0_resume,
5173	.is_idle = gfx_v7_0_is_idle,
5174	.wait_for_idle = gfx_v7_0_wait_for_idle,
5175	.soft_reset = gfx_v7_0_soft_reset,
5176	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
5177	.set_powergating_state = gfx_v7_0_set_powergating_state,
5178};
5179
5180static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5181	.type = AMDGPU_RING_TYPE_GFX,
5182	.align_mask = 0xff,
5183	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 
5184	.get_rptr = gfx_v7_0_ring_get_rptr,
5185	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5186	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5187	.emit_frame_size =
5188		20 + /* gfx_v7_0_ring_emit_gds_switch */
5189		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5190		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
5191		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
5192		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
5193		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
5194		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
 
5195	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
5196	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5197	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5198	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5199	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5200	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5201	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5202	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5203	.test_ring = gfx_v7_0_ring_test_ring,
5204	.test_ib = gfx_v7_0_ring_test_ib,
5205	.insert_nop = amdgpu_ring_insert_nop,
5206	.pad_ib = amdgpu_ring_generic_pad_ib,
5207	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
 
 
 
 
5208};
5209
5210static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5211	.type = AMDGPU_RING_TYPE_COMPUTE,
5212	.align_mask = 0xff,
5213	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 
5214	.get_rptr = gfx_v7_0_ring_get_rptr,
5215	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
5216	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
5217	.emit_frame_size =
5218		20 + /* gfx_v7_0_ring_emit_gds_switch */
5219		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5220		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
5221		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5222		17 + /* gfx_v7_0_ring_emit_vm_flush */
5223		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5224	.emit_ib_size =	4, /* gfx_v7_0_ring_emit_ib_compute */
 
5225	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
5226	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
5227	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5228	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5229	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5230	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5231	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5232	.test_ring = gfx_v7_0_ring_test_ring,
5233	.test_ib = gfx_v7_0_ring_test_ib,
5234	.insert_nop = amdgpu_ring_insert_nop,
5235	.pad_ib = amdgpu_ring_generic_pad_ib,
 
 
 
5236};
5237
5238static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5239{
5240	int i;
5241
5242	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5243		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5244	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5245		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5246}
5247
5248static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5249	.set = gfx_v7_0_set_eop_interrupt_state,
5250	.process = gfx_v7_0_eop_irq,
5251};
5252
5253static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5254	.set = gfx_v7_0_set_priv_reg_fault_state,
5255	.process = gfx_v7_0_priv_reg_irq,
5256};
5257
5258static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5259	.set = gfx_v7_0_set_priv_inst_fault_state,
5260	.process = gfx_v7_0_priv_inst_irq,
5261};
5262
5263static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5264{
5265	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5266	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5267
5268	adev->gfx.priv_reg_irq.num_types = 1;
5269	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5270
5271	adev->gfx.priv_inst_irq.num_types = 1;
5272	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5273}
5274
5275static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5276{
5277	/* init asci gds info */
5278	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5279	adev->gds.gws.total_size = 64;
5280	adev->gds.oa.total_size = 16;
5281
5282	if (adev->gds.mem.total_size == 64 * 1024) {
5283		adev->gds.mem.gfx_partition_size = 4096;
5284		adev->gds.mem.cs_partition_size = 4096;
5285
5286		adev->gds.gws.gfx_partition_size = 4;
5287		adev->gds.gws.cs_partition_size = 4;
5288
5289		adev->gds.oa.gfx_partition_size = 4;
5290		adev->gds.oa.cs_partition_size = 1;
5291	} else {
5292		adev->gds.mem.gfx_partition_size = 1024;
5293		adev->gds.mem.cs_partition_size = 1024;
5294
5295		adev->gds.gws.gfx_partition_size = 16;
5296		adev->gds.gws.cs_partition_size = 16;
5297
5298		adev->gds.oa.gfx_partition_size = 4;
5299		adev->gds.oa.cs_partition_size = 4;
5300	}
5301}
5302
5303
5304static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5305{
5306	int i, j, k, counter, active_cu_number = 0;
5307	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5308	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5309	unsigned disable_masks[4 * 2];
 
 
 
 
 
 
5310
5311	memset(cu_info, 0, sizeof(*cu_info));
5312
5313	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5314
5315	mutex_lock(&adev->grbm_idx_mutex);
5316	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5317		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5318			mask = 1;
5319			ao_bitmap = 0;
5320			counter = 0;
5321			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5322			if (i < 4 && j < 2)
5323				gfx_v7_0_set_user_cu_inactive_bitmap(
5324					adev, disable_masks[i * 2 + j]);
5325			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5326			cu_info->bitmap[i][j] = bitmap;
5327
5328			for (k = 0; k < 16; k ++) {
5329				if (bitmap & mask) {
5330					if (counter < 2)
5331						ao_bitmap |= mask;
5332					counter ++;
5333				}
5334				mask <<= 1;
5335			}
5336			active_cu_number += counter;
5337			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
 
 
5338		}
5339	}
5340	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5341	mutex_unlock(&adev->grbm_idx_mutex);
5342
5343	cu_info->number = active_cu_number;
5344	cu_info->ao_cu_mask = ao_cu_mask;
 
 
 
 
 
5345}
5346
5347const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
5348{
5349	.type = AMD_IP_BLOCK_TYPE_GFX,
5350	.major = 7,
5351	.minor = 0,
5352	.rev = 0,
5353	.funcs = &gfx_v7_0_ip_funcs,
5354};
5355
5356const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
5357{
5358	.type = AMD_IP_BLOCK_TYPE_GFX,
5359	.major = 7,
5360	.minor = 1,
5361	.rev = 0,
5362	.funcs = &gfx_v7_0_ip_funcs,
5363};
5364
5365const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
5366{
5367	.type = AMD_IP_BLOCK_TYPE_GFX,
5368	.major = 7,
5369	.minor = 2,
5370	.rev = 0,
5371	.funcs = &gfx_v7_0_ip_funcs,
5372};
5373
5374const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
5375{
5376	.type = AMD_IP_BLOCK_TYPE_GFX,
5377	.major = 7,
5378	.minor = 3,
5379	.rev = 0,
5380	.funcs = &gfx_v7_0_ip_funcs,
5381};
v6.13.7
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <linux/module.h>
  26
  27#include "amdgpu.h"
  28#include "amdgpu_ih.h"
  29#include "amdgpu_gfx.h"
  30#include "cikd.h"
  31#include "cik.h"
  32#include "cik_structs.h"
  33#include "atom.h"
  34#include "amdgpu_ucode.h"
  35#include "clearstate_ci.h"
  36
  37#include "dce/dce_8_0_d.h"
  38#include "dce/dce_8_0_sh_mask.h"
  39
  40#include "bif/bif_4_1_d.h"
  41#include "bif/bif_4_1_sh_mask.h"
  42
  43#include "gca/gfx_7_0_d.h"
  44#include "gca/gfx_7_2_enum.h"
  45#include "gca/gfx_7_2_sh_mask.h"
  46
  47#include "gmc/gmc_7_0_d.h"
  48#include "gmc/gmc_7_0_sh_mask.h"
  49
  50#include "oss/oss_2_0_d.h"
  51#include "oss/oss_2_0_sh_mask.h"
  52
  53#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */
  54
  55#define GFX7_NUM_GFX_RINGS     1
  56#define GFX7_MEC_HPD_SIZE      2048
  57
  58static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  59static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  60static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
  61
  62MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
  63MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
  64MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
  65MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
  66MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
  67
  68MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
  69MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
  70MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
  71MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
  72MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
  73
  74MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
  75MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
  76MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
  77MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
  78MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
  79MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
  80
  81MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
  82MODULE_FIRMWARE("amdgpu/kabini_me.bin");
  83MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
  84MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
  85MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
  86
  87MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
  88MODULE_FIRMWARE("amdgpu/mullins_me.bin");
  89MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
  90MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
  91MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
  92
  93static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = {
 
  94	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
  95	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
  96	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
  97	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
  98	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
  99	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
 100	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
 101	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
 102	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
 103	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
 104	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
 105	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
 106	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
 107	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
 108	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
 109	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 110};
 111
 112static const u32 spectre_rlc_save_restore_register_list[] = {
 
 113	(0x0e00 << 16) | (0xc12c >> 2),
 114	0x00000000,
 115	(0x0e00 << 16) | (0xc140 >> 2),
 116	0x00000000,
 117	(0x0e00 << 16) | (0xc150 >> 2),
 118	0x00000000,
 119	(0x0e00 << 16) | (0xc15c >> 2),
 120	0x00000000,
 121	(0x0e00 << 16) | (0xc168 >> 2),
 122	0x00000000,
 123	(0x0e00 << 16) | (0xc170 >> 2),
 124	0x00000000,
 125	(0x0e00 << 16) | (0xc178 >> 2),
 126	0x00000000,
 127	(0x0e00 << 16) | (0xc204 >> 2),
 128	0x00000000,
 129	(0x0e00 << 16) | (0xc2b4 >> 2),
 130	0x00000000,
 131	(0x0e00 << 16) | (0xc2b8 >> 2),
 132	0x00000000,
 133	(0x0e00 << 16) | (0xc2bc >> 2),
 134	0x00000000,
 135	(0x0e00 << 16) | (0xc2c0 >> 2),
 136	0x00000000,
 137	(0x0e00 << 16) | (0x8228 >> 2),
 138	0x00000000,
 139	(0x0e00 << 16) | (0x829c >> 2),
 140	0x00000000,
 141	(0x0e00 << 16) | (0x869c >> 2),
 142	0x00000000,
 143	(0x0600 << 16) | (0x98f4 >> 2),
 144	0x00000000,
 145	(0x0e00 << 16) | (0x98f8 >> 2),
 146	0x00000000,
 147	(0x0e00 << 16) | (0x9900 >> 2),
 148	0x00000000,
 149	(0x0e00 << 16) | (0xc260 >> 2),
 150	0x00000000,
 151	(0x0e00 << 16) | (0x90e8 >> 2),
 152	0x00000000,
 153	(0x0e00 << 16) | (0x3c000 >> 2),
 154	0x00000000,
 155	(0x0e00 << 16) | (0x3c00c >> 2),
 156	0x00000000,
 157	(0x0e00 << 16) | (0x8c1c >> 2),
 158	0x00000000,
 159	(0x0e00 << 16) | (0x9700 >> 2),
 160	0x00000000,
 161	(0x0e00 << 16) | (0xcd20 >> 2),
 162	0x00000000,
 163	(0x4e00 << 16) | (0xcd20 >> 2),
 164	0x00000000,
 165	(0x5e00 << 16) | (0xcd20 >> 2),
 166	0x00000000,
 167	(0x6e00 << 16) | (0xcd20 >> 2),
 168	0x00000000,
 169	(0x7e00 << 16) | (0xcd20 >> 2),
 170	0x00000000,
 171	(0x8e00 << 16) | (0xcd20 >> 2),
 172	0x00000000,
 173	(0x9e00 << 16) | (0xcd20 >> 2),
 174	0x00000000,
 175	(0xae00 << 16) | (0xcd20 >> 2),
 176	0x00000000,
 177	(0xbe00 << 16) | (0xcd20 >> 2),
 178	0x00000000,
 179	(0x0e00 << 16) | (0x89bc >> 2),
 180	0x00000000,
 181	(0x0e00 << 16) | (0x8900 >> 2),
 182	0x00000000,
 183	0x3,
 184	(0x0e00 << 16) | (0xc130 >> 2),
 185	0x00000000,
 186	(0x0e00 << 16) | (0xc134 >> 2),
 187	0x00000000,
 188	(0x0e00 << 16) | (0xc1fc >> 2),
 189	0x00000000,
 190	(0x0e00 << 16) | (0xc208 >> 2),
 191	0x00000000,
 192	(0x0e00 << 16) | (0xc264 >> 2),
 193	0x00000000,
 194	(0x0e00 << 16) | (0xc268 >> 2),
 195	0x00000000,
 196	(0x0e00 << 16) | (0xc26c >> 2),
 197	0x00000000,
 198	(0x0e00 << 16) | (0xc270 >> 2),
 199	0x00000000,
 200	(0x0e00 << 16) | (0xc274 >> 2),
 201	0x00000000,
 202	(0x0e00 << 16) | (0xc278 >> 2),
 203	0x00000000,
 204	(0x0e00 << 16) | (0xc27c >> 2),
 205	0x00000000,
 206	(0x0e00 << 16) | (0xc280 >> 2),
 207	0x00000000,
 208	(0x0e00 << 16) | (0xc284 >> 2),
 209	0x00000000,
 210	(0x0e00 << 16) | (0xc288 >> 2),
 211	0x00000000,
 212	(0x0e00 << 16) | (0xc28c >> 2),
 213	0x00000000,
 214	(0x0e00 << 16) | (0xc290 >> 2),
 215	0x00000000,
 216	(0x0e00 << 16) | (0xc294 >> 2),
 217	0x00000000,
 218	(0x0e00 << 16) | (0xc298 >> 2),
 219	0x00000000,
 220	(0x0e00 << 16) | (0xc29c >> 2),
 221	0x00000000,
 222	(0x0e00 << 16) | (0xc2a0 >> 2),
 223	0x00000000,
 224	(0x0e00 << 16) | (0xc2a4 >> 2),
 225	0x00000000,
 226	(0x0e00 << 16) | (0xc2a8 >> 2),
 227	0x00000000,
 228	(0x0e00 << 16) | (0xc2ac  >> 2),
 229	0x00000000,
 230	(0x0e00 << 16) | (0xc2b0 >> 2),
 231	0x00000000,
 232	(0x0e00 << 16) | (0x301d0 >> 2),
 233	0x00000000,
 234	(0x0e00 << 16) | (0x30238 >> 2),
 235	0x00000000,
 236	(0x0e00 << 16) | (0x30250 >> 2),
 237	0x00000000,
 238	(0x0e00 << 16) | (0x30254 >> 2),
 239	0x00000000,
 240	(0x0e00 << 16) | (0x30258 >> 2),
 241	0x00000000,
 242	(0x0e00 << 16) | (0x3025c >> 2),
 243	0x00000000,
 244	(0x4e00 << 16) | (0xc900 >> 2),
 245	0x00000000,
 246	(0x5e00 << 16) | (0xc900 >> 2),
 247	0x00000000,
 248	(0x6e00 << 16) | (0xc900 >> 2),
 249	0x00000000,
 250	(0x7e00 << 16) | (0xc900 >> 2),
 251	0x00000000,
 252	(0x8e00 << 16) | (0xc900 >> 2),
 253	0x00000000,
 254	(0x9e00 << 16) | (0xc900 >> 2),
 255	0x00000000,
 256	(0xae00 << 16) | (0xc900 >> 2),
 257	0x00000000,
 258	(0xbe00 << 16) | (0xc900 >> 2),
 259	0x00000000,
 260	(0x4e00 << 16) | (0xc904 >> 2),
 261	0x00000000,
 262	(0x5e00 << 16) | (0xc904 >> 2),
 263	0x00000000,
 264	(0x6e00 << 16) | (0xc904 >> 2),
 265	0x00000000,
 266	(0x7e00 << 16) | (0xc904 >> 2),
 267	0x00000000,
 268	(0x8e00 << 16) | (0xc904 >> 2),
 269	0x00000000,
 270	(0x9e00 << 16) | (0xc904 >> 2),
 271	0x00000000,
 272	(0xae00 << 16) | (0xc904 >> 2),
 273	0x00000000,
 274	(0xbe00 << 16) | (0xc904 >> 2),
 275	0x00000000,
 276	(0x4e00 << 16) | (0xc908 >> 2),
 277	0x00000000,
 278	(0x5e00 << 16) | (0xc908 >> 2),
 279	0x00000000,
 280	(0x6e00 << 16) | (0xc908 >> 2),
 281	0x00000000,
 282	(0x7e00 << 16) | (0xc908 >> 2),
 283	0x00000000,
 284	(0x8e00 << 16) | (0xc908 >> 2),
 285	0x00000000,
 286	(0x9e00 << 16) | (0xc908 >> 2),
 287	0x00000000,
 288	(0xae00 << 16) | (0xc908 >> 2),
 289	0x00000000,
 290	(0xbe00 << 16) | (0xc908 >> 2),
 291	0x00000000,
 292	(0x4e00 << 16) | (0xc90c >> 2),
 293	0x00000000,
 294	(0x5e00 << 16) | (0xc90c >> 2),
 295	0x00000000,
 296	(0x6e00 << 16) | (0xc90c >> 2),
 297	0x00000000,
 298	(0x7e00 << 16) | (0xc90c >> 2),
 299	0x00000000,
 300	(0x8e00 << 16) | (0xc90c >> 2),
 301	0x00000000,
 302	(0x9e00 << 16) | (0xc90c >> 2),
 303	0x00000000,
 304	(0xae00 << 16) | (0xc90c >> 2),
 305	0x00000000,
 306	(0xbe00 << 16) | (0xc90c >> 2),
 307	0x00000000,
 308	(0x4e00 << 16) | (0xc910 >> 2),
 309	0x00000000,
 310	(0x5e00 << 16) | (0xc910 >> 2),
 311	0x00000000,
 312	(0x6e00 << 16) | (0xc910 >> 2),
 313	0x00000000,
 314	(0x7e00 << 16) | (0xc910 >> 2),
 315	0x00000000,
 316	(0x8e00 << 16) | (0xc910 >> 2),
 317	0x00000000,
 318	(0x9e00 << 16) | (0xc910 >> 2),
 319	0x00000000,
 320	(0xae00 << 16) | (0xc910 >> 2),
 321	0x00000000,
 322	(0xbe00 << 16) | (0xc910 >> 2),
 323	0x00000000,
 324	(0x0e00 << 16) | (0xc99c >> 2),
 325	0x00000000,
 326	(0x0e00 << 16) | (0x9834 >> 2),
 327	0x00000000,
 328	(0x0000 << 16) | (0x30f00 >> 2),
 329	0x00000000,
 330	(0x0001 << 16) | (0x30f00 >> 2),
 331	0x00000000,
 332	(0x0000 << 16) | (0x30f04 >> 2),
 333	0x00000000,
 334	(0x0001 << 16) | (0x30f04 >> 2),
 335	0x00000000,
 336	(0x0000 << 16) | (0x30f08 >> 2),
 337	0x00000000,
 338	(0x0001 << 16) | (0x30f08 >> 2),
 339	0x00000000,
 340	(0x0000 << 16) | (0x30f0c >> 2),
 341	0x00000000,
 342	(0x0001 << 16) | (0x30f0c >> 2),
 343	0x00000000,
 344	(0x0600 << 16) | (0x9b7c >> 2),
 345	0x00000000,
 346	(0x0e00 << 16) | (0x8a14 >> 2),
 347	0x00000000,
 348	(0x0e00 << 16) | (0x8a18 >> 2),
 349	0x00000000,
 350	(0x0600 << 16) | (0x30a00 >> 2),
 351	0x00000000,
 352	(0x0e00 << 16) | (0x8bf0 >> 2),
 353	0x00000000,
 354	(0x0e00 << 16) | (0x8bcc >> 2),
 355	0x00000000,
 356	(0x0e00 << 16) | (0x8b24 >> 2),
 357	0x00000000,
 358	(0x0e00 << 16) | (0x30a04 >> 2),
 359	0x00000000,
 360	(0x0600 << 16) | (0x30a10 >> 2),
 361	0x00000000,
 362	(0x0600 << 16) | (0x30a14 >> 2),
 363	0x00000000,
 364	(0x0600 << 16) | (0x30a18 >> 2),
 365	0x00000000,
 366	(0x0600 << 16) | (0x30a2c >> 2),
 367	0x00000000,
 368	(0x0e00 << 16) | (0xc700 >> 2),
 369	0x00000000,
 370	(0x0e00 << 16) | (0xc704 >> 2),
 371	0x00000000,
 372	(0x0e00 << 16) | (0xc708 >> 2),
 373	0x00000000,
 374	(0x0e00 << 16) | (0xc768 >> 2),
 375	0x00000000,
 376	(0x0400 << 16) | (0xc770 >> 2),
 377	0x00000000,
 378	(0x0400 << 16) | (0xc774 >> 2),
 379	0x00000000,
 380	(0x0400 << 16) | (0xc778 >> 2),
 381	0x00000000,
 382	(0x0400 << 16) | (0xc77c >> 2),
 383	0x00000000,
 384	(0x0400 << 16) | (0xc780 >> 2),
 385	0x00000000,
 386	(0x0400 << 16) | (0xc784 >> 2),
 387	0x00000000,
 388	(0x0400 << 16) | (0xc788 >> 2),
 389	0x00000000,
 390	(0x0400 << 16) | (0xc78c >> 2),
 391	0x00000000,
 392	(0x0400 << 16) | (0xc798 >> 2),
 393	0x00000000,
 394	(0x0400 << 16) | (0xc79c >> 2),
 395	0x00000000,
 396	(0x0400 << 16) | (0xc7a0 >> 2),
 397	0x00000000,
 398	(0x0400 << 16) | (0xc7a4 >> 2),
 399	0x00000000,
 400	(0x0400 << 16) | (0xc7a8 >> 2),
 401	0x00000000,
 402	(0x0400 << 16) | (0xc7ac >> 2),
 403	0x00000000,
 404	(0x0400 << 16) | (0xc7b0 >> 2),
 405	0x00000000,
 406	(0x0400 << 16) | (0xc7b4 >> 2),
 407	0x00000000,
 408	(0x0e00 << 16) | (0x9100 >> 2),
 409	0x00000000,
 410	(0x0e00 << 16) | (0x3c010 >> 2),
 411	0x00000000,
 412	(0x0e00 << 16) | (0x92a8 >> 2),
 413	0x00000000,
 414	(0x0e00 << 16) | (0x92ac >> 2),
 415	0x00000000,
 416	(0x0e00 << 16) | (0x92b4 >> 2),
 417	0x00000000,
 418	(0x0e00 << 16) | (0x92b8 >> 2),
 419	0x00000000,
 420	(0x0e00 << 16) | (0x92bc >> 2),
 421	0x00000000,
 422	(0x0e00 << 16) | (0x92c0 >> 2),
 423	0x00000000,
 424	(0x0e00 << 16) | (0x92c4 >> 2),
 425	0x00000000,
 426	(0x0e00 << 16) | (0x92c8 >> 2),
 427	0x00000000,
 428	(0x0e00 << 16) | (0x92cc >> 2),
 429	0x00000000,
 430	(0x0e00 << 16) | (0x92d0 >> 2),
 431	0x00000000,
 432	(0x0e00 << 16) | (0x8c00 >> 2),
 433	0x00000000,
 434	(0x0e00 << 16) | (0x8c04 >> 2),
 435	0x00000000,
 436	(0x0e00 << 16) | (0x8c20 >> 2),
 437	0x00000000,
 438	(0x0e00 << 16) | (0x8c38 >> 2),
 439	0x00000000,
 440	(0x0e00 << 16) | (0x8c3c >> 2),
 441	0x00000000,
 442	(0x0e00 << 16) | (0xae00 >> 2),
 443	0x00000000,
 444	(0x0e00 << 16) | (0x9604 >> 2),
 445	0x00000000,
 446	(0x0e00 << 16) | (0xac08 >> 2),
 447	0x00000000,
 448	(0x0e00 << 16) | (0xac0c >> 2),
 449	0x00000000,
 450	(0x0e00 << 16) | (0xac10 >> 2),
 451	0x00000000,
 452	(0x0e00 << 16) | (0xac14 >> 2),
 453	0x00000000,
 454	(0x0e00 << 16) | (0xac58 >> 2),
 455	0x00000000,
 456	(0x0e00 << 16) | (0xac68 >> 2),
 457	0x00000000,
 458	(0x0e00 << 16) | (0xac6c >> 2),
 459	0x00000000,
 460	(0x0e00 << 16) | (0xac70 >> 2),
 461	0x00000000,
 462	(0x0e00 << 16) | (0xac74 >> 2),
 463	0x00000000,
 464	(0x0e00 << 16) | (0xac78 >> 2),
 465	0x00000000,
 466	(0x0e00 << 16) | (0xac7c >> 2),
 467	0x00000000,
 468	(0x0e00 << 16) | (0xac80 >> 2),
 469	0x00000000,
 470	(0x0e00 << 16) | (0xac84 >> 2),
 471	0x00000000,
 472	(0x0e00 << 16) | (0xac88 >> 2),
 473	0x00000000,
 474	(0x0e00 << 16) | (0xac8c >> 2),
 475	0x00000000,
 476	(0x0e00 << 16) | (0x970c >> 2),
 477	0x00000000,
 478	(0x0e00 << 16) | (0x9714 >> 2),
 479	0x00000000,
 480	(0x0e00 << 16) | (0x9718 >> 2),
 481	0x00000000,
 482	(0x0e00 << 16) | (0x971c >> 2),
 483	0x00000000,
 484	(0x0e00 << 16) | (0x31068 >> 2),
 485	0x00000000,
 486	(0x4e00 << 16) | (0x31068 >> 2),
 487	0x00000000,
 488	(0x5e00 << 16) | (0x31068 >> 2),
 489	0x00000000,
 490	(0x6e00 << 16) | (0x31068 >> 2),
 491	0x00000000,
 492	(0x7e00 << 16) | (0x31068 >> 2),
 493	0x00000000,
 494	(0x8e00 << 16) | (0x31068 >> 2),
 495	0x00000000,
 496	(0x9e00 << 16) | (0x31068 >> 2),
 497	0x00000000,
 498	(0xae00 << 16) | (0x31068 >> 2),
 499	0x00000000,
 500	(0xbe00 << 16) | (0x31068 >> 2),
 501	0x00000000,
 502	(0x0e00 << 16) | (0xcd10 >> 2),
 503	0x00000000,
 504	(0x0e00 << 16) | (0xcd14 >> 2),
 505	0x00000000,
 506	(0x0e00 << 16) | (0x88b0 >> 2),
 507	0x00000000,
 508	(0x0e00 << 16) | (0x88b4 >> 2),
 509	0x00000000,
 510	(0x0e00 << 16) | (0x88b8 >> 2),
 511	0x00000000,
 512	(0x0e00 << 16) | (0x88bc >> 2),
 513	0x00000000,
 514	(0x0400 << 16) | (0x89c0 >> 2),
 515	0x00000000,
 516	(0x0e00 << 16) | (0x88c4 >> 2),
 517	0x00000000,
 518	(0x0e00 << 16) | (0x88c8 >> 2),
 519	0x00000000,
 520	(0x0e00 << 16) | (0x88d0 >> 2),
 521	0x00000000,
 522	(0x0e00 << 16) | (0x88d4 >> 2),
 523	0x00000000,
 524	(0x0e00 << 16) | (0x88d8 >> 2),
 525	0x00000000,
 526	(0x0e00 << 16) | (0x8980 >> 2),
 527	0x00000000,
 528	(0x0e00 << 16) | (0x30938 >> 2),
 529	0x00000000,
 530	(0x0e00 << 16) | (0x3093c >> 2),
 531	0x00000000,
 532	(0x0e00 << 16) | (0x30940 >> 2),
 533	0x00000000,
 534	(0x0e00 << 16) | (0x89a0 >> 2),
 535	0x00000000,
 536	(0x0e00 << 16) | (0x30900 >> 2),
 537	0x00000000,
 538	(0x0e00 << 16) | (0x30904 >> 2),
 539	0x00000000,
 540	(0x0e00 << 16) | (0x89b4 >> 2),
 541	0x00000000,
 542	(0x0e00 << 16) | (0x3c210 >> 2),
 543	0x00000000,
 544	(0x0e00 << 16) | (0x3c214 >> 2),
 545	0x00000000,
 546	(0x0e00 << 16) | (0x3c218 >> 2),
 547	0x00000000,
 548	(0x0e00 << 16) | (0x8904 >> 2),
 549	0x00000000,
 550	0x5,
 551	(0x0e00 << 16) | (0x8c28 >> 2),
 552	(0x0e00 << 16) | (0x8c2c >> 2),
 553	(0x0e00 << 16) | (0x8c30 >> 2),
 554	(0x0e00 << 16) | (0x8c34 >> 2),
 555	(0x0e00 << 16) | (0x9600 >> 2),
 556};
 557
 558static const u32 kalindi_rlc_save_restore_register_list[] = {
 
 559	(0x0e00 << 16) | (0xc12c >> 2),
 560	0x00000000,
 561	(0x0e00 << 16) | (0xc140 >> 2),
 562	0x00000000,
 563	(0x0e00 << 16) | (0xc150 >> 2),
 564	0x00000000,
 565	(0x0e00 << 16) | (0xc15c >> 2),
 566	0x00000000,
 567	(0x0e00 << 16) | (0xc168 >> 2),
 568	0x00000000,
 569	(0x0e00 << 16) | (0xc170 >> 2),
 570	0x00000000,
 571	(0x0e00 << 16) | (0xc204 >> 2),
 572	0x00000000,
 573	(0x0e00 << 16) | (0xc2b4 >> 2),
 574	0x00000000,
 575	(0x0e00 << 16) | (0xc2b8 >> 2),
 576	0x00000000,
 577	(0x0e00 << 16) | (0xc2bc >> 2),
 578	0x00000000,
 579	(0x0e00 << 16) | (0xc2c0 >> 2),
 580	0x00000000,
 581	(0x0e00 << 16) | (0x8228 >> 2),
 582	0x00000000,
 583	(0x0e00 << 16) | (0x829c >> 2),
 584	0x00000000,
 585	(0x0e00 << 16) | (0x869c >> 2),
 586	0x00000000,
 587	(0x0600 << 16) | (0x98f4 >> 2),
 588	0x00000000,
 589	(0x0e00 << 16) | (0x98f8 >> 2),
 590	0x00000000,
 591	(0x0e00 << 16) | (0x9900 >> 2),
 592	0x00000000,
 593	(0x0e00 << 16) | (0xc260 >> 2),
 594	0x00000000,
 595	(0x0e00 << 16) | (0x90e8 >> 2),
 596	0x00000000,
 597	(0x0e00 << 16) | (0x3c000 >> 2),
 598	0x00000000,
 599	(0x0e00 << 16) | (0x3c00c >> 2),
 600	0x00000000,
 601	(0x0e00 << 16) | (0x8c1c >> 2),
 602	0x00000000,
 603	(0x0e00 << 16) | (0x9700 >> 2),
 604	0x00000000,
 605	(0x0e00 << 16) | (0xcd20 >> 2),
 606	0x00000000,
 607	(0x4e00 << 16) | (0xcd20 >> 2),
 608	0x00000000,
 609	(0x5e00 << 16) | (0xcd20 >> 2),
 610	0x00000000,
 611	(0x6e00 << 16) | (0xcd20 >> 2),
 612	0x00000000,
 613	(0x7e00 << 16) | (0xcd20 >> 2),
 614	0x00000000,
 615	(0x0e00 << 16) | (0x89bc >> 2),
 616	0x00000000,
 617	(0x0e00 << 16) | (0x8900 >> 2),
 618	0x00000000,
 619	0x3,
 620	(0x0e00 << 16) | (0xc130 >> 2),
 621	0x00000000,
 622	(0x0e00 << 16) | (0xc134 >> 2),
 623	0x00000000,
 624	(0x0e00 << 16) | (0xc1fc >> 2),
 625	0x00000000,
 626	(0x0e00 << 16) | (0xc208 >> 2),
 627	0x00000000,
 628	(0x0e00 << 16) | (0xc264 >> 2),
 629	0x00000000,
 630	(0x0e00 << 16) | (0xc268 >> 2),
 631	0x00000000,
 632	(0x0e00 << 16) | (0xc26c >> 2),
 633	0x00000000,
 634	(0x0e00 << 16) | (0xc270 >> 2),
 635	0x00000000,
 636	(0x0e00 << 16) | (0xc274 >> 2),
 637	0x00000000,
 638	(0x0e00 << 16) | (0xc28c >> 2),
 639	0x00000000,
 640	(0x0e00 << 16) | (0xc290 >> 2),
 641	0x00000000,
 642	(0x0e00 << 16) | (0xc294 >> 2),
 643	0x00000000,
 644	(0x0e00 << 16) | (0xc298 >> 2),
 645	0x00000000,
 646	(0x0e00 << 16) | (0xc2a0 >> 2),
 647	0x00000000,
 648	(0x0e00 << 16) | (0xc2a4 >> 2),
 649	0x00000000,
 650	(0x0e00 << 16) | (0xc2a8 >> 2),
 651	0x00000000,
 652	(0x0e00 << 16) | (0xc2ac >> 2),
 653	0x00000000,
 654	(0x0e00 << 16) | (0x301d0 >> 2),
 655	0x00000000,
 656	(0x0e00 << 16) | (0x30238 >> 2),
 657	0x00000000,
 658	(0x0e00 << 16) | (0x30250 >> 2),
 659	0x00000000,
 660	(0x0e00 << 16) | (0x30254 >> 2),
 661	0x00000000,
 662	(0x0e00 << 16) | (0x30258 >> 2),
 663	0x00000000,
 664	(0x0e00 << 16) | (0x3025c >> 2),
 665	0x00000000,
 666	(0x4e00 << 16) | (0xc900 >> 2),
 667	0x00000000,
 668	(0x5e00 << 16) | (0xc900 >> 2),
 669	0x00000000,
 670	(0x6e00 << 16) | (0xc900 >> 2),
 671	0x00000000,
 672	(0x7e00 << 16) | (0xc900 >> 2),
 673	0x00000000,
 674	(0x4e00 << 16) | (0xc904 >> 2),
 675	0x00000000,
 676	(0x5e00 << 16) | (0xc904 >> 2),
 677	0x00000000,
 678	(0x6e00 << 16) | (0xc904 >> 2),
 679	0x00000000,
 680	(0x7e00 << 16) | (0xc904 >> 2),
 681	0x00000000,
 682	(0x4e00 << 16) | (0xc908 >> 2),
 683	0x00000000,
 684	(0x5e00 << 16) | (0xc908 >> 2),
 685	0x00000000,
 686	(0x6e00 << 16) | (0xc908 >> 2),
 687	0x00000000,
 688	(0x7e00 << 16) | (0xc908 >> 2),
 689	0x00000000,
 690	(0x4e00 << 16) | (0xc90c >> 2),
 691	0x00000000,
 692	(0x5e00 << 16) | (0xc90c >> 2),
 693	0x00000000,
 694	(0x6e00 << 16) | (0xc90c >> 2),
 695	0x00000000,
 696	(0x7e00 << 16) | (0xc90c >> 2),
 697	0x00000000,
 698	(0x4e00 << 16) | (0xc910 >> 2),
 699	0x00000000,
 700	(0x5e00 << 16) | (0xc910 >> 2),
 701	0x00000000,
 702	(0x6e00 << 16) | (0xc910 >> 2),
 703	0x00000000,
 704	(0x7e00 << 16) | (0xc910 >> 2),
 705	0x00000000,
 706	(0x0e00 << 16) | (0xc99c >> 2),
 707	0x00000000,
 708	(0x0e00 << 16) | (0x9834 >> 2),
 709	0x00000000,
 710	(0x0000 << 16) | (0x30f00 >> 2),
 711	0x00000000,
 712	(0x0000 << 16) | (0x30f04 >> 2),
 713	0x00000000,
 714	(0x0000 << 16) | (0x30f08 >> 2),
 715	0x00000000,
 716	(0x0000 << 16) | (0x30f0c >> 2),
 717	0x00000000,
 718	(0x0600 << 16) | (0x9b7c >> 2),
 719	0x00000000,
 720	(0x0e00 << 16) | (0x8a14 >> 2),
 721	0x00000000,
 722	(0x0e00 << 16) | (0x8a18 >> 2),
 723	0x00000000,
 724	(0x0600 << 16) | (0x30a00 >> 2),
 725	0x00000000,
 726	(0x0e00 << 16) | (0x8bf0 >> 2),
 727	0x00000000,
 728	(0x0e00 << 16) | (0x8bcc >> 2),
 729	0x00000000,
 730	(0x0e00 << 16) | (0x8b24 >> 2),
 731	0x00000000,
 732	(0x0e00 << 16) | (0x30a04 >> 2),
 733	0x00000000,
 734	(0x0600 << 16) | (0x30a10 >> 2),
 735	0x00000000,
 736	(0x0600 << 16) | (0x30a14 >> 2),
 737	0x00000000,
 738	(0x0600 << 16) | (0x30a18 >> 2),
 739	0x00000000,
 740	(0x0600 << 16) | (0x30a2c >> 2),
 741	0x00000000,
 742	(0x0e00 << 16) | (0xc700 >> 2),
 743	0x00000000,
 744	(0x0e00 << 16) | (0xc704 >> 2),
 745	0x00000000,
 746	(0x0e00 << 16) | (0xc708 >> 2),
 747	0x00000000,
 748	(0x0e00 << 16) | (0xc768 >> 2),
 749	0x00000000,
 750	(0x0400 << 16) | (0xc770 >> 2),
 751	0x00000000,
 752	(0x0400 << 16) | (0xc774 >> 2),
 753	0x00000000,
 754	(0x0400 << 16) | (0xc798 >> 2),
 755	0x00000000,
 756	(0x0400 << 16) | (0xc79c >> 2),
 757	0x00000000,
 758	(0x0e00 << 16) | (0x9100 >> 2),
 759	0x00000000,
 760	(0x0e00 << 16) | (0x3c010 >> 2),
 761	0x00000000,
 762	(0x0e00 << 16) | (0x8c00 >> 2),
 763	0x00000000,
 764	(0x0e00 << 16) | (0x8c04 >> 2),
 765	0x00000000,
 766	(0x0e00 << 16) | (0x8c20 >> 2),
 767	0x00000000,
 768	(0x0e00 << 16) | (0x8c38 >> 2),
 769	0x00000000,
 770	(0x0e00 << 16) | (0x8c3c >> 2),
 771	0x00000000,
 772	(0x0e00 << 16) | (0xae00 >> 2),
 773	0x00000000,
 774	(0x0e00 << 16) | (0x9604 >> 2),
 775	0x00000000,
 776	(0x0e00 << 16) | (0xac08 >> 2),
 777	0x00000000,
 778	(0x0e00 << 16) | (0xac0c >> 2),
 779	0x00000000,
 780	(0x0e00 << 16) | (0xac10 >> 2),
 781	0x00000000,
 782	(0x0e00 << 16) | (0xac14 >> 2),
 783	0x00000000,
 784	(0x0e00 << 16) | (0xac58 >> 2),
 785	0x00000000,
 786	(0x0e00 << 16) | (0xac68 >> 2),
 787	0x00000000,
 788	(0x0e00 << 16) | (0xac6c >> 2),
 789	0x00000000,
 790	(0x0e00 << 16) | (0xac70 >> 2),
 791	0x00000000,
 792	(0x0e00 << 16) | (0xac74 >> 2),
 793	0x00000000,
 794	(0x0e00 << 16) | (0xac78 >> 2),
 795	0x00000000,
 796	(0x0e00 << 16) | (0xac7c >> 2),
 797	0x00000000,
 798	(0x0e00 << 16) | (0xac80 >> 2),
 799	0x00000000,
 800	(0x0e00 << 16) | (0xac84 >> 2),
 801	0x00000000,
 802	(0x0e00 << 16) | (0xac88 >> 2),
 803	0x00000000,
 804	(0x0e00 << 16) | (0xac8c >> 2),
 805	0x00000000,
 806	(0x0e00 << 16) | (0x970c >> 2),
 807	0x00000000,
 808	(0x0e00 << 16) | (0x9714 >> 2),
 809	0x00000000,
 810	(0x0e00 << 16) | (0x9718 >> 2),
 811	0x00000000,
 812	(0x0e00 << 16) | (0x971c >> 2),
 813	0x00000000,
 814	(0x0e00 << 16) | (0x31068 >> 2),
 815	0x00000000,
 816	(0x4e00 << 16) | (0x31068 >> 2),
 817	0x00000000,
 818	(0x5e00 << 16) | (0x31068 >> 2),
 819	0x00000000,
 820	(0x6e00 << 16) | (0x31068 >> 2),
 821	0x00000000,
 822	(0x7e00 << 16) | (0x31068 >> 2),
 823	0x00000000,
 824	(0x0e00 << 16) | (0xcd10 >> 2),
 825	0x00000000,
 826	(0x0e00 << 16) | (0xcd14 >> 2),
 827	0x00000000,
 828	(0x0e00 << 16) | (0x88b0 >> 2),
 829	0x00000000,
 830	(0x0e00 << 16) | (0x88b4 >> 2),
 831	0x00000000,
 832	(0x0e00 << 16) | (0x88b8 >> 2),
 833	0x00000000,
 834	(0x0e00 << 16) | (0x88bc >> 2),
 835	0x00000000,
 836	(0x0400 << 16) | (0x89c0 >> 2),
 837	0x00000000,
 838	(0x0e00 << 16) | (0x88c4 >> 2),
 839	0x00000000,
 840	(0x0e00 << 16) | (0x88c8 >> 2),
 841	0x00000000,
 842	(0x0e00 << 16) | (0x88d0 >> 2),
 843	0x00000000,
 844	(0x0e00 << 16) | (0x88d4 >> 2),
 845	0x00000000,
 846	(0x0e00 << 16) | (0x88d8 >> 2),
 847	0x00000000,
 848	(0x0e00 << 16) | (0x8980 >> 2),
 849	0x00000000,
 850	(0x0e00 << 16) | (0x30938 >> 2),
 851	0x00000000,
 852	(0x0e00 << 16) | (0x3093c >> 2),
 853	0x00000000,
 854	(0x0e00 << 16) | (0x30940 >> 2),
 855	0x00000000,
 856	(0x0e00 << 16) | (0x89a0 >> 2),
 857	0x00000000,
 858	(0x0e00 << 16) | (0x30900 >> 2),
 859	0x00000000,
 860	(0x0e00 << 16) | (0x30904 >> 2),
 861	0x00000000,
 862	(0x0e00 << 16) | (0x89b4 >> 2),
 863	0x00000000,
 864	(0x0e00 << 16) | (0x3e1fc >> 2),
 865	0x00000000,
 866	(0x0e00 << 16) | (0x3c210 >> 2),
 867	0x00000000,
 868	(0x0e00 << 16) | (0x3c214 >> 2),
 869	0x00000000,
 870	(0x0e00 << 16) | (0x3c218 >> 2),
 871	0x00000000,
 872	(0x0e00 << 16) | (0x8904 >> 2),
 873	0x00000000,
 874	0x5,
 875	(0x0e00 << 16) | (0x8c28 >> 2),
 876	(0x0e00 << 16) | (0x8c2c >> 2),
 877	(0x0e00 << 16) | (0x8c30 >> 2),
 878	(0x0e00 << 16) | (0x8c34 >> 2),
 879	(0x0e00 << 16) | (0x9600 >> 2),
 880};
 881
 882static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
 883static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
 
 884static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 885static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 886
 887static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
 888{
 889	amdgpu_ucode_release(&adev->gfx.pfp_fw);
 890	amdgpu_ucode_release(&adev->gfx.me_fw);
 891	amdgpu_ucode_release(&adev->gfx.ce_fw);
 892	amdgpu_ucode_release(&adev->gfx.mec_fw);
 893	amdgpu_ucode_release(&adev->gfx.mec2_fw);
 894	amdgpu_ucode_release(&adev->gfx.rlc_fw);
 895}
 896
 897/*
 898 * Core functions
 899 */
 900/**
 901 * gfx_v7_0_init_microcode - load ucode images from disk
 902 *
 903 * @adev: amdgpu_device pointer
 904 *
 905 * Use the firmware interface to load the ucode images into
 906 * the driver (not loaded into hw).
 907 * Returns 0 on success, error on failure.
 908 */
 909static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 910{
 911	const char *chip_name;
 
 912	int err;
 913
 914	DRM_DEBUG("\n");
 915
 916	switch (adev->asic_type) {
 917	case CHIP_BONAIRE:
 918		chip_name = "bonaire";
 919		break;
 920	case CHIP_HAWAII:
 921		chip_name = "hawaii";
 922		break;
 923	case CHIP_KAVERI:
 924		chip_name = "kaveri";
 925		break;
 926	case CHIP_KABINI:
 927		chip_name = "kabini";
 928		break;
 929	case CHIP_MULLINS:
 930		chip_name = "mullins";
 931		break;
 932	default:
 933		BUG();
 934	}
 935
 936	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
 937				   "amdgpu/%s_pfp.bin", chip_name);
 
 
 
 938	if (err)
 939		goto out;
 940
 941	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
 942				   "amdgpu/%s_me.bin", chip_name);
 
 
 
 943	if (err)
 944		goto out;
 945
 946	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
 947				   "amdgpu/%s_ce.bin", chip_name);
 
 
 
 948	if (err)
 949		goto out;
 950
 951	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
 952				   "amdgpu/%s_mec.bin", chip_name);
 
 
 
 953	if (err)
 954		goto out;
 955
 956	if (adev->asic_type == CHIP_KAVERI) {
 957		err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
 958					   "amdgpu/%s_mec2.bin", chip_name);
 
 
 
 959		if (err)
 960			goto out;
 961	}
 962
 963	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
 964				   "amdgpu/%s_rlc.bin", chip_name);
 
 
 
 
 965out:
 966	if (err) {
 967		pr_err("gfx7: Failed to load firmware %s gfx firmware\n", chip_name);
 968		gfx_v7_0_free_microcode(adev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 969	}
 970	return err;
 971}
 972
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 973/**
 974 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 975 *
 976 * @adev: amdgpu_device pointer
 977 *
 978 * Starting with SI, the tiling setup is done globally in a
 979 * set of 32 tiling modes.  Rather than selecting each set of
 980 * parameters per surface as on older asics, we just select
 981 * which index in the tiling table we want to use, and the
 982 * surface uses those parameters (CIK).
 983 */
 984static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
 985{
 986	const u32 num_tile_mode_states =
 987			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
 988	const u32 num_secondary_tile_mode_states =
 989			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
 990	u32 reg_offset, split_equal_to_row_size;
 991	uint32_t *tile, *macrotile;
 992
 993	tile = adev->gfx.config.tile_mode_array;
 994	macrotile = adev->gfx.config.macrotile_mode_array;
 995
 996	switch (adev->gfx.config.mem_row_size_in_kb) {
 997	case 1:
 998		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
 999		break;
1000	case 2:
1001	default:
1002		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
1003		break;
1004	case 4:
1005		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1006		break;
1007	}
1008
1009	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1010		tile[reg_offset] = 0;
1011	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1012		macrotile[reg_offset] = 0;
1013
1014	switch (adev->asic_type) {
1015	case CHIP_BONAIRE:
1016		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1017			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1018			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1019			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1020		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1021			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1022			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1023			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1024		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1025			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1026			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1027			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1028		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1029			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1030			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1031			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1032		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1033			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1034			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1035			   TILE_SPLIT(split_equal_to_row_size));
1036		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1037			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1038			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1039		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1040			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1041			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1042			   TILE_SPLIT(split_equal_to_row_size));
1043		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1044		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1045			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
1046		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1047			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1048			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1049		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1050			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1051			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1052			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1053		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1054			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1055			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1056			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1057		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1058		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1059			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1060			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1061		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1062			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1063			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1064			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1065		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1066			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1067			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1068			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1069		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1070			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1071			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1072			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1073		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1074		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1075			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1076			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1077			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1078		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1079			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1080			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1081		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1082			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1083			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1084			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1085		tile[21] =  (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1086			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1087			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1088			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1089		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1090			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1091			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1092			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1093		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1094		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1095			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1096			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1097			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1098		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1099			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1100			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1101			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1102		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1103			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1104			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1105			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1106		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1107			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1108			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1109		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1110			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1111			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1112			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1113		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1114			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1115			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1116			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1117		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1118
1119		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1120				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1121				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1122				NUM_BANKS(ADDR_SURF_16_BANK));
1123		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1124				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1125				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1126				NUM_BANKS(ADDR_SURF_16_BANK));
1127		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1128				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1129				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1130				NUM_BANKS(ADDR_SURF_16_BANK));
1131		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1132				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1133				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1134				NUM_BANKS(ADDR_SURF_16_BANK));
1135		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1136				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1137				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1138				NUM_BANKS(ADDR_SURF_16_BANK));
1139		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1140				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1141				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1142				NUM_BANKS(ADDR_SURF_8_BANK));
1143		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1144				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1145				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1146				NUM_BANKS(ADDR_SURF_4_BANK));
1147		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1148				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1149				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1150				NUM_BANKS(ADDR_SURF_16_BANK));
1151		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1152				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1153				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1154				NUM_BANKS(ADDR_SURF_16_BANK));
1155		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1156				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1157				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1158				NUM_BANKS(ADDR_SURF_16_BANK));
1159		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1160				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1161				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1162				NUM_BANKS(ADDR_SURF_16_BANK));
1163		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1164				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1165				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1166				NUM_BANKS(ADDR_SURF_16_BANK));
1167		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1168				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1169				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1170				NUM_BANKS(ADDR_SURF_8_BANK));
1171		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1172				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1173				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1174				NUM_BANKS(ADDR_SURF_4_BANK));
1175
1176		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1177			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1178		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1179			if (reg_offset != 7)
1180				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1181		break;
1182	case CHIP_HAWAII:
1183		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1184			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1185			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1186			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1187		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1188			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1189			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1190			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1191		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1192			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1193			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1194			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1195		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1196			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1197			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1198			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1199		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1200			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1201			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1202			   TILE_SPLIT(split_equal_to_row_size));
1203		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1204			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1205			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1206			   TILE_SPLIT(split_equal_to_row_size));
1207		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1208			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1209			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1210			   TILE_SPLIT(split_equal_to_row_size));
1211		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1212			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1213			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1214			   TILE_SPLIT(split_equal_to_row_size));
1215		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1216			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1217		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1218			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1219			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1220		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1221			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1222			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1223			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1224		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1225			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1226			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1227			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1228		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1229			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1230			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1231			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1232		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1233			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1234			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1235		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1236			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1237			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1238			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1239		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1240			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1241			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1242			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1243		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1244			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1245			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1246			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1247		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1248			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1249			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1250			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1251		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1252			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1253			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1254			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1255		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1256			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1257			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1258		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1259			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1260			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1261			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1262		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1263			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1264			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1265			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1266		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1267			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1268			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1269			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1270		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1271			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1272			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1273			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1274		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1275			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1276			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1277			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1278		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1279			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1280			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1281			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1282		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1283			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1284			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1285			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1286		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1287			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1288			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1289		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1290			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1291			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1292			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1293		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1294			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1295			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1296			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1297		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1298			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1299			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1300			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1301
1302		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1303				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1304				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1305				NUM_BANKS(ADDR_SURF_16_BANK));
1306		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1307				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1308				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1309				NUM_BANKS(ADDR_SURF_16_BANK));
1310		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1311				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1312				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1313				NUM_BANKS(ADDR_SURF_16_BANK));
1314		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1315				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1316				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1317				NUM_BANKS(ADDR_SURF_16_BANK));
1318		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1319				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1320				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1321				NUM_BANKS(ADDR_SURF_8_BANK));
1322		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1323				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1324				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1325				NUM_BANKS(ADDR_SURF_4_BANK));
1326		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1327				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1328				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1329				NUM_BANKS(ADDR_SURF_4_BANK));
1330		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1331				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1332				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1333				NUM_BANKS(ADDR_SURF_16_BANK));
1334		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1335				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1336				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1337				NUM_BANKS(ADDR_SURF_16_BANK));
1338		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1339				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1340				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1341				NUM_BANKS(ADDR_SURF_16_BANK));
1342		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1343				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1344				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1345				NUM_BANKS(ADDR_SURF_8_BANK));
1346		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1347				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1348				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1349				NUM_BANKS(ADDR_SURF_16_BANK));
1350		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1351				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1352				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1353				NUM_BANKS(ADDR_SURF_8_BANK));
1354		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1355				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1356				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1357				NUM_BANKS(ADDR_SURF_4_BANK));
1358
1359		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1360			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1361		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1362			if (reg_offset != 7)
1363				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1364		break;
1365	case CHIP_KABINI:
1366	case CHIP_KAVERI:
1367	case CHIP_MULLINS:
1368	default:
1369		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1370			   PIPE_CONFIG(ADDR_SURF_P2) |
1371			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1372			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1373		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1374			   PIPE_CONFIG(ADDR_SURF_P2) |
1375			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1376			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1377		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1378			   PIPE_CONFIG(ADDR_SURF_P2) |
1379			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1380			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1381		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1382			   PIPE_CONFIG(ADDR_SURF_P2) |
1383			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1384			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1385		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1386			   PIPE_CONFIG(ADDR_SURF_P2) |
1387			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1388			   TILE_SPLIT(split_equal_to_row_size));
1389		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1390			   PIPE_CONFIG(ADDR_SURF_P2) |
1391			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1392		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1393			   PIPE_CONFIG(ADDR_SURF_P2) |
1394			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1395			   TILE_SPLIT(split_equal_to_row_size));
1396		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1397		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1398			   PIPE_CONFIG(ADDR_SURF_P2));
1399		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1400			   PIPE_CONFIG(ADDR_SURF_P2) |
1401			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1402		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1403			    PIPE_CONFIG(ADDR_SURF_P2) |
1404			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1405			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1406		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1407			    PIPE_CONFIG(ADDR_SURF_P2) |
1408			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1409			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1410		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1411		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1412			    PIPE_CONFIG(ADDR_SURF_P2) |
1413			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1414		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1415			    PIPE_CONFIG(ADDR_SURF_P2) |
1416			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1417			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1418		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1419			    PIPE_CONFIG(ADDR_SURF_P2) |
1420			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1421			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1422		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1423			    PIPE_CONFIG(ADDR_SURF_P2) |
1424			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1425			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1426		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1427		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1428			    PIPE_CONFIG(ADDR_SURF_P2) |
1429			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1430			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1431		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1432			    PIPE_CONFIG(ADDR_SURF_P2) |
1433			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1434		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1435			    PIPE_CONFIG(ADDR_SURF_P2) |
1436			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1437			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1438		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1439			    PIPE_CONFIG(ADDR_SURF_P2) |
1440			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1441			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1442		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1443			    PIPE_CONFIG(ADDR_SURF_P2) |
1444			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1445			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1446		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1447		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1448			    PIPE_CONFIG(ADDR_SURF_P2) |
1449			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1450			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1451		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1452			    PIPE_CONFIG(ADDR_SURF_P2) |
1453			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1454			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1455		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1456			    PIPE_CONFIG(ADDR_SURF_P2) |
1457			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1458			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1459		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1460			    PIPE_CONFIG(ADDR_SURF_P2) |
1461			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1462		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1463			    PIPE_CONFIG(ADDR_SURF_P2) |
1464			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1465			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1466		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1467			    PIPE_CONFIG(ADDR_SURF_P2) |
1468			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1469			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1470		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1471
1472		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1473				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1474				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1475				NUM_BANKS(ADDR_SURF_8_BANK));
1476		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1477				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1478				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1479				NUM_BANKS(ADDR_SURF_8_BANK));
1480		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1481				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1482				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1483				NUM_BANKS(ADDR_SURF_8_BANK));
1484		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1485				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1486				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1487				NUM_BANKS(ADDR_SURF_8_BANK));
1488		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1489				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1490				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1491				NUM_BANKS(ADDR_SURF_8_BANK));
1492		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1493				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1494				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1495				NUM_BANKS(ADDR_SURF_8_BANK));
1496		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1497				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1498				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1499				NUM_BANKS(ADDR_SURF_8_BANK));
1500		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1501				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1502				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1503				NUM_BANKS(ADDR_SURF_16_BANK));
1504		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1505				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1506				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1507				NUM_BANKS(ADDR_SURF_16_BANK));
1508		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1509				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1510				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1511				NUM_BANKS(ADDR_SURF_16_BANK));
1512		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1513				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1514				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1515				NUM_BANKS(ADDR_SURF_16_BANK));
1516		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1517				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1518				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1519				NUM_BANKS(ADDR_SURF_16_BANK));
1520		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1521				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1522				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1523				NUM_BANKS(ADDR_SURF_16_BANK));
1524		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1525				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1526				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1527				NUM_BANKS(ADDR_SURF_8_BANK));
1528
1529		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1530			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1531		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1532			if (reg_offset != 7)
1533				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1534		break;
1535	}
1536}
1537
1538/**
1539 * gfx_v7_0_select_se_sh - select which SE, SH to address
1540 *
1541 * @adev: amdgpu_device pointer
1542 * @se_num: shader engine to address
1543 * @sh_num: sh block to address
1544 * @instance: Certain registers are instanced per SE or SH.
1545 *            0xffffffff means broadcast to all SEs or SHs (CIK).
1546 * @xcc_id: xcc accelerated compute core id
1547 * Select which SE, SH combinations to address.
1548 */
1549static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1550				  u32 se_num, u32 sh_num, u32 instance,
1551				  int xcc_id)
1552{
1553	u32 data;
1554
1555	if (instance == 0xffffffff)
1556		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1557	else
1558		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1559
1560	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1561		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1562			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
1563	else if (se_num == 0xffffffff)
1564		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
1565			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
1566	else if (sh_num == 0xffffffff)
1567		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1568			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1569	else
1570		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
1571			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1572	WREG32(mmGRBM_GFX_INDEX, data);
1573}
1574
1575/**
 
 
 
 
 
 
 
 
 
 
 
 
 
1576 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1577 *
1578 * @adev: amdgpu_device pointer
1579 *
1580 * Calculates the bitmask of enabled RBs (CIK).
1581 * Returns the enabled RB bitmask.
1582 */
1583static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1584{
1585	u32 data, mask;
1586
1587	data = RREG32(mmCC_RB_BACKEND_DISABLE);
1588	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1589
1590	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1591	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1592
1593	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1594					 adev->gfx.config.max_sh_per_se);
1595
1596	return (~data) & mask;
1597}
1598
1599static void
1600gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1601{
1602	switch (adev->asic_type) {
1603	case CHIP_BONAIRE:
1604		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1605			  SE_XSEL(1) | SE_YSEL(1);
1606		*rconf1 |= 0x0;
1607		break;
1608	case CHIP_HAWAII:
1609		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1610			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1611			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1612			  SE_YSEL(3);
1613		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1614			   SE_PAIR_YSEL(2);
1615		break;
1616	case CHIP_KAVERI:
1617		*rconf |= RB_MAP_PKR0(2);
1618		*rconf1 |= 0x0;
1619		break;
1620	case CHIP_KABINI:
1621	case CHIP_MULLINS:
1622		*rconf |= 0x0;
1623		*rconf1 |= 0x0;
1624		break;
1625	default:
1626		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1627		break;
1628	}
1629}
1630
1631static void
1632gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1633					u32 raster_config, u32 raster_config_1,
1634					unsigned rb_mask, unsigned num_rb)
1635{
1636	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1637	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1638	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1639	unsigned rb_per_se = num_rb / num_se;
1640	unsigned se_mask[4];
1641	unsigned se;
1642
1643	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1644	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1645	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1646	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1647
1648	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1649	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1650	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1651
1652	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1653			     (!se_mask[2] && !se_mask[3]))) {
1654		raster_config_1 &= ~SE_PAIR_MAP_MASK;
1655
1656		if (!se_mask[0] && !se_mask[1]) {
1657			raster_config_1 |=
1658				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1659		} else {
1660			raster_config_1 |=
1661				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1662		}
1663	}
1664
1665	for (se = 0; se < num_se; se++) {
1666		unsigned raster_config_se = raster_config;
1667		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1668		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1669		int idx = (se / 2) * 2;
1670
1671		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1672			raster_config_se &= ~SE_MAP_MASK;
1673
1674			if (!se_mask[idx]) {
1675				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1676			} else {
1677				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1678			}
1679		}
1680
1681		pkr0_mask &= rb_mask;
1682		pkr1_mask &= rb_mask;
1683		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1684			raster_config_se &= ~PKR_MAP_MASK;
1685
1686			if (!pkr0_mask) {
1687				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1688			} else {
1689				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1690			}
1691		}
1692
1693		if (rb_per_se >= 2) {
1694			unsigned rb0_mask = 1 << (se * rb_per_se);
1695			unsigned rb1_mask = rb0_mask << 1;
1696
1697			rb0_mask &= rb_mask;
1698			rb1_mask &= rb_mask;
1699			if (!rb0_mask || !rb1_mask) {
1700				raster_config_se &= ~RB_MAP_PKR0_MASK;
1701
1702				if (!rb0_mask) {
1703					raster_config_se |=
1704						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1705				} else {
1706					raster_config_se |=
1707						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1708				}
1709			}
1710
1711			if (rb_per_se > 2) {
1712				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1713				rb1_mask = rb0_mask << 1;
1714				rb0_mask &= rb_mask;
1715				rb1_mask &= rb_mask;
1716				if (!rb0_mask || !rb1_mask) {
1717					raster_config_se &= ~RB_MAP_PKR1_MASK;
1718
1719					if (!rb0_mask) {
1720						raster_config_se |=
1721							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1722					} else {
1723						raster_config_se |=
1724							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1725					}
1726				}
1727			}
1728		}
1729
1730		/* GRBM_GFX_INDEX has a different offset on CI+ */
1731		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
1732		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1733		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1734	}
1735
1736	/* GRBM_GFX_INDEX has a different offset on CI+ */
1737	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1738}
1739
1740/**
1741 * gfx_v7_0_setup_rb - setup the RBs on the asic
1742 *
1743 * @adev: amdgpu_device pointer
 
 
1744 *
1745 * Configures per-SE/SH RB registers (CIK).
1746 */
1747static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1748{
1749	int i, j;
1750	u32 data;
1751	u32 raster_config = 0, raster_config_1 = 0;
1752	u32 active_rbs = 0;
1753	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1754					adev->gfx.config.max_sh_per_se;
1755	unsigned num_rb_pipes;
1756
1757	mutex_lock(&adev->grbm_idx_mutex);
1758	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1759		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1760			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1761			data = gfx_v7_0_get_rb_active_bitmap(adev);
1762			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1763					       rb_bitmap_width_per_sh);
1764		}
1765	}
1766	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1767
1768	adev->gfx.config.backend_enable_mask = active_rbs;
1769	adev->gfx.config.num_rbs = hweight32(active_rbs);
1770
1771	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1772			     adev->gfx.config.max_shader_engines, 16);
1773
1774	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1775
1776	if (!adev->gfx.config.backend_enable_mask ||
1777			adev->gfx.config.num_rbs >= num_rb_pipes) {
1778		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1779		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1780	} else {
1781		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1782							adev->gfx.config.backend_enable_mask,
1783							num_rb_pipes);
1784	}
1785
1786	/* cache the values for userspace */
1787	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1788		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1789			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1790			adev->gfx.config.rb_config[i][j].rb_backend_disable =
1791				RREG32(mmCC_RB_BACKEND_DISABLE);
1792			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
1793				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1794			adev->gfx.config.rb_config[i][j].raster_config =
1795				RREG32(mmPA_SC_RASTER_CONFIG);
1796			adev->gfx.config.rb_config[i][j].raster_config_1 =
1797				RREG32(mmPA_SC_RASTER_CONFIG_1);
1798		}
1799	}
1800	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1801	mutex_unlock(&adev->grbm_idx_mutex);
1802}
1803
1804#define DEFAULT_SH_MEM_BASES	(0x6000)
1805/**
1806 * gfx_v7_0_init_compute_vmid - gart enable
1807 *
1808 * @adev: amdgpu_device pointer
1809 *
1810 * Initialize compute vmid sh_mem registers
1811 *
1812 */
1813static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 
 
 
1814{
1815	int i;
1816	uint32_t sh_mem_config;
1817	uint32_t sh_mem_bases;
1818
1819	/*
1820	 * Configure apertures:
1821	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1822	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1823	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1824	*/
1825	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1826	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1827			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1828	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
1829	mutex_lock(&adev->srbm_mutex);
1830	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1831		cik_srbm_select(adev, 0, 0, 0, i);
1832		/* CP and shaders */
1833		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
1834		WREG32(mmSH_MEM_APE1_BASE, 1);
1835		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1836		WREG32(mmSH_MEM_BASES, sh_mem_bases);
1837	}
1838	cik_srbm_select(adev, 0, 0, 0, 0);
1839	mutex_unlock(&adev->srbm_mutex);
1840
1841	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1842	   access. These should be enabled by FW for target VMIDs. */
1843	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1844		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
1845		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
1846		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
1847		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
1848	}
1849}
1850
1851static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
1852{
1853	int vmid;
1854
1855	/*
1856	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1857	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1858	 * the driver can enable them for graphics. VMID0 should maintain
1859	 * access so that HWS firmware can save/restore entries.
1860	 */
1861	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
1862		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
1863		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
1864		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
1865		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
1866	}
1867}
1868
1869static void gfx_v7_0_config_init(struct amdgpu_device *adev)
1870{
1871	adev->gfx.config.double_offchip_lds_buf = 1;
1872}
1873
1874/**
1875 * gfx_v7_0_constants_init - setup the 3D engine
1876 *
1877 * @adev: amdgpu_device pointer
1878 *
1879 * init the gfx constants such as the 3D engine, tiling configuration
1880 * registers, maximum number of quad pipes, render backends...
1881 */
1882static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
1883{
1884	u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
1885	u32 tmp;
1886	int i;
1887
1888	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1889
1890	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1891	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1892	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1893
1894	gfx_v7_0_tiling_mode_table_init(adev);
1895
1896	gfx_v7_0_setup_rb(adev);
1897	gfx_v7_0_get_cu_info(adev);
1898	gfx_v7_0_config_init(adev);
1899
1900	/* set HW defaults for 3D engine */
1901	WREG32(mmCP_MEQ_THRESHOLDS,
1902	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1903	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1904
1905	mutex_lock(&adev->grbm_idx_mutex);
1906	/*
1907	 * making sure that the following register writes will be broadcasted
1908	 * to all the shaders
1909	 */
1910	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1911
1912	/* XXX SH_MEM regs */
1913	/* where to put LDS, scratch, GPUVM in FSA64 space */
1914	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1915				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1916	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
1917				   MTYPE_NC);
1918	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
1919				   MTYPE_UC);
1920	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
1921
1922	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
1923				   SWIZZLE_ENABLE, 1);
1924	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1925				   ELEMENT_SIZE, 1);
1926	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1927				   INDEX_STRIDE, 3);
1928	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
1929
1930	mutex_lock(&adev->srbm_mutex);
1931	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
1932		if (i == 0)
1933			sh_mem_base = 0;
1934		else
1935			sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1936		cik_srbm_select(adev, 0, 0, 0, i);
1937		/* CP and shaders */
1938		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1939		WREG32(mmSH_MEM_APE1_BASE, 1);
1940		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1941		WREG32(mmSH_MEM_BASES, sh_mem_base);
1942	}
1943	cik_srbm_select(adev, 0, 0, 0, 0);
1944	mutex_unlock(&adev->srbm_mutex);
1945
1946	gfx_v7_0_init_compute_vmid(adev);
1947	gfx_v7_0_init_gds_vmid(adev);
1948
1949	WREG32(mmSX_DEBUG_1, 0x20);
1950
1951	WREG32(mmTA_CNTL_AUX, 0x00010000);
1952
1953	tmp = RREG32(mmSPI_CONFIG_CNTL);
1954	tmp |= 0x03000000;
1955	WREG32(mmSPI_CONFIG_CNTL, tmp);
1956
1957	WREG32(mmSQ_CONFIG, 1);
1958
1959	WREG32(mmDB_DEBUG, 0);
1960
1961	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
1962	tmp |= 0x00000400;
1963	WREG32(mmDB_DEBUG2, tmp);
1964
1965	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
1966	tmp |= 0x00020200;
1967	WREG32(mmDB_DEBUG3, tmp);
1968
1969	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
1970	tmp |= 0x00018208;
1971	WREG32(mmCB_HW_CONTROL, tmp);
1972
1973	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1974
1975	WREG32(mmPA_SC_FIFO_SIZE,
1976		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1977		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1978		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1979		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1980
1981	WREG32(mmVGT_NUM_INSTANCES, 1);
1982
1983	WREG32(mmCP_PERFMON_CNTL, 0);
1984
1985	WREG32(mmSQ_CONFIG, 0);
1986
1987	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
1988		((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
1989		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
1990
1991	WREG32(mmVGT_CACHE_INVALIDATION,
1992		(VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
1993		(ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
1994
1995	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
1996	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
1997
1998	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
1999			(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
2000	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
 
2001
2002	tmp = RREG32(mmSPI_ARB_PRIORITY);
2003	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
2004	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
2005	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
2006	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
2007	WREG32(mmSPI_ARB_PRIORITY, tmp);
2008
2009	mutex_unlock(&adev->grbm_idx_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2010
2011	udelay(50);
 
 
 
 
 
2012}
2013
2014/**
2015 * gfx_v7_0_ring_test_ring - basic gfx ring test
2016 *
 
2017 * @ring: amdgpu_ring structure holding ring information
2018 *
2019 * Allocate a scratch register and write to it using the gfx ring (CIK).
2020 * Provides a basic gfx ring test to verify that the ring is working.
2021 * Used by gfx_v7_0_cp_gfx_resume();
2022 * Returns 0 on success, error on failure.
2023 */
2024static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2025{
2026	struct amdgpu_device *adev = ring->adev;
 
2027	uint32_t tmp = 0;
2028	unsigned i;
2029	int r;
2030
2031	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
 
 
 
 
 
2032	r = amdgpu_ring_alloc(ring, 3);
2033	if (r)
 
 
2034		return r;
2035
2036	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2037	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
2038	amdgpu_ring_write(ring, 0xDEADBEEF);
2039	amdgpu_ring_commit(ring);
2040
2041	for (i = 0; i < adev->usec_timeout; i++) {
2042		tmp = RREG32(mmSCRATCH_REG0);
2043		if (tmp == 0xDEADBEEF)
2044			break;
2045		udelay(1);
 
 
 
 
 
 
 
2046	}
2047	if (i >= adev->usec_timeout)
2048		r = -ETIMEDOUT;
2049	return r;
2050}
2051
2052/**
2053 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2054 *
2055 * @ring: amdgpu_ring structure holding ring information
 
2056 *
2057 * Emits an hdp flush on the cp.
2058 */
2059static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2060{
2061	u32 ref_and_mask;
2062	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2063
2064	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2065		switch (ring->me) {
2066		case 1:
2067			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2068			break;
2069		case 2:
2070			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2071			break;
2072		default:
2073			return;
2074		}
2075	} else {
2076		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2077	}
2078
2079	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2080	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2081				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
2082				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
2083	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2084	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2085	amdgpu_ring_write(ring, ref_and_mask);
2086	amdgpu_ring_write(ring, ref_and_mask);
2087	amdgpu_ring_write(ring, 0x20); /* poll interval */
2088}
2089
2090static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2091{
2092	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2093	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2094		EVENT_INDEX(4));
2095
2096	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2097	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2098		EVENT_INDEX(0));
2099}
2100
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2101/**
2102 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2103 *
2104 * @ring: amdgpu_ring structure holding ring information
2105 * @addr: address
2106 * @seq: sequence number
2107 * @flags: fence related flags
2108 *
2109 * Emits a fence sequence number on the gfx ring and flushes
2110 * GPU caches.
2111 */
2112static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2113					 u64 seq, unsigned flags)
2114{
2115	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2116	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2117	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
2118
2119	/* Workaround for cache flush problems. First send a dummy EOP
2120	 * event down the pipe with seq one below.
2121	 */
2122	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2123	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2124				 EOP_TC_ACTION_EN |
2125				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2126				 EVENT_INDEX(5)));
2127	amdgpu_ring_write(ring, addr & 0xfffffffc);
2128	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2129				DATA_SEL(1) | INT_SEL(0));
2130	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2131	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2132
2133	/* Then send the real EOP event down the pipe. */
2134	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2135	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2136				 EOP_TC_ACTION_EN |
2137				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2138				 EVENT_INDEX(5) |
2139				 (exec ? EOP_EXEC : 0)));
2140	amdgpu_ring_write(ring, addr & 0xfffffffc);
2141	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2142				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2143	amdgpu_ring_write(ring, lower_32_bits(seq));
2144	amdgpu_ring_write(ring, upper_32_bits(seq));
2145}
2146
2147/**
2148 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2149 *
2150 * @ring: amdgpu_ring structure holding ring information
2151 * @addr: address
2152 * @seq: sequence number
2153 * @flags: fence related flags
2154 *
2155 * Emits a fence sequence number on the compute ring and flushes
2156 * GPU caches.
2157 */
2158static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2159					     u64 addr, u64 seq,
2160					     unsigned flags)
2161{
2162	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2163	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2164
2165	/* RELEASE_MEM - flush caches, send int */
2166	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2167	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2168				 EOP_TC_ACTION_EN |
2169				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2170				 EVENT_INDEX(5)));
2171	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2172	amdgpu_ring_write(ring, addr & 0xfffffffc);
2173	amdgpu_ring_write(ring, upper_32_bits(addr));
2174	amdgpu_ring_write(ring, lower_32_bits(seq));
2175	amdgpu_ring_write(ring, upper_32_bits(seq));
2176}
2177
2178/*
2179 * IB stuff
2180 */
2181/**
2182 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
2183 *
2184 * @ring: amdgpu_ring structure holding ring information
2185 * @job: job to retrieve vmid from
2186 * @ib: amdgpu indirect buffer object
2187 * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
2188 *
2189 * Emits an DE (drawing engine) or CE (constant engine) IB
2190 * on the gfx ring.  IBs are usually generated by userspace
2191 * acceleration drivers and submitted to the kernel for
2192 * scheduling on the ring.  This function schedules the IB
2193 * on the gfx ring for execution by the GPU.
2194 */
2195static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2196					struct amdgpu_job *job,
2197					struct amdgpu_ib *ib,
2198					uint32_t flags)
2199{
2200	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2201	u32 header, control = 0;
2202
2203	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
2204	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2205		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2206		amdgpu_ring_write(ring, 0);
2207	}
2208
2209	if (ib->flags & AMDGPU_IB_FLAG_CE)
2210		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2211	else
2212		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2213
2214	control |= ib->length_dw | (vmid << 24);
2215
2216	amdgpu_ring_write(ring, header);
2217	amdgpu_ring_write(ring,
2218#ifdef __BIG_ENDIAN
2219			  (2 << 0) |
2220#endif
2221			  (ib->gpu_addr & 0xFFFFFFFC));
2222	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2223	amdgpu_ring_write(ring, control);
2224}
2225
2226static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2227					  struct amdgpu_job *job,
2228					  struct amdgpu_ib *ib,
2229					  uint32_t flags)
2230{
2231	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2232	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2233
2234	/* Currently, there is a high possibility to get wave ID mismatch
2235	 * between ME and GDS, leading to a hw deadlock, because ME generates
2236	 * different wave IDs than the GDS expects. This situation happens
2237	 * randomly when at least 5 compute pipes use GDS ordered append.
2238	 * The wave IDs generated by ME are also wrong after suspend/resume.
2239	 * Those are probably bugs somewhere else in the kernel driver.
2240	 *
2241	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2242	 * GDS to 0 for this ring (me/pipe).
2243	 */
2244	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2245		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2246		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
2247		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2248	}
2249
2250	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2251	amdgpu_ring_write(ring,
2252#ifdef __BIG_ENDIAN
2253					  (2 << 0) |
2254#endif
2255					  (ib->gpu_addr & 0xFFFFFFFC));
2256	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2257	amdgpu_ring_write(ring, control);
2258}
2259
2260static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2261{
2262	uint32_t dw2 = 0;
2263
2264	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
2265	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2266		gfx_v7_0_ring_emit_vgt_flush(ring);
2267		/* set load_global_config & load_global_uconfig */
2268		dw2 |= 0x8001;
2269		/* set load_cs_sh_regs */
2270		dw2 |= 0x01000000;
2271		/* set load_per_context_state & load_gfx_sh_regs */
2272		dw2 |= 0x10002;
2273	}
2274
2275	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2276	amdgpu_ring_write(ring, dw2);
2277	amdgpu_ring_write(ring, 0);
2278}
2279
2280/**
2281 * gfx_v7_0_ring_test_ib - basic ring IB test
2282 *
2283 * @ring: amdgpu_ring structure holding ring information
2284 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
2285 *
2286 * Allocate an IB and execute it on the gfx ring (CIK).
2287 * Provides a basic gfx ring test to verify that IBs are working.
2288 * Returns 0 on success, error on failure.
2289 */
2290static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2291{
2292	struct amdgpu_device *adev = ring->adev;
2293	struct amdgpu_ib ib;
2294	struct dma_fence *f = NULL;
 
2295	uint32_t tmp = 0;
2296	long r;
2297
2298	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
 
 
 
 
 
2299	memset(&ib, 0, sizeof(ib));
2300	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
2301	if (r)
2302		return r;
2303
 
2304	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2305	ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START;
2306	ib.ptr[2] = 0xDEADBEEF;
2307	ib.length_dw = 3;
2308
2309	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
2310	if (r)
2311		goto error;
2312
2313	r = dma_fence_wait_timeout(f, false, timeout);
2314	if (r == 0) {
 
2315		r = -ETIMEDOUT;
2316		goto error;
2317	} else if (r < 0) {
2318		goto error;
 
2319	}
2320	tmp = RREG32(mmSCRATCH_REG0);
2321	if (tmp == 0xDEADBEEF)
 
2322		r = 0;
2323	else
 
 
2324		r = -EINVAL;
 
2325
2326error:
2327	amdgpu_ib_free(adev, &ib, NULL);
2328	dma_fence_put(f);
 
 
2329	return r;
2330}
2331
2332/*
2333 * CP.
2334 * On CIK, gfx and compute now have independent command processors.
2335 *
2336 * GFX
2337 * Gfx consists of a single ring and can process both gfx jobs and
2338 * compute jobs.  The gfx CP consists of three microengines (ME):
2339 * PFP - Pre-Fetch Parser
2340 * ME - Micro Engine
2341 * CE - Constant Engine
2342 * The PFP and ME make up what is considered the Drawing Engine (DE).
2343 * The CE is an asynchronous engine used for updating buffer desciptors
2344 * used by the DE so that they can be loaded into cache in parallel
2345 * while the DE is processing state update packets.
2346 *
2347 * Compute
2348 * The compute CP consists of two microengines (ME):
2349 * MEC1 - Compute MicroEngine 1
2350 * MEC2 - Compute MicroEngine 2
2351 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2352 * The queues are exposed to userspace and are programmed directly
2353 * by the compute runtime.
2354 */
2355/**
2356 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2357 *
2358 * @adev: amdgpu_device pointer
2359 * @enable: enable or disable the MEs
2360 *
2361 * Halts or unhalts the gfx MEs.
2362 */
2363static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2364{
2365	if (enable)
 
 
2366		WREG32(mmCP_ME_CNTL, 0);
2367	else
2368		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
2369				      CP_ME_CNTL__PFP_HALT_MASK |
2370				      CP_ME_CNTL__CE_HALT_MASK));
 
2371	udelay(50);
2372}
2373
2374/**
2375 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2376 *
2377 * @adev: amdgpu_device pointer
2378 *
2379 * Loads the gfx PFP, ME, and CE ucode.
2380 * Returns 0 for success, -EINVAL if the ucode is not available.
2381 */
2382static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2383{
2384	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2385	const struct gfx_firmware_header_v1_0 *ce_hdr;
2386	const struct gfx_firmware_header_v1_0 *me_hdr;
2387	const __le32 *fw_data;
2388	unsigned i, fw_size;
2389
2390	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2391		return -EINVAL;
2392
2393	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2394	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2395	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2396
2397	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2398	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2399	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2400	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2401	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2402	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2403	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2404	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2405	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2406
2407	gfx_v7_0_cp_gfx_enable(adev, false);
2408
2409	/* PFP */
2410	fw_data = (const __le32 *)
2411		(adev->gfx.pfp_fw->data +
2412		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2413	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2414	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2415	for (i = 0; i < fw_size; i++)
2416		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2417	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2418
2419	/* CE */
2420	fw_data = (const __le32 *)
2421		(adev->gfx.ce_fw->data +
2422		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2423	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2424	WREG32(mmCP_CE_UCODE_ADDR, 0);
2425	for (i = 0; i < fw_size; i++)
2426		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2427	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2428
2429	/* ME */
2430	fw_data = (const __le32 *)
2431		(adev->gfx.me_fw->data +
2432		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2433	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2434	WREG32(mmCP_ME_RAM_WADDR, 0);
2435	for (i = 0; i < fw_size; i++)
2436		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2437	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2438
2439	return 0;
2440}
2441
2442/**
2443 * gfx_v7_0_cp_gfx_start - start the gfx ring
2444 *
2445 * @adev: amdgpu_device pointer
2446 *
2447 * Enables the ring and loads the clear state context and other
2448 * packets required to init the ring.
2449 * Returns 0 for success, error for failure.
2450 */
2451static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2452{
2453	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2454	const struct cs_section_def *sect = NULL;
2455	const struct cs_extent_def *ext = NULL;
2456	int r, i;
2457
2458	/* init the CP */
2459	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2460	WREG32(mmCP_ENDIAN_SWAP, 0);
2461	WREG32(mmCP_DEVICE_ID, 1);
2462
2463	gfx_v7_0_cp_gfx_enable(adev, true);
2464
2465	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2466	if (r) {
2467		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2468		return r;
2469	}
2470
2471	/* init the CE partitions.  CE only used for gfx on CIK */
2472	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2473	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2474	amdgpu_ring_write(ring, 0x8000);
2475	amdgpu_ring_write(ring, 0x8000);
2476
2477	/* clear state buffer */
2478	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2479	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2480
2481	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2482	amdgpu_ring_write(ring, 0x80000000);
2483	amdgpu_ring_write(ring, 0x80000000);
2484
2485	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2486		for (ext = sect->section; ext->extent != NULL; ++ext) {
2487			if (sect->id == SECT_CONTEXT) {
2488				amdgpu_ring_write(ring,
2489						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2490				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2491				for (i = 0; i < ext->reg_count; i++)
2492					amdgpu_ring_write(ring, ext->extent[i]);
2493			}
2494		}
2495	}
2496
2497	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2498	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2499	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
2500	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2501
2502	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2503	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2504
2505	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2506	amdgpu_ring_write(ring, 0);
2507
2508	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2509	amdgpu_ring_write(ring, 0x00000316);
2510	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2511	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2512
2513	amdgpu_ring_commit(ring);
2514
2515	return 0;
2516}
2517
2518/**
2519 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2520 *
2521 * @adev: amdgpu_device pointer
2522 *
2523 * Program the location and size of the gfx ring buffer
2524 * and test it to make sure it's working.
2525 * Returns 0 for success, error for failure.
2526 */
2527static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2528{
2529	struct amdgpu_ring *ring;
2530	u32 tmp;
2531	u32 rb_bufsz;
2532	u64 rb_addr, rptr_addr;
2533	int r;
2534
2535	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2536	if (adev->asic_type != CHIP_HAWAII)
2537		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2538
2539	/* Set the write pointer delay */
2540	WREG32(mmCP_RB_WPTR_DELAY, 0);
2541
2542	/* set the RB to use vmid 0 */
2543	WREG32(mmCP_RB_VMID, 0);
2544
2545	WREG32(mmSCRATCH_ADDR, 0);
2546
2547	/* ring 0 - compute and gfx */
2548	/* Set ring buffer size */
2549	ring = &adev->gfx.gfx_ring[0];
2550	rb_bufsz = order_base_2(ring->ring_size / 8);
2551	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2552#ifdef __BIG_ENDIAN
2553	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2554#endif
2555	WREG32(mmCP_RB0_CNTL, tmp);
2556
2557	/* Initialize the ring buffer's read and write pointers */
2558	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2559	ring->wptr = 0;
2560	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2561
2562	/* set the wb address whether it's enabled or not */
2563	rptr_addr = ring->rptr_gpu_addr;
2564	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2565	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2566
2567	/* scratch register shadowing is no longer supported */
2568	WREG32(mmSCRATCH_UMSK, 0);
2569
2570	mdelay(1);
2571	WREG32(mmCP_RB0_CNTL, tmp);
2572
2573	rb_addr = ring->gpu_addr >> 8;
2574	WREG32(mmCP_RB0_BASE, rb_addr);
2575	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2576
2577	/* start the ring */
2578	gfx_v7_0_cp_gfx_start(adev);
2579	r = amdgpu_ring_test_helper(ring);
2580	if (r)
 
 
2581		return r;
 
2582
2583	return 0;
2584}
2585
2586static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2587{
2588	return *ring->rptr_cpu_addr;
2589}
2590
2591static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2592{
2593	struct amdgpu_device *adev = ring->adev;
2594
2595	return RREG32(mmCP_RB0_WPTR);
2596}
2597
2598static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2599{
2600	struct amdgpu_device *adev = ring->adev;
2601
2602	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2603	(void)RREG32(mmCP_RB0_WPTR);
2604}
2605
2606static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2607{
2608	/* XXX check if swapping is necessary on BE */
2609	return *ring->wptr_cpu_addr;
2610}
2611
2612static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2613{
2614	struct amdgpu_device *adev = ring->adev;
2615
2616	/* XXX check if swapping is necessary on BE */
2617	*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
2618	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2619}
2620
2621/**
2622 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2623 *
2624 * @adev: amdgpu_device pointer
2625 * @enable: enable or disable the MEs
2626 *
2627 * Halts or unhalts the compute MEs.
2628 */
2629static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2630{
2631	if (enable)
 
 
2632		WREG32(mmCP_MEC_CNTL, 0);
2633	else
2634		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2635				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 
 
2636	udelay(50);
2637}
2638
2639/**
2640 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2641 *
2642 * @adev: amdgpu_device pointer
2643 *
2644 * Loads the compute MEC1&2 ucode.
2645 * Returns 0 for success, -EINVAL if the ucode is not available.
2646 */
2647static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2648{
2649	const struct gfx_firmware_header_v1_0 *mec_hdr;
2650	const __le32 *fw_data;
2651	unsigned i, fw_size;
2652
2653	if (!adev->gfx.mec_fw)
2654		return -EINVAL;
2655
2656	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2657	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2658	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2659	adev->gfx.mec_feature_version = le32_to_cpu(
2660					mec_hdr->ucode_feature_version);
2661
2662	gfx_v7_0_cp_compute_enable(adev, false);
2663
2664	/* MEC1 */
2665	fw_data = (const __le32 *)
2666		(adev->gfx.mec_fw->data +
2667		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2668	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2669	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2670	for (i = 0; i < fw_size; i++)
2671		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2672	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2673
2674	if (adev->asic_type == CHIP_KAVERI) {
2675		const struct gfx_firmware_header_v1_0 *mec2_hdr;
2676
2677		if (!adev->gfx.mec2_fw)
2678			return -EINVAL;
2679
2680		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2681		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2682		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2683		adev->gfx.mec2_feature_version = le32_to_cpu(
2684				mec2_hdr->ucode_feature_version);
2685
2686		/* MEC2 */
2687		fw_data = (const __le32 *)
2688			(adev->gfx.mec2_fw->data +
2689			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2690		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2691		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2692		for (i = 0; i < fw_size; i++)
2693			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2694		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2695	}
2696
2697	return 0;
2698}
2699
2700/**
2701 * gfx_v7_0_cp_compute_fini - stop the compute queues
2702 *
2703 * @adev: amdgpu_device pointer
2704 *
2705 * Stop the compute queues and tear down the driver queue
2706 * info.
2707 */
2708static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2709{
2710	int i;
2711
2712	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2713		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2714
2715		amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
 
 
 
 
 
 
 
 
 
 
2716	}
2717}
2718
2719static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2720{
2721	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
 
 
 
 
 
 
 
 
 
 
 
2722}
2723
 
 
2724static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2725{
2726	int r;
2727	u32 *hpd;
2728	size_t mec_hpd_size;
2729
2730	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2731
2732	/* take ownership of the relevant compute queues */
2733	amdgpu_gfx_compute_queue_acquire(adev);
2734
2735	/* allocate space for ALL pipes (even the ones we don't own) */
2736	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2737		* GFX7_MEC_HPD_SIZE * 2;
2738
2739	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2740				      AMDGPU_GEM_DOMAIN_VRAM |
2741				      AMDGPU_GEM_DOMAIN_GTT,
2742				      &adev->gfx.mec.hpd_eop_obj,
2743				      &adev->gfx.mec.hpd_eop_gpu_addr,
2744				      (void **)&hpd);
2745	if (r) {
2746		dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
2747		gfx_v7_0_mec_fini(adev);
2748		return r;
2749	}
2750
2751	/* clear memory.  Not sure if this is required or not */
2752	memset(hpd, 0, mec_hpd_size);
2753
2754	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2755	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2756
2757	return 0;
2758}
2759
2760static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
2761				       int mec, int pipe)
2762{
2763	u64 eop_gpu_addr;
2764	u32 tmp;
2765	size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
2766			    * GFX7_MEC_HPD_SIZE * 2;
2767
2768	mutex_lock(&adev->srbm_mutex);
2769	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
2770
2771	cik_srbm_select(adev, mec + 1, pipe, 0, 0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2772
2773	/* write the EOP addr */
2774	WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2775	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2776
2777	/* set the VMID assigned */
2778	WREG32(mmCP_HPD_EOP_VMID, 0);
2779
2780	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2781	tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2782	tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2783	tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
2784	WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2785
2786	cik_srbm_select(adev, 0, 0, 0, 0);
2787	mutex_unlock(&adev->srbm_mutex);
2788}
2789
2790static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
2791{
2792	int i;
2793
2794	/* disable the queue if it's active */
2795	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
2796		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
2797		for (i = 0; i < adev->usec_timeout; i++) {
2798			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
2799				break;
2800			udelay(1);
2801		}
2802
2803		if (i == adev->usec_timeout)
2804			return -ETIMEDOUT;
2805
2806		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
2807		WREG32(mmCP_HQD_PQ_RPTR, 0);
2808		WREG32(mmCP_HQD_PQ_WPTR, 0);
2809	}
2810
2811	return 0;
2812}
2813
2814static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
2815			     struct cik_mqd *mqd,
2816			     uint64_t mqd_gpu_addr,
2817			     struct amdgpu_ring *ring)
2818{
2819	u64 hqd_gpu_addr;
2820	u64 wb_gpu_addr;
2821
2822	/* init the mqd struct */
2823	memset(mqd, 0, sizeof(struct cik_mqd));
2824
2825	mqd->header = 0xC0310800;
2826	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2827	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2828	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2829	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2830
2831	/* enable doorbell? */
2832	mqd->cp_hqd_pq_doorbell_control =
2833		RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2834	if (ring->use_doorbell)
2835		mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2836	else
2837		mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2838
2839	/* set the pointer to the MQD */
2840	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
2841	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
2842
2843	/* set MQD vmid to 0 */
2844	mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
2845	mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
2846
2847	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
2848	hqd_gpu_addr = ring->gpu_addr >> 8;
2849	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2850	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2851
2852	/* set up the HQD, this is similar to CP_RB0_CNTL */
2853	mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
2854	mqd->cp_hqd_pq_control &=
2855		~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
2856				CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
2857
2858	mqd->cp_hqd_pq_control |=
2859		order_base_2(ring->ring_size / 8);
2860	mqd->cp_hqd_pq_control |=
2861		(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
2862#ifdef __BIG_ENDIAN
2863	mqd->cp_hqd_pq_control |=
2864		2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
2865#endif
2866	mqd->cp_hqd_pq_control &=
2867		~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
2868				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
2869				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
2870	mqd->cp_hqd_pq_control |=
2871		CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
2872		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
2873
2874	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2875	wb_gpu_addr = ring->wptr_gpu_addr;
2876	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2877	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2878
2879	/* set the wb address whether it's enabled or not */
2880	wb_gpu_addr = ring->rptr_gpu_addr;
2881	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2882	mqd->cp_hqd_pq_rptr_report_addr_hi =
2883		upper_32_bits(wb_gpu_addr) & 0xffff;
2884
2885	/* enable the doorbell if requested */
2886	if (ring->use_doorbell) {
2887		mqd->cp_hqd_pq_doorbell_control =
2888			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2889		mqd->cp_hqd_pq_doorbell_control &=
2890			~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
2891		mqd->cp_hqd_pq_doorbell_control |=
2892			(ring->doorbell_index <<
2893			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
2894		mqd->cp_hqd_pq_doorbell_control |=
2895			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2896		mqd->cp_hqd_pq_doorbell_control &=
2897			~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
2898					CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
2899
2900	} else {
2901		mqd->cp_hqd_pq_doorbell_control = 0;
2902	}
2903
2904	/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2905	ring->wptr = 0;
2906	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
2907	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
2908
2909	/* set the vmid for the queue */
2910	mqd->cp_hqd_vmid = 0;
2911
2912	/* defaults */
2913	mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
2914	mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
2915	mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
2916	mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
2917	mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
2918	mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
2919	mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
2920	mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
2921	mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
2922	mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
2923	mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
2924	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
2925	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
2926	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
2927	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
2928	mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);
2929
2930	/* activate the queue */
2931	mqd->cp_hqd_active = 1;
2932}
2933
2934static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
2935{
2936	uint32_t tmp;
2937	uint32_t mqd_reg;
2938	uint32_t *mqd_data;
2939
2940	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
2941	mqd_data = &mqd->cp_mqd_base_addr_lo;
2942
2943	/* disable wptr polling */
2944	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
2945	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2946	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
2947
2948	/* program all HQD registers */
2949	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
2950		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
2951
2952	/* activate the HQD */
2953	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
2954		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
2955
2956	return 0;
2957}
2958
2959static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
2960{
2961	int r;
2962	u64 mqd_gpu_addr;
2963	struct cik_mqd *mqd;
2964	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2965
2966	r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
2967				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
2968				      &mqd_gpu_addr, (void **)&mqd);
2969	if (r) {
2970		dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2971		return r;
2972	}
2973
2974	mutex_lock(&adev->srbm_mutex);
2975	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2976
2977	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
2978	gfx_v7_0_mqd_deactivate(adev);
2979	gfx_v7_0_mqd_commit(adev, mqd);
2980
2981	cik_srbm_select(adev, 0, 0, 0, 0);
2982	mutex_unlock(&adev->srbm_mutex);
2983
2984	amdgpu_bo_kunmap(ring->mqd_obj);
2985	amdgpu_bo_unreserve(ring->mqd_obj);
2986	return 0;
2987}
2988
2989/**
2990 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
2991 *
2992 * @adev: amdgpu_device pointer
2993 *
2994 * Program the compute queues and test them to make sure they
2995 * are working.
2996 * Returns 0 for success, error for failure.
2997 */
2998static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
2999{
3000	int r, i, j;
3001	u32 tmp;
 
 
 
 
 
 
 
3002	struct amdgpu_ring *ring;
3003
3004	/* fix up chicken bits */
3005	tmp = RREG32(mmCP_CPF_DEBUG);
3006	tmp |= (1 << 23);
3007	WREG32(mmCP_CPF_DEBUG, tmp);
3008
3009	/* init all pipes (even the ones we don't own) */
3010	for (i = 0; i < adev->gfx.mec.num_mec; i++)
3011		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
3012			gfx_v7_0_compute_pipe_init(adev, i, j);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3013
3014	/* init the queues */
3015	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3016		r = gfx_v7_0_compute_queue_init(adev, i);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3017		if (r) {
 
3018			gfx_v7_0_cp_compute_fini(adev);
3019			return r;
3020		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3021	}
3022
3023	gfx_v7_0_cp_compute_enable(adev, true);
3024
3025	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3026		ring = &adev->gfx.compute_ring[i];
3027		amdgpu_ring_test_helper(ring);
 
 
 
3028	}
3029
3030	return 0;
3031}
3032
3033static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3034{
3035	gfx_v7_0_cp_gfx_enable(adev, enable);
3036	gfx_v7_0_cp_compute_enable(adev, enable);
3037}
3038
3039static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3040{
3041	int r;
3042
3043	r = gfx_v7_0_cp_gfx_load_microcode(adev);
3044	if (r)
3045		return r;
3046	r = gfx_v7_0_cp_compute_load_microcode(adev);
3047	if (r)
3048		return r;
3049
3050	return 0;
3051}
3052
3053static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3054					       bool enable)
3055{
3056	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3057
3058	if (enable)
3059		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3060				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3061	else
3062		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3063				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3064	WREG32(mmCP_INT_CNTL_RING0, tmp);
3065}
3066
3067static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3068{
3069	int r;
3070
3071	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3072
3073	r = gfx_v7_0_cp_load_microcode(adev);
3074	if (r)
3075		return r;
3076
3077	r = gfx_v7_0_cp_gfx_resume(adev);
3078	if (r)
3079		return r;
3080	r = gfx_v7_0_cp_compute_resume(adev);
3081	if (r)
3082		return r;
3083
3084	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3085
3086	return 0;
3087}
3088
3089/**
3090 * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP
3091 *
3092 * @ring: the ring to emit the commands to
3093 *
3094 * Sync the command pipeline with the PFP. E.g. wait for everything
3095 * to be completed.
3096 */
3097static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3098{
3099	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3100	uint32_t seq = ring->fence_drv.sync_seq;
3101	uint64_t addr = ring->fence_drv.gpu_addr;
3102
3103	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3104	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3105				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3106				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
3107	amdgpu_ring_write(ring, addr & 0xfffffffc);
3108	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3109	amdgpu_ring_write(ring, seq);
3110	amdgpu_ring_write(ring, 0xffffffff);
3111	amdgpu_ring_write(ring, 4); /* poll interval */
3112
3113	if (usepfp) {
3114		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
3115		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3116		amdgpu_ring_write(ring, 0);
3117		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3118		amdgpu_ring_write(ring, 0);
3119	}
3120}
3121
3122/*
3123 * vm
3124 * VMID 0 is the physical GPU addresses as used by the kernel.
3125 * VMIDs 1-15 are used for userspace clients and are handled
3126 * by the amdgpu vm/hsa code.
3127 */
3128/**
3129 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3130 *
3131 * @ring: amdgpu_ring pointer
3132 * @vmid: vmid number to use
3133 * @pd_addr: address
3134 *
3135 * Update the page table base and flush the VM TLB
3136 * using the CP (CIK).
3137 */
3138static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3139					unsigned vmid, uint64_t pd_addr)
3140{
3141	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3142
3143	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3144
3145	/* wait for the invalidate to complete */
3146	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3147	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3148				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
3149				 WAIT_REG_MEM_ENGINE(0))); /* me */
3150	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3151	amdgpu_ring_write(ring, 0);
3152	amdgpu_ring_write(ring, 0); /* ref */
3153	amdgpu_ring_write(ring, 0); /* mask */
3154	amdgpu_ring_write(ring, 0x20); /* poll interval */
3155
3156	/* compute doesn't have PFP */
3157	if (usepfp) {
3158		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3159		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3160		amdgpu_ring_write(ring, 0x0);
3161
3162		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
3163		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3164		amdgpu_ring_write(ring, 0);
3165		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3166		amdgpu_ring_write(ring, 0);
3167	}
3168}
3169
3170static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
3171				    uint32_t reg, uint32_t val)
3172{
3173	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3174
3175	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3176	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3177				 WRITE_DATA_DST_SEL(0)));
3178	amdgpu_ring_write(ring, reg);
3179	amdgpu_ring_write(ring, 0);
3180	amdgpu_ring_write(ring, val);
3181}
3182
3183/*
3184 * RLC
3185 * The RLC is a multi-purpose microengine that handles a
3186 * variety of functions.
3187 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3188static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3189{
3190	const u32 *src_ptr;
3191	u32 dws;
 
3192	const struct cs_section_def *cs_data;
3193	int r;
3194
3195	/* allocate rlc buffers */
3196	if (adev->flags & AMD_IS_APU) {
3197		if (adev->asic_type == CHIP_KAVERI) {
3198			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3199			adev->gfx.rlc.reg_list_size =
3200				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3201		} else {
3202			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3203			adev->gfx.rlc.reg_list_size =
3204				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3205		}
3206	}
3207	adev->gfx.rlc.cs_data = ci_cs_data;
3208	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3209	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3210
3211	src_ptr = adev->gfx.rlc.reg_list;
3212	dws = adev->gfx.rlc.reg_list_size;
3213	dws += (5 * 16) + 48 + 48 + 64;
3214
3215	cs_data = adev->gfx.rlc.cs_data;
3216
3217	if (src_ptr) {
3218		/* init save restore block */
3219		r = amdgpu_gfx_rlc_init_sr(adev, dws);
3220		if (r)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3221			return r;
 
 
 
 
 
 
 
3222	}
3223
3224	if (cs_data) {
3225		/* init clear state block */
3226		r = amdgpu_gfx_rlc_init_csb(adev);
3227		if (r)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3228			return r;
 
 
 
 
 
 
3229	}
3230
3231	if (adev->gfx.rlc.cp_table_size) {
3232		r = amdgpu_gfx_rlc_init_cpt(adev);
3233		if (r)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3234			return r;
 
 
 
 
 
 
 
3235	}
3236
3237	/* init spm vmid with 0xf */
3238	if (adev->gfx.rlc.funcs->update_spm_vmid)
3239		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
3240
3241	return 0;
3242}
3243
3244static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3245{
3246	u32 tmp;
3247
3248	tmp = RREG32(mmRLC_LB_CNTL);
3249	if (enable)
3250		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3251	else
3252		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3253	WREG32(mmRLC_LB_CNTL, tmp);
3254}
3255
3256static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3257{
3258	u32 i, j, k;
3259	u32 mask;
3260
3261	mutex_lock(&adev->grbm_idx_mutex);
3262	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3263		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3264			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3265			for (k = 0; k < adev->usec_timeout; k++) {
3266				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3267					break;
3268				udelay(1);
3269			}
3270		}
3271	}
3272	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3273	mutex_unlock(&adev->grbm_idx_mutex);
3274
3275	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3276		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3277		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3278		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3279	for (k = 0; k < adev->usec_timeout; k++) {
3280		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3281			break;
3282		udelay(1);
3283	}
3284}
3285
3286static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3287{
3288	u32 tmp;
3289
3290	tmp = RREG32(mmRLC_CNTL);
3291	if (tmp != rlc)
3292		WREG32(mmRLC_CNTL, rlc);
3293}
3294
3295static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3296{
3297	u32 data, orig;
3298
3299	orig = data = RREG32(mmRLC_CNTL);
3300
3301	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3302		u32 i;
3303
3304		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3305		WREG32(mmRLC_CNTL, data);
3306
3307		for (i = 0; i < adev->usec_timeout; i++) {
3308			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3309				break;
3310			udelay(1);
3311		}
3312
3313		gfx_v7_0_wait_for_rlc_serdes(adev);
3314	}
3315
3316	return orig;
3317}
3318
3319static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
3320{
3321	return true;
3322}
3323
3324static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
3325{
3326	u32 tmp, i, mask;
3327
3328	tmp = 0x1 | (1 << 1);
3329	WREG32(mmRLC_GPR_REG2, tmp);
3330
3331	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3332		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3333	for (i = 0; i < adev->usec_timeout; i++) {
3334		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3335			break;
3336		udelay(1);
3337	}
3338
3339	for (i = 0; i < adev->usec_timeout; i++) {
3340		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3341			break;
3342		udelay(1);
3343	}
3344}
3345
3346static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
3347{
3348	u32 tmp;
3349
3350	tmp = 0x1 | (0 << 1);
3351	WREG32(mmRLC_GPR_REG2, tmp);
3352}
3353
3354/**
3355 * gfx_v7_0_rlc_stop - stop the RLC ME
3356 *
3357 * @adev: amdgpu_device pointer
3358 *
3359 * Halt the RLC ME (MicroEngine) (CIK).
3360 */
3361static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3362{
3363	WREG32(mmRLC_CNTL, 0);
3364
3365	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3366
3367	gfx_v7_0_wait_for_rlc_serdes(adev);
3368}
3369
3370/**
3371 * gfx_v7_0_rlc_start - start the RLC ME
3372 *
3373 * @adev: amdgpu_device pointer
3374 *
3375 * Unhalt the RLC ME (MicroEngine) (CIK).
3376 */
3377static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3378{
3379	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3380
3381	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3382
3383	udelay(50);
3384}
3385
3386static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3387{
3388	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3389
3390	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3391	WREG32(mmGRBM_SOFT_RESET, tmp);
3392	udelay(50);
3393	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3394	WREG32(mmGRBM_SOFT_RESET, tmp);
3395	udelay(50);
3396}
3397
3398/**
3399 * gfx_v7_0_rlc_resume - setup the RLC hw
3400 *
3401 * @adev: amdgpu_device pointer
3402 *
3403 * Initialize the RLC registers, load the ucode,
3404 * and start the RLC (CIK).
3405 * Returns 0 for success, -EINVAL if the ucode is not available.
3406 */
3407static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3408{
3409	const struct rlc_firmware_header_v1_0 *hdr;
3410	const __le32 *fw_data;
3411	unsigned i, fw_size;
3412	u32 tmp;
3413
3414	if (!adev->gfx.rlc_fw)
3415		return -EINVAL;
3416
3417	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3418	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3419	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3420	adev->gfx.rlc_feature_version = le32_to_cpu(
3421					hdr->ucode_feature_version);
3422
3423	adev->gfx.rlc.funcs->stop(adev);
3424
3425	/* disable CG */
3426	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3427	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3428
3429	adev->gfx.rlc.funcs->reset(adev);
3430
3431	gfx_v7_0_init_pg(adev);
3432
3433	WREG32(mmRLC_LB_CNTR_INIT, 0);
3434	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3435
3436	mutex_lock(&adev->grbm_idx_mutex);
3437	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3438	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3439	WREG32(mmRLC_LB_PARAMS, 0x00600408);
3440	WREG32(mmRLC_LB_CNTL, 0x80000004);
3441	mutex_unlock(&adev->grbm_idx_mutex);
3442
3443	WREG32(mmRLC_MC_CNTL, 0);
3444	WREG32(mmRLC_UCODE_CNTL, 0);
3445
3446	fw_data = (const __le32 *)
3447		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3448	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3449	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3450	for (i = 0; i < fw_size; i++)
3451		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3452	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3453
3454	/* XXX - find out what chips support lbpw */
3455	gfx_v7_0_enable_lbpw(adev, false);
3456
3457	if (adev->asic_type == CHIP_BONAIRE)
3458		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3459
3460	adev->gfx.rlc.funcs->start(adev);
3461
3462	return 0;
3463}
3464
3465static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
3466{
3467	u32 data;
3468
3469	amdgpu_gfx_off_ctrl(adev, false);
3470
3471	data = RREG32(mmRLC_SPM_VMID);
3472
3473	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
3474	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
3475
3476	WREG32(mmRLC_SPM_VMID, data);
3477
3478	amdgpu_gfx_off_ctrl(adev, true);
3479}
3480
3481static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3482{
3483	u32 data, orig, tmp, tmp2;
3484
3485	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3486
3487	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3488		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3489
3490		tmp = gfx_v7_0_halt_rlc(adev);
3491
3492		mutex_lock(&adev->grbm_idx_mutex);
3493		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3494		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3495		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3496		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3497			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3498			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3499		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3500		mutex_unlock(&adev->grbm_idx_mutex);
3501
3502		gfx_v7_0_update_rlc(adev, tmp);
3503
3504		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3505		if (orig != data)
3506			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3507
3508	} else {
3509		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3510
3511		RREG32(mmCB_CGTT_SCLK_CTRL);
3512		RREG32(mmCB_CGTT_SCLK_CTRL);
3513		RREG32(mmCB_CGTT_SCLK_CTRL);
3514		RREG32(mmCB_CGTT_SCLK_CTRL);
3515
3516		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3517		if (orig != data)
3518			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
 
 
3519
3520		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3521	}
3522}
3523
3524static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3525{
3526	u32 data, orig, tmp = 0;
3527
3528	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3529		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3530			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3531				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3532				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3533				if (orig != data)
3534					WREG32(mmCP_MEM_SLP_CNTL, data);
3535			}
3536		}
3537
3538		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3539		data |= 0x00000001;
3540		data &= 0xfffffffd;
3541		if (orig != data)
3542			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3543
3544		tmp = gfx_v7_0_halt_rlc(adev);
3545
3546		mutex_lock(&adev->grbm_idx_mutex);
3547		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3548		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3549		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3550		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3551			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3552		WREG32(mmRLC_SERDES_WR_CTRL, data);
3553		mutex_unlock(&adev->grbm_idx_mutex);
3554
3555		gfx_v7_0_update_rlc(adev, tmp);
3556
3557		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3558			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3559			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3560			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3561			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3562			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3563			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3564			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3565				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3566			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3567			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3568			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3569			if (orig != data)
3570				WREG32(mmCGTS_SM_CTRL_REG, data);
3571		}
3572	} else {
3573		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3574		data |= 0x00000003;
3575		if (orig != data)
3576			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3577
3578		data = RREG32(mmRLC_MEM_SLP_CNTL);
3579		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3580			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3581			WREG32(mmRLC_MEM_SLP_CNTL, data);
3582		}
3583
3584		data = RREG32(mmCP_MEM_SLP_CNTL);
3585		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3586			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3587			WREG32(mmCP_MEM_SLP_CNTL, data);
3588		}
3589
3590		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3591		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3592		if (orig != data)
3593			WREG32(mmCGTS_SM_CTRL_REG, data);
3594
3595		tmp = gfx_v7_0_halt_rlc(adev);
3596
3597		mutex_lock(&adev->grbm_idx_mutex);
3598		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3599		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3600		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3601		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3602		WREG32(mmRLC_SERDES_WR_CTRL, data);
3603		mutex_unlock(&adev->grbm_idx_mutex);
3604
3605		gfx_v7_0_update_rlc(adev, tmp);
3606	}
3607}
3608
3609static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3610			       bool enable)
3611{
3612	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3613	/* order matters! */
3614	if (enable) {
3615		gfx_v7_0_enable_mgcg(adev, true);
3616		gfx_v7_0_enable_cgcg(adev, true);
3617	} else {
3618		gfx_v7_0_enable_cgcg(adev, false);
3619		gfx_v7_0_enable_mgcg(adev, false);
3620	}
3621	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3622}
3623
3624static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3625						bool enable)
3626{
3627	u32 data, orig;
3628
3629	orig = data = RREG32(mmRLC_PG_CNTL);
3630	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3631		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3632	else
3633		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3634	if (orig != data)
3635		WREG32(mmRLC_PG_CNTL, data);
3636}
3637
3638static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3639						bool enable)
3640{
3641	u32 data, orig;
3642
3643	orig = data = RREG32(mmRLC_PG_CNTL);
3644	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3645		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3646	else
3647		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3648	if (orig != data)
3649		WREG32(mmRLC_PG_CNTL, data);
3650}
3651
3652static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3653{
3654	u32 data, orig;
3655
3656	orig = data = RREG32(mmRLC_PG_CNTL);
3657	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3658		data &= ~0x8000;
3659	else
3660		data |= 0x8000;
3661	if (orig != data)
3662		WREG32(mmRLC_PG_CNTL, data);
3663}
3664
3665static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3666{
3667	u32 data, orig;
3668
3669	orig = data = RREG32(mmRLC_PG_CNTL);
3670	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3671		data &= ~0x2000;
3672	else
3673		data |= 0x2000;
3674	if (orig != data)
3675		WREG32(mmRLC_PG_CNTL, data);
3676}
3677
3678static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
3679{
 
 
 
 
 
 
3680	if (adev->asic_type == CHIP_KAVERI)
3681		return 5;
3682	else
3683		return 4;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3684}
3685
3686static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3687				     bool enable)
3688{
3689	u32 data, orig;
3690
3691	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3692		orig = data = RREG32(mmRLC_PG_CNTL);
3693		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3694		if (orig != data)
3695			WREG32(mmRLC_PG_CNTL, data);
3696
3697		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3698		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3699		if (orig != data)
3700			WREG32(mmRLC_AUTO_PG_CTRL, data);
3701	} else {
3702		orig = data = RREG32(mmRLC_PG_CNTL);
3703		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3704		if (orig != data)
3705			WREG32(mmRLC_PG_CNTL, data);
3706
3707		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3708		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3709		if (orig != data)
3710			WREG32(mmRLC_AUTO_PG_CTRL, data);
3711
3712		data = RREG32(mmDB_RENDER_CONTROL);
3713	}
3714}
3715
3716static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3717						 u32 bitmap)
3718{
3719	u32 data;
3720
3721	if (!bitmap)
3722		return;
3723
3724	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3725	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3726
3727	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3728}
3729
3730static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3731{
3732	u32 data, mask;
3733
3734	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
3735	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
3736
3737	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3738	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3739
3740	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3741
3742	return (~data) & mask;
3743}
3744
3745static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3746{
3747	u32 tmp;
3748
3749	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3750
3751	tmp = RREG32(mmRLC_MAX_PG_CU);
3752	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
3753	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3754	WREG32(mmRLC_MAX_PG_CU, tmp);
3755}
3756
3757static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3758					    bool enable)
3759{
3760	u32 data, orig;
3761
3762	orig = data = RREG32(mmRLC_PG_CNTL);
3763	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3764		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3765	else
3766		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3767	if (orig != data)
3768		WREG32(mmRLC_PG_CNTL, data);
3769}
3770
3771static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3772					     bool enable)
3773{
3774	u32 data, orig;
3775
3776	orig = data = RREG32(mmRLC_PG_CNTL);
3777	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3778		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3779	else
3780		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3781	if (orig != data)
3782		WREG32(mmRLC_PG_CNTL, data);
3783}
3784
3785#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
3786#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
3787
3788static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3789{
3790	u32 data, orig;
3791	u32 i;
3792
3793	if (adev->gfx.rlc.cs_data) {
3794		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3795		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3796		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3797		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3798	} else {
3799		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3800		for (i = 0; i < 3; i++)
3801			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
3802	}
3803	if (adev->gfx.rlc.reg_list) {
3804		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
3805		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3806			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3807	}
3808
3809	orig = data = RREG32(mmRLC_PG_CNTL);
3810	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
3811	if (orig != data)
3812		WREG32(mmRLC_PG_CNTL, data);
3813
3814	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3815	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3816
3817	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3818	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3819	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3820	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
3821
3822	data = 0x10101010;
3823	WREG32(mmRLC_PG_DELAY, data);
3824
3825	data = RREG32(mmRLC_PG_DELAY_2);
3826	data &= ~0xff;
3827	data |= 0x3;
3828	WREG32(mmRLC_PG_DELAY_2, data);
3829
3830	data = RREG32(mmRLC_AUTO_PG_CTRL);
3831	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
3832	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
3833	WREG32(mmRLC_AUTO_PG_CTRL, data);
3834
3835}
3836
3837static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3838{
3839	gfx_v7_0_enable_gfx_cgpg(adev, enable);
3840	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3841	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
3842}
3843
3844static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3845{
3846	u32 count = 0;
3847	const struct cs_section_def *sect = NULL;
3848	const struct cs_extent_def *ext = NULL;
3849
3850	if (adev->gfx.rlc.cs_data == NULL)
3851		return 0;
3852
3853	/* begin clear state */
3854	count += 2;
3855	/* context control state */
3856	count += 3;
3857
3858	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3859		for (ext = sect->section; ext->extent != NULL; ++ext) {
3860			if (sect->id == SECT_CONTEXT)
3861				count += 2 + ext->reg_count;
3862			else
3863				return 0;
3864		}
3865	}
3866	/* pa_sc_raster_config/pa_sc_raster_config1 */
3867	count += 4;
3868	/* end clear state */
3869	count += 2;
3870	/* clear state */
3871	count += 2;
3872
3873	return count;
3874}
3875
3876static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
3877				    volatile u32 *buffer)
3878{
3879	u32 count = 0, i;
3880	const struct cs_section_def *sect = NULL;
3881	const struct cs_extent_def *ext = NULL;
3882
3883	if (adev->gfx.rlc.cs_data == NULL)
3884		return;
3885	if (buffer == NULL)
3886		return;
3887
3888	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3889	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3890
3891	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3892	buffer[count++] = cpu_to_le32(0x80000000);
3893	buffer[count++] = cpu_to_le32(0x80000000);
3894
3895	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3896		for (ext = sect->section; ext->extent != NULL; ++ext) {
3897			if (sect->id == SECT_CONTEXT) {
3898				buffer[count++] =
3899					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
3900				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3901				for (i = 0; i < ext->reg_count; i++)
3902					buffer[count++] = cpu_to_le32(ext->extent[i]);
3903			} else {
3904				return;
3905			}
3906		}
3907	}
3908
3909	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3910	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
3911	switch (adev->asic_type) {
3912	case CHIP_BONAIRE:
3913		buffer[count++] = cpu_to_le32(0x16000012);
3914		buffer[count++] = cpu_to_le32(0x00000000);
3915		break;
3916	case CHIP_KAVERI:
3917		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
3918		buffer[count++] = cpu_to_le32(0x00000000);
3919		break;
3920	case CHIP_KABINI:
3921	case CHIP_MULLINS:
3922		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
3923		buffer[count++] = cpu_to_le32(0x00000000);
3924		break;
3925	case CHIP_HAWAII:
3926		buffer[count++] = cpu_to_le32(0x3a00161a);
3927		buffer[count++] = cpu_to_le32(0x0000002e);
3928		break;
3929	default:
3930		buffer[count++] = cpu_to_le32(0x00000000);
3931		buffer[count++] = cpu_to_le32(0x00000000);
3932		break;
3933	}
3934
3935	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3936	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
3937
3938	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
3939	buffer[count++] = cpu_to_le32(0);
3940}
3941
3942static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
3943{
3944	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3945			      AMD_PG_SUPPORT_GFX_SMG |
3946			      AMD_PG_SUPPORT_GFX_DMG |
3947			      AMD_PG_SUPPORT_CP |
3948			      AMD_PG_SUPPORT_GDS |
3949			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3950		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
3951		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
3952		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3953			gfx_v7_0_init_gfx_cgpg(adev);
3954			gfx_v7_0_enable_cp_pg(adev, true);
3955			gfx_v7_0_enable_gds_pg(adev, true);
3956		}
3957		gfx_v7_0_init_ao_cu_mask(adev);
3958		gfx_v7_0_update_gfx_pg(adev, true);
3959	}
3960}
3961
3962static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
3963{
3964	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3965			      AMD_PG_SUPPORT_GFX_SMG |
3966			      AMD_PG_SUPPORT_GFX_DMG |
3967			      AMD_PG_SUPPORT_CP |
3968			      AMD_PG_SUPPORT_GDS |
3969			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3970		gfx_v7_0_update_gfx_pg(adev, false);
3971		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3972			gfx_v7_0_enable_cp_pg(adev, false);
3973			gfx_v7_0_enable_gds_pg(adev, false);
3974		}
3975	}
3976}
3977
3978/**
3979 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
3980 *
3981 * @adev: amdgpu_device pointer
3982 *
3983 * Fetches a GPU clock counter snapshot (SI).
3984 * Returns the 64 bit clock counter snapshot.
3985 */
3986static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3987{
3988	uint64_t clock;
3989
3990	mutex_lock(&adev->gfx.gpu_clock_mutex);
3991	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3992	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
3993		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3994	mutex_unlock(&adev->gfx.gpu_clock_mutex);
3995	return clock;
3996}
3997
3998static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3999					  uint32_t vmid,
4000					  uint32_t gds_base, uint32_t gds_size,
4001					  uint32_t gws_base, uint32_t gws_size,
4002					  uint32_t oa_base, uint32_t oa_size)
4003{
 
 
 
 
 
 
 
 
 
4004	/* GDS Base */
4005	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4006	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4007				WRITE_DATA_DST_SEL(0)));
4008	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4009	amdgpu_ring_write(ring, 0);
4010	amdgpu_ring_write(ring, gds_base);
4011
4012	/* GDS Size */
4013	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4014	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4015				WRITE_DATA_DST_SEL(0)));
4016	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4017	amdgpu_ring_write(ring, 0);
4018	amdgpu_ring_write(ring, gds_size);
4019
4020	/* GWS */
4021	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4022	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4023				WRITE_DATA_DST_SEL(0)));
4024	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4025	amdgpu_ring_write(ring, 0);
4026	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4027
4028	/* OA */
4029	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4030	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4031				WRITE_DATA_DST_SEL(0)));
4032	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4033	amdgpu_ring_write(ring, 0);
4034	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4035}
4036
4037static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4038{
4039	struct amdgpu_device *adev = ring->adev;
4040	uint32_t value = 0;
4041
4042	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4043	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4044	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4045	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4046	WREG32(mmSQ_CMD, value);
4047}
4048
4049static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4050{
4051	WREG32(mmSQ_IND_INDEX,
4052		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4053		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4054		(address << SQ_IND_INDEX__INDEX__SHIFT) |
4055		(SQ_IND_INDEX__FORCE_READ_MASK));
4056	return RREG32(mmSQ_IND_DATA);
4057}
4058
4059static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4060			   uint32_t wave, uint32_t thread,
4061			   uint32_t regno, uint32_t num, uint32_t *out)
4062{
4063	WREG32(mmSQ_IND_INDEX,
4064		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4065		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4066		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
4067		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4068		(SQ_IND_INDEX__FORCE_READ_MASK) |
4069		(SQ_IND_INDEX__AUTO_INCR_MASK));
4070	while (num--)
4071		*(out++) = RREG32(mmSQ_IND_DATA);
4072}
4073
4074static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4075{
4076	/* type 0 wave data */
4077	dst[(*no_fields)++] = 0;
4078	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4079	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4080	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4081	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4082	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4083	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4084	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4085	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4086	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4087	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4088	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4089	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4090	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4091	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4092	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4093	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4094	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4095	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4096	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
4097}
4098
4099static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
4100				     uint32_t wave, uint32_t start,
4101				     uint32_t size, uint32_t *dst)
4102{
4103	wave_read_regs(
4104		adev, simd, wave, 0,
4105		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4106}
4107
4108static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4109				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
4110{
4111	cik_srbm_select(adev, me, pipe, q, vm);
4112}
4113
4114static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4115	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4116	.select_se_sh = &gfx_v7_0_select_se_sh,
4117	.read_wave_data = &gfx_v7_0_read_wave_data,
4118	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4119	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
4120};
4121
4122static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4123	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
4124	.set_safe_mode = gfx_v7_0_set_safe_mode,
4125	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
4126	.init = gfx_v7_0_rlc_init,
4127	.get_csb_size = gfx_v7_0_get_csb_size,
4128	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
4129	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
4130	.resume = gfx_v7_0_rlc_resume,
4131	.stop = gfx_v7_0_rlc_stop,
4132	.reset = gfx_v7_0_rlc_reset,
4133	.start = gfx_v7_0_rlc_start,
4134	.update_spm_vmid = gfx_v7_0_update_spm_vmid
4135};
4136
4137static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block)
4138{
4139	struct amdgpu_device *adev = ip_block->adev;
4140
4141	adev->gfx.xcc_mask = 1;
4142	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4143	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4144					  AMDGPU_MAX_COMPUTE_RINGS);
4145	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4146	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4147	gfx_v7_0_set_ring_funcs(adev);
4148	gfx_v7_0_set_irq_funcs(adev);
4149	gfx_v7_0_set_gds_init(adev);
4150
4151	return 0;
4152}
4153
4154static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block)
4155{
4156	struct amdgpu_device *adev = ip_block->adev;
4157	int r;
4158
4159	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4160	if (r)
4161		return r;
4162
4163	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4164	if (r)
4165		return r;
4166
4167	return 0;
4168}
4169
4170static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4171{
4172	u32 gb_addr_config;
4173	u32 mc_arb_ramcfg;
4174	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4175	u32 tmp;
4176
4177	switch (adev->asic_type) {
4178	case CHIP_BONAIRE:
4179		adev->gfx.config.max_shader_engines = 2;
4180		adev->gfx.config.max_tile_pipes = 4;
4181		adev->gfx.config.max_cu_per_sh = 7;
4182		adev->gfx.config.max_sh_per_se = 1;
4183		adev->gfx.config.max_backends_per_se = 2;
4184		adev->gfx.config.max_texture_channel_caches = 4;
4185		adev->gfx.config.max_gprs = 256;
4186		adev->gfx.config.max_gs_threads = 32;
4187		adev->gfx.config.max_hw_contexts = 8;
4188
4189		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4190		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4191		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4192		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4193		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4194		break;
4195	case CHIP_HAWAII:
4196		adev->gfx.config.max_shader_engines = 4;
4197		adev->gfx.config.max_tile_pipes = 16;
4198		adev->gfx.config.max_cu_per_sh = 11;
4199		adev->gfx.config.max_sh_per_se = 1;
4200		adev->gfx.config.max_backends_per_se = 4;
4201		adev->gfx.config.max_texture_channel_caches = 16;
4202		adev->gfx.config.max_gprs = 256;
4203		adev->gfx.config.max_gs_threads = 32;
4204		adev->gfx.config.max_hw_contexts = 8;
4205
4206		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4207		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4208		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4209		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4210		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4211		break;
4212	case CHIP_KAVERI:
4213		adev->gfx.config.max_shader_engines = 1;
4214		adev->gfx.config.max_tile_pipes = 4;
4215		adev->gfx.config.max_cu_per_sh = 8;
4216		adev->gfx.config.max_backends_per_se = 2;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4217		adev->gfx.config.max_sh_per_se = 1;
4218		adev->gfx.config.max_texture_channel_caches = 4;
4219		adev->gfx.config.max_gprs = 256;
4220		adev->gfx.config.max_gs_threads = 16;
4221		adev->gfx.config.max_hw_contexts = 8;
4222
4223		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4224		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4225		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4226		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4227		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4228		break;
4229	case CHIP_KABINI:
4230	case CHIP_MULLINS:
4231	default:
4232		adev->gfx.config.max_shader_engines = 1;
4233		adev->gfx.config.max_tile_pipes = 2;
4234		adev->gfx.config.max_cu_per_sh = 2;
4235		adev->gfx.config.max_sh_per_se = 1;
4236		adev->gfx.config.max_backends_per_se = 1;
4237		adev->gfx.config.max_texture_channel_caches = 2;
4238		adev->gfx.config.max_gprs = 256;
4239		adev->gfx.config.max_gs_threads = 16;
4240		adev->gfx.config.max_hw_contexts = 8;
4241
4242		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4243		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4244		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4245		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4246		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4247		break;
4248	}
4249
 
4250	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4251	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4252
4253	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
4254				MC_ARB_RAMCFG, NOOFBANK);
4255	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
4256				MC_ARB_RAMCFG, NOOFRANKS);
4257
4258	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4259	adev->gfx.config.mem_max_burst_length_bytes = 256;
4260	if (adev->flags & AMD_IS_APU) {
4261		/* Get memory bank mapping mode. */
4262		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4263		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4264		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4265
4266		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4267		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4268		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4269
4270		/* Validate settings in case only one DIMM installed. */
4271		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4272			dimm00_addr_map = 0;
4273		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4274			dimm01_addr_map = 0;
4275		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4276			dimm10_addr_map = 0;
4277		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4278			dimm11_addr_map = 0;
4279
4280		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4281		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
4282		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4283			adev->gfx.config.mem_row_size_in_kb = 2;
4284		else
4285			adev->gfx.config.mem_row_size_in_kb = 1;
4286	} else {
4287		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4288		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4289		if (adev->gfx.config.mem_row_size_in_kb > 4)
4290			adev->gfx.config.mem_row_size_in_kb = 4;
4291	}
4292	/* XXX use MC settings? */
4293	adev->gfx.config.shader_engine_tile_size = 32;
4294	adev->gfx.config.num_gpus = 1;
4295	adev->gfx.config.multi_gpu_tile_size = 64;
4296
4297	/* fix up row size */
4298	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4299	switch (adev->gfx.config.mem_row_size_in_kb) {
4300	case 1:
4301	default:
4302		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4303		break;
4304	case 2:
4305		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4306		break;
4307	case 4:
4308		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4309		break;
4310	}
4311	adev->gfx.config.gb_addr_config = gb_addr_config;
4312}
4313
4314static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
4315					int mec, int pipe, int queue)
4316{
4317	int r;
4318	unsigned irq_type;
4319	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
4320
4321	/* mec0 is me1 */
4322	ring->me = mec + 1;
4323	ring->pipe = pipe;
4324	ring->queue = queue;
4325
4326	ring->ring_obj = NULL;
4327	ring->use_doorbell = true;
4328	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
4329	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4330
4331	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4332		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
4333		+ ring->pipe;
4334
4335	/* type-2 packets are deprecated on MEC, use type-3 instead */
4336	r = amdgpu_ring_init(adev, ring, 1024,
4337			     &adev->gfx.eop_irq, irq_type,
4338			     AMDGPU_RING_PRIO_DEFAULT, NULL);
4339	if (r)
4340		return r;
4341
4342
4343	return 0;
4344}
4345
4346static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
4347{
4348	struct amdgpu_ring *ring;
4349	struct amdgpu_device *adev = ip_block->adev;
4350	int i, j, k, r, ring_id;
4351
4352	switch (adev->asic_type) {
4353	case CHIP_KAVERI:
4354		adev->gfx.mec.num_mec = 2;
4355		break;
4356	case CHIP_BONAIRE:
4357	case CHIP_HAWAII:
4358	case CHIP_KABINI:
4359	case CHIP_MULLINS:
4360	default:
4361		adev->gfx.mec.num_mec = 1;
4362		break;
4363	}
4364	adev->gfx.mec.num_pipe_per_mec = 4;
4365	adev->gfx.mec.num_queue_per_pipe = 8;
4366
4367	/* EOP Event */
4368	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
4369	if (r)
4370		return r;
4371
4372	/* Privileged reg */
4373	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
4374			      &adev->gfx.priv_reg_irq);
4375	if (r)
4376		return r;
4377
4378	/* Privileged inst */
4379	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
4380			      &adev->gfx.priv_inst_irq);
4381	if (r)
4382		return r;
4383
 
 
4384	r = gfx_v7_0_init_microcode(adev);
4385	if (r) {
4386		DRM_ERROR("Failed to load gfx firmware!\n");
4387		return r;
4388	}
4389
4390	r = adev->gfx.rlc.funcs->init(adev);
4391	if (r) {
4392		DRM_ERROR("Failed to init rlc BOs!\n");
4393		return r;
4394	}
4395
4396	/* allocate mec buffers */
4397	r = gfx_v7_0_mec_init(adev);
4398	if (r) {
4399		DRM_ERROR("Failed to init MEC BOs!\n");
4400		return r;
4401	}
4402
4403	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4404		ring = &adev->gfx.gfx_ring[i];
4405		ring->ring_obj = NULL;
4406		sprintf(ring->name, "gfx");
4407		r = amdgpu_ring_init(adev, ring, 1024,
4408				     &adev->gfx.eop_irq,
4409				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
4410				     AMDGPU_RING_PRIO_DEFAULT, NULL);
4411		if (r)
4412			return r;
4413	}
4414
4415	/* set up the compute queues - allocate horizontally across pipes */
4416	ring_id = 0;
4417	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4418		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4419			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4420				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
4421								     k, j))
4422					continue;
4423
4424				r = gfx_v7_0_compute_ring_init(adev,
4425								ring_id,
4426								i, k, j);
4427				if (r)
4428					return r;
4429
4430				ring_id++;
4431			}
 
 
4432		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4433	}
4434
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4435	adev->gfx.ce_ram_size = 0x8000;
4436
4437	gfx_v7_0_gpu_early_init(adev);
4438
4439	return r;
4440}
4441
4442static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
4443{
4444	struct amdgpu_device *adev = ip_block->adev;
4445	int i;
 
 
 
 
 
4446
4447	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4448		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4449	for (i = 0; i < adev->gfx.num_compute_rings; i++)
4450		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4451
4452	gfx_v7_0_cp_compute_fini(adev);
4453	amdgpu_gfx_rlc_fini(adev);
4454	gfx_v7_0_mec_fini(adev);
4455	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
4456				&adev->gfx.rlc.clear_state_gpu_addr,
4457				(void **)&adev->gfx.rlc.cs_ptr);
4458	if (adev->gfx.rlc.cp_table_size) {
4459		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
4460				&adev->gfx.rlc.cp_table_gpu_addr,
4461				(void **)&adev->gfx.rlc.cp_table_ptr);
4462	}
4463	gfx_v7_0_free_microcode(adev);
4464
4465	return 0;
4466}
4467
4468static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
4469{
4470	int r;
4471	struct amdgpu_device *adev = ip_block->adev;
4472
4473	gfx_v7_0_constants_init(adev);
4474
4475	/* init CSB */
4476	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
4477	/* init rlc */
4478	r = adev->gfx.rlc.funcs->resume(adev);
4479	if (r)
4480		return r;
4481
4482	r = gfx_v7_0_cp_resume(adev);
4483	if (r)
4484		return r;
4485
4486	return r;
4487}
4488
4489static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
4490{
4491	struct amdgpu_device *adev = ip_block->adev;
4492
4493	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4494	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4495	gfx_v7_0_cp_enable(adev, false);
4496	adev->gfx.rlc.funcs->stop(adev);
4497	gfx_v7_0_fini_pg(adev);
4498
4499	return 0;
4500}
4501
4502static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block)
4503{
4504	return gfx_v7_0_hw_fini(ip_block);
 
 
4505}
4506
4507static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
4508{
4509	return gfx_v7_0_hw_init(ip_block);
 
 
4510}
4511
4512static bool gfx_v7_0_is_idle(void *handle)
4513{
4514	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4515
4516	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4517		return false;
4518	else
4519		return true;
4520}
4521
4522static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4523{
4524	unsigned i;
4525	u32 tmp;
4526	struct amdgpu_device *adev = ip_block->adev;
4527
4528	for (i = 0; i < adev->usec_timeout; i++) {
4529		/* read MC_STATUS */
4530		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4531
4532		if (!tmp)
4533			return 0;
4534		udelay(1);
4535	}
4536	return -ETIMEDOUT;
4537}
4538
4539static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
4540{
4541	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4542	u32 tmp;
4543	struct amdgpu_device *adev = ip_block->adev;
4544
4545	/* GRBM_STATUS */
4546	tmp = RREG32(mmGRBM_STATUS);
4547	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4548		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4549		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4550		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4551		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4552		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4553		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4554			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4555
4556	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4557		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4558		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4559	}
4560
4561	/* GRBM_STATUS2 */
4562	tmp = RREG32(mmGRBM_STATUS2);
4563	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4564		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4565
4566	/* SRBM_STATUS */
4567	tmp = RREG32(mmSRBM_STATUS);
4568	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4569		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4570
4571	if (grbm_soft_reset || srbm_soft_reset) {
4572		/* disable CG/PG */
4573		gfx_v7_0_fini_pg(adev);
4574		gfx_v7_0_update_cg(adev, false);
4575
4576		/* stop the rlc */
4577		adev->gfx.rlc.funcs->stop(adev);
4578
4579		/* Disable GFX parsing/prefetching */
4580		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4581
4582		/* Disable MEC parsing/prefetching */
4583		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4584
4585		if (grbm_soft_reset) {
4586			tmp = RREG32(mmGRBM_SOFT_RESET);
4587			tmp |= grbm_soft_reset;
4588			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4589			WREG32(mmGRBM_SOFT_RESET, tmp);
4590			tmp = RREG32(mmGRBM_SOFT_RESET);
4591
4592			udelay(50);
4593
4594			tmp &= ~grbm_soft_reset;
4595			WREG32(mmGRBM_SOFT_RESET, tmp);
4596			tmp = RREG32(mmGRBM_SOFT_RESET);
4597		}
4598
4599		if (srbm_soft_reset) {
4600			tmp = RREG32(mmSRBM_SOFT_RESET);
4601			tmp |= srbm_soft_reset;
4602			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4603			WREG32(mmSRBM_SOFT_RESET, tmp);
4604			tmp = RREG32(mmSRBM_SOFT_RESET);
4605
4606			udelay(50);
4607
4608			tmp &= ~srbm_soft_reset;
4609			WREG32(mmSRBM_SOFT_RESET, tmp);
4610			tmp = RREG32(mmSRBM_SOFT_RESET);
4611		}
4612		/* Wait a little for things to settle down */
4613		udelay(50);
4614	}
4615	return 0;
4616}
4617
4618static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4619						 enum amdgpu_interrupt_state state)
4620{
4621	u32 cp_int_cntl;
4622
4623	switch (state) {
4624	case AMDGPU_IRQ_STATE_DISABLE:
4625		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4626		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4627		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4628		break;
4629	case AMDGPU_IRQ_STATE_ENABLE:
4630		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4631		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4632		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4633		break;
4634	default:
4635		break;
4636	}
4637}
4638
4639static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4640						     int me, int pipe,
4641						     enum amdgpu_interrupt_state state)
4642{
4643	u32 mec_int_cntl, mec_int_cntl_reg;
4644
4645	/*
4646	 * amdgpu controls only the first MEC. That's why this function only
4647	 * handles the setting of interrupts for this specific MEC. All other
4648	 * pipes' interrupts are set by amdkfd.
4649	 */
4650
4651	if (me == 1) {
4652		switch (pipe) {
4653		case 0:
4654			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4655			break;
4656		case 1:
4657			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
4658			break;
4659		case 2:
4660			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
4661			break;
4662		case 3:
4663			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
4664			break;
4665		default:
4666			DRM_DEBUG("invalid pipe %d\n", pipe);
4667			return;
4668		}
4669	} else {
4670		DRM_DEBUG("invalid me %d\n", me);
4671		return;
4672	}
4673
4674	switch (state) {
4675	case AMDGPU_IRQ_STATE_DISABLE:
4676		mec_int_cntl = RREG32(mec_int_cntl_reg);
4677		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4678		WREG32(mec_int_cntl_reg, mec_int_cntl);
4679		break;
4680	case AMDGPU_IRQ_STATE_ENABLE:
4681		mec_int_cntl = RREG32(mec_int_cntl_reg);
4682		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4683		WREG32(mec_int_cntl_reg, mec_int_cntl);
4684		break;
4685	default:
4686		break;
4687	}
4688}
4689
4690static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4691					     struct amdgpu_irq_src *src,
4692					     unsigned type,
4693					     enum amdgpu_interrupt_state state)
4694{
4695	u32 cp_int_cntl;
4696
4697	switch (state) {
4698	case AMDGPU_IRQ_STATE_DISABLE:
4699		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4700		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4701		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4702		break;
4703	case AMDGPU_IRQ_STATE_ENABLE:
4704		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4705		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4706		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4707		break;
4708	default:
4709		break;
4710	}
4711
4712	return 0;
4713}
4714
4715static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4716					      struct amdgpu_irq_src *src,
4717					      unsigned type,
4718					      enum amdgpu_interrupt_state state)
4719{
4720	u32 cp_int_cntl;
4721
4722	switch (state) {
4723	case AMDGPU_IRQ_STATE_DISABLE:
4724		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4725		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4726		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4727		break;
4728	case AMDGPU_IRQ_STATE_ENABLE:
4729		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4730		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4731		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4732		break;
4733	default:
4734		break;
4735	}
4736
4737	return 0;
4738}
4739
4740static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4741					    struct amdgpu_irq_src *src,
4742					    unsigned type,
4743					    enum amdgpu_interrupt_state state)
4744{
4745	switch (type) {
4746	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4747		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4748		break;
4749	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4750		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4751		break;
4752	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4753		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4754		break;
4755	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4756		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4757		break;
4758	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4759		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4760		break;
4761	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4762		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4763		break;
4764	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4765		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4766		break;
4767	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4768		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4769		break;
4770	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4771		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4772		break;
4773	default:
4774		break;
4775	}
4776	return 0;
4777}
4778
4779static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4780			    struct amdgpu_irq_src *source,
4781			    struct amdgpu_iv_entry *entry)
4782{
4783	u8 me_id, pipe_id;
4784	struct amdgpu_ring *ring;
4785	int i;
4786
4787	DRM_DEBUG("IH: CP EOP\n");
4788	me_id = (entry->ring_id & 0x0c) >> 2;
4789	pipe_id = (entry->ring_id & 0x03) >> 0;
4790	switch (me_id) {
4791	case 0:
4792		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4793		break;
4794	case 1:
4795	case 2:
4796		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4797			ring = &adev->gfx.compute_ring[i];
4798			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4799				amdgpu_fence_process(ring);
4800		}
4801		break;
4802	}
4803	return 0;
4804}
4805
4806static void gfx_v7_0_fault(struct amdgpu_device *adev,
4807			   struct amdgpu_iv_entry *entry)
4808{
4809	struct amdgpu_ring *ring;
4810	u8 me_id, pipe_id;
4811	int i;
4812
4813	me_id = (entry->ring_id & 0x0c) >> 2;
4814	pipe_id = (entry->ring_id & 0x03) >> 0;
4815	switch (me_id) {
4816	case 0:
4817		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4818		break;
4819	case 1:
4820	case 2:
4821		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4822			ring = &adev->gfx.compute_ring[i];
4823			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4824				drm_sched_fault(&ring->sched);
4825		}
4826		break;
4827	}
4828}
4829
4830static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4831				 struct amdgpu_irq_src *source,
4832				 struct amdgpu_iv_entry *entry)
4833{
4834	DRM_ERROR("Illegal register access in command stream\n");
4835	gfx_v7_0_fault(adev, entry);
4836	return 0;
4837}
4838
4839static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4840				  struct amdgpu_irq_src *source,
4841				  struct amdgpu_iv_entry *entry)
4842{
4843	DRM_ERROR("Illegal instruction in command stream\n");
4844	// XXX soft reset the gfx block only
4845	gfx_v7_0_fault(adev, entry);
4846	return 0;
4847}
4848
4849static int gfx_v7_0_set_clockgating_state(void *handle,
4850					  enum amd_clockgating_state state)
4851{
4852	bool gate = false;
4853	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4854
4855	if (state == AMD_CG_STATE_GATE)
4856		gate = true;
4857
4858	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
4859	/* order matters! */
4860	if (gate) {
4861		gfx_v7_0_enable_mgcg(adev, true);
4862		gfx_v7_0_enable_cgcg(adev, true);
4863	} else {
4864		gfx_v7_0_enable_cgcg(adev, false);
4865		gfx_v7_0_enable_mgcg(adev, false);
4866	}
4867	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4868
4869	return 0;
4870}
4871
4872static int gfx_v7_0_set_powergating_state(void *handle,
4873					  enum amd_powergating_state state)
4874{
4875	bool gate = false;
4876	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4877
4878	if (state == AMD_PG_STATE_GATE)
4879		gate = true;
4880
4881	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4882			      AMD_PG_SUPPORT_GFX_SMG |
4883			      AMD_PG_SUPPORT_GFX_DMG |
4884			      AMD_PG_SUPPORT_CP |
4885			      AMD_PG_SUPPORT_GDS |
4886			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4887		gfx_v7_0_update_gfx_pg(adev, gate);
4888		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4889			gfx_v7_0_enable_cp_pg(adev, gate);
4890			gfx_v7_0_enable_gds_pg(adev, gate);
4891		}
4892	}
4893
4894	return 0;
4895}
4896
4897static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
4898{
4899	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
4900	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
4901			  PACKET3_TC_ACTION_ENA |
4902			  PACKET3_SH_KCACHE_ACTION_ENA |
4903			  PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
4904	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
4905	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
4906	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
4907}
4908
4909static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
4910{
4911	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
4912	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
4913			  PACKET3_TC_ACTION_ENA |
4914			  PACKET3_SH_KCACHE_ACTION_ENA |
4915			  PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
4916	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
4917	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
4918	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
4919	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
4920	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
4921}
4922
4923static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
4924				  int mem_space, int opt, uint32_t addr0,
4925				  uint32_t addr1, uint32_t ref, uint32_t mask,
4926				  uint32_t inv)
4927{
4928	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4929	amdgpu_ring_write(ring,
4930			  /* memory (1) or register (0) */
4931			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
4932			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
4933			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
4934			   WAIT_REG_MEM_ENGINE(eng_sel)));
4935
4936	if (mem_space)
4937		BUG_ON(addr0 & 0x3); /* Dword align */
4938	amdgpu_ring_write(ring, addr0);
4939	amdgpu_ring_write(ring, addr1);
4940	amdgpu_ring_write(ring, ref);
4941	amdgpu_ring_write(ring, mask);
4942	amdgpu_ring_write(ring, inv); /* poll interval */
4943}
4944
4945static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4946					uint32_t val, uint32_t mask)
4947{
4948	gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4949}
4950
4951static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
4952{
4953	struct amdgpu_device *adev = ring->adev;
4954	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4955	struct amdgpu_ring *kiq_ring = &kiq->ring;
4956	unsigned long flags;
4957	u32 tmp;
4958	int r;
4959
4960	if (amdgpu_sriov_vf(adev))
4961		return -EINVAL;
4962
4963	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4964		return -EINVAL;
4965
4966	spin_lock_irqsave(&kiq->ring_lock, flags);
4967
4968	if (amdgpu_ring_alloc(kiq_ring, 5)) {
4969		spin_unlock_irqrestore(&kiq->ring_lock, flags);
4970		return -ENOMEM;
4971	}
4972
4973	tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
4974	gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
4975	amdgpu_ring_commit(kiq_ring);
4976
4977	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4978
4979	r = amdgpu_ring_test_ring(kiq_ring);
4980	if (r)
4981		return r;
4982
4983	if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
4984		return -ENOMEM;
4985	gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
4986				     ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
4987	gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
4988	gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
4989
4990	return amdgpu_ring_test_ring(ring);
4991}
4992
4993static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
4994	.name = "gfx_v7_0",
4995	.early_init = gfx_v7_0_early_init,
4996	.late_init = gfx_v7_0_late_init,
4997	.sw_init = gfx_v7_0_sw_init,
4998	.sw_fini = gfx_v7_0_sw_fini,
4999	.hw_init = gfx_v7_0_hw_init,
5000	.hw_fini = gfx_v7_0_hw_fini,
5001	.suspend = gfx_v7_0_suspend,
5002	.resume = gfx_v7_0_resume,
5003	.is_idle = gfx_v7_0_is_idle,
5004	.wait_for_idle = gfx_v7_0_wait_for_idle,
5005	.soft_reset = gfx_v7_0_soft_reset,
5006	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
5007	.set_powergating_state = gfx_v7_0_set_powergating_state,
5008};
5009
5010static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5011	.type = AMDGPU_RING_TYPE_GFX,
5012	.align_mask = 0xff,
5013	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5014	.support_64bit_ptrs = false,
5015	.get_rptr = gfx_v7_0_ring_get_rptr,
5016	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5017	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5018	.emit_frame_size =
5019		20 + /* gfx_v7_0_ring_emit_gds_switch */
5020		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5021		5 + /* hdp invalidate */
5022		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
5023		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
5024		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
5025		3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
5026		5, /* SURFACE_SYNC */
5027	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
5028	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5029	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5030	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5031	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5032	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5033	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
 
5034	.test_ring = gfx_v7_0_ring_test_ring,
5035	.test_ib = gfx_v7_0_ring_test_ib,
5036	.insert_nop = amdgpu_ring_insert_nop,
5037	.pad_ib = amdgpu_ring_generic_pad_ib,
5038	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5039	.emit_wreg = gfx_v7_0_ring_emit_wreg,
5040	.soft_recovery = gfx_v7_0_ring_soft_recovery,
5041	.emit_mem_sync = gfx_v7_0_emit_mem_sync,
5042	.reset = gfx_v7_0_reset_kgq,
5043};
5044
5045static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5046	.type = AMDGPU_RING_TYPE_COMPUTE,
5047	.align_mask = 0xff,
5048	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5049	.support_64bit_ptrs = false,
5050	.get_rptr = gfx_v7_0_ring_get_rptr,
5051	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
5052	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
5053	.emit_frame_size =
5054		20 + /* gfx_v7_0_ring_emit_gds_switch */
5055		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5056		5 + /* hdp invalidate */
5057		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5058		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
5059		7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5060		7, /* gfx_v7_0_emit_mem_sync_compute */
5061	.emit_ib_size =	7, /* gfx_v7_0_ring_emit_ib_compute */
5062	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
5063	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
5064	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5065	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5066	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5067	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
 
5068	.test_ring = gfx_v7_0_ring_test_ring,
5069	.test_ib = gfx_v7_0_ring_test_ib,
5070	.insert_nop = amdgpu_ring_insert_nop,
5071	.pad_ib = amdgpu_ring_generic_pad_ib,
5072	.emit_wreg = gfx_v7_0_ring_emit_wreg,
5073	.soft_recovery = gfx_v7_0_ring_soft_recovery,
5074	.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
5075};
5076
5077static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5078{
5079	int i;
5080
5081	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5082		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5083	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5084		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5085}
5086
5087static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5088	.set = gfx_v7_0_set_eop_interrupt_state,
5089	.process = gfx_v7_0_eop_irq,
5090};
5091
5092static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5093	.set = gfx_v7_0_set_priv_reg_fault_state,
5094	.process = gfx_v7_0_priv_reg_irq,
5095};
5096
5097static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5098	.set = gfx_v7_0_set_priv_inst_fault_state,
5099	.process = gfx_v7_0_priv_inst_irq,
5100};
5101
5102static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5103{
5104	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5105	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5106
5107	adev->gfx.priv_reg_irq.num_types = 1;
5108	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5109
5110	adev->gfx.priv_inst_irq.num_types = 1;
5111	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5112}
5113
5114static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5115{
5116	/* init asci gds info */
5117	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
5118	adev->gds.gws_size = 64;
5119	adev->gds.oa_size = 16;
5120	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5121}
5122
5123
5124static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5125{
5126	int i, j, k, counter, active_cu_number = 0;
5127	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5128	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5129	unsigned disable_masks[4 * 2];
5130	u32 ao_cu_num;
5131
5132	if (adev->flags & AMD_IS_APU)
5133		ao_cu_num = 2;
5134	else
5135		ao_cu_num = adev->gfx.config.max_cu_per_sh;
5136
5137	memset(cu_info, 0, sizeof(*cu_info));
5138
5139	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5140
5141	mutex_lock(&adev->grbm_idx_mutex);
5142	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5143		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5144			mask = 1;
5145			ao_bitmap = 0;
5146			counter = 0;
5147			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
5148			if (i < 4 && j < 2)
5149				gfx_v7_0_set_user_cu_inactive_bitmap(
5150					adev, disable_masks[i * 2 + j]);
5151			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5152			cu_info->bitmap[0][i][j] = bitmap;
5153
5154			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5155				if (bitmap & mask) {
5156					if (counter < ao_cu_num)
5157						ao_bitmap |= mask;
5158					counter++;
5159				}
5160				mask <<= 1;
5161			}
5162			active_cu_number += counter;
5163			if (i < 2 && j < 2)
5164				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5165			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5166		}
5167	}
5168	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
5169	mutex_unlock(&adev->grbm_idx_mutex);
5170
5171	cu_info->number = active_cu_number;
5172	cu_info->ao_cu_mask = ao_cu_mask;
5173	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5174	cu_info->max_waves_per_simd = 10;
5175	cu_info->max_scratch_slots_per_cu = 32;
5176	cu_info->wave_front_size = 64;
5177	cu_info->lds_size = 64;
5178}
5179
5180const struct amdgpu_ip_block_version gfx_v7_1_ip_block = {
 
 
 
 
 
 
 
 
 
 
5181	.type = AMD_IP_BLOCK_TYPE_GFX,
5182	.major = 7,
5183	.minor = 1,
5184	.rev = 0,
5185	.funcs = &gfx_v7_0_ip_funcs,
5186};
5187
5188const struct amdgpu_ip_block_version gfx_v7_2_ip_block = {
 
5189	.type = AMD_IP_BLOCK_TYPE_GFX,
5190	.major = 7,
5191	.minor = 2,
5192	.rev = 0,
5193	.funcs = &gfx_v7_0_ip_funcs,
5194};
5195
5196const struct amdgpu_ip_block_version gfx_v7_3_ip_block = {
 
5197	.type = AMD_IP_BLOCK_TYPE_GFX,
5198	.major = 7,
5199	.minor = 3,
5200	.rev = 0,
5201	.funcs = &gfx_v7_0_ip_funcs,
5202};