Linux Audio

Check our new training course

Linux debugging, profiling, tracing and performance analysis training

Apr 14-17, 2025
Register
Loading...
v4.6
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24#include <linux/firmware.h>
 
  25#include <linux/slab.h>
  26#include <linux/module.h>
  27#include <drm/drmP.h>
  28#include "radeon.h"
  29#include "radeon_asic.h"
  30#include "radeon_audio.h"
  31#include <drm/radeon_drm.h>
  32#include "sid.h"
  33#include "atom.h"
  34#include "si_blit_shaders.h"
  35#include "clearstate_si.h"
  36#include "radeon_ucode.h"
  37
 
 
 
 
 
  38
/*
 * Firmware images this driver may request for the SI (Southern Islands)
 * chip families.  Each family appears twice, once with all-uppercase and
 * once with lowercase file names — presumably two firmware naming schemes
 * with runtime fallback between them; NOTE(review): confirm against the
 * microcode-loading code, which is not visible in this chunk.  The
 * lowercase sets also lack the *_mc2.bin entry present in the uppercase
 * sets.
 */

/* Tahiti */
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");

MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
MODULE_FIRMWARE("radeon/tahiti_me.bin");
MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");

/* Pitcairn */
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");

MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
MODULE_FIRMWARE("radeon/pitcairn_me.bin");
MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");

/* Verde */
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");

MODULE_FIRMWARE("radeon/verde_pfp.bin");
MODULE_FIRMWARE("radeon/verde_me.bin");
MODULE_FIRMWARE("radeon/verde_ce.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");

/* Oland */
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");

MODULE_FIRMWARE("radeon/oland_pfp.bin");
MODULE_FIRMWARE("radeon/oland_me.bin");
MODULE_FIRMWARE("radeon/oland_ce.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");

/* Hainan */
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");

MODULE_FIRMWARE("radeon/hainan_pfp.bin");
MODULE_FIRMWARE("radeon/hainan_me.bin");
MODULE_FIRMWARE("radeon/hainan_ce.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
 113
/* Forward declarations for static helpers defined later in this file. */
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
/*
 * Externs implemented in other radeon translation units (sumo, r600,
 * evergreen).  NOTE(review): these would conventionally live in a shared
 * header rather than being re-declared here.
 */
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
/* More forward declarations for static helpers defined later in this file. */
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable);
static void si_init_pg(struct radeon_device *rdev);
static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
 134
 135static const u32 verde_rlc_save_restore_register_list[] =
 136{
 137	(0x8000 << 16) | (0x98f4 >> 2),
 138	0x00000000,
 139	(0x8040 << 16) | (0x98f4 >> 2),
 140	0x00000000,
 141	(0x8000 << 16) | (0xe80 >> 2),
 142	0x00000000,
 143	(0x8040 << 16) | (0xe80 >> 2),
 144	0x00000000,
 145	(0x8000 << 16) | (0x89bc >> 2),
 146	0x00000000,
 147	(0x8040 << 16) | (0x89bc >> 2),
 148	0x00000000,
 149	(0x8000 << 16) | (0x8c1c >> 2),
 150	0x00000000,
 151	(0x8040 << 16) | (0x8c1c >> 2),
 152	0x00000000,
 153	(0x9c00 << 16) | (0x98f0 >> 2),
 154	0x00000000,
 155	(0x9c00 << 16) | (0xe7c >> 2),
 156	0x00000000,
 157	(0x8000 << 16) | (0x9148 >> 2),
 158	0x00000000,
 159	(0x8040 << 16) | (0x9148 >> 2),
 160	0x00000000,
 161	(0x9c00 << 16) | (0x9150 >> 2),
 162	0x00000000,
 163	(0x9c00 << 16) | (0x897c >> 2),
 164	0x00000000,
 165	(0x9c00 << 16) | (0x8d8c >> 2),
 166	0x00000000,
 167	(0x9c00 << 16) | (0xac54 >> 2),
 168	0X00000000,
 169	0x3,
 170	(0x9c00 << 16) | (0x98f8 >> 2),
 171	0x00000000,
 172	(0x9c00 << 16) | (0x9910 >> 2),
 173	0x00000000,
 174	(0x9c00 << 16) | (0x9914 >> 2),
 175	0x00000000,
 176	(0x9c00 << 16) | (0x9918 >> 2),
 177	0x00000000,
 178	(0x9c00 << 16) | (0x991c >> 2),
 179	0x00000000,
 180	(0x9c00 << 16) | (0x9920 >> 2),
 181	0x00000000,
 182	(0x9c00 << 16) | (0x9924 >> 2),
 183	0x00000000,
 184	(0x9c00 << 16) | (0x9928 >> 2),
 185	0x00000000,
 186	(0x9c00 << 16) | (0x992c >> 2),
 187	0x00000000,
 188	(0x9c00 << 16) | (0x9930 >> 2),
 189	0x00000000,
 190	(0x9c00 << 16) | (0x9934 >> 2),
 191	0x00000000,
 192	(0x9c00 << 16) | (0x9938 >> 2),
 193	0x00000000,
 194	(0x9c00 << 16) | (0x993c >> 2),
 195	0x00000000,
 196	(0x9c00 << 16) | (0x9940 >> 2),
 197	0x00000000,
 198	(0x9c00 << 16) | (0x9944 >> 2),
 199	0x00000000,
 200	(0x9c00 << 16) | (0x9948 >> 2),
 201	0x00000000,
 202	(0x9c00 << 16) | (0x994c >> 2),
 203	0x00000000,
 204	(0x9c00 << 16) | (0x9950 >> 2),
 205	0x00000000,
 206	(0x9c00 << 16) | (0x9954 >> 2),
 207	0x00000000,
 208	(0x9c00 << 16) | (0x9958 >> 2),
 209	0x00000000,
 210	(0x9c00 << 16) | (0x995c >> 2),
 211	0x00000000,
 212	(0x9c00 << 16) | (0x9960 >> 2),
 213	0x00000000,
 214	(0x9c00 << 16) | (0x9964 >> 2),
 215	0x00000000,
 216	(0x9c00 << 16) | (0x9968 >> 2),
 217	0x00000000,
 218	(0x9c00 << 16) | (0x996c >> 2),
 219	0x00000000,
 220	(0x9c00 << 16) | (0x9970 >> 2),
 221	0x00000000,
 222	(0x9c00 << 16) | (0x9974 >> 2),
 223	0x00000000,
 224	(0x9c00 << 16) | (0x9978 >> 2),
 225	0x00000000,
 226	(0x9c00 << 16) | (0x997c >> 2),
 227	0x00000000,
 228	(0x9c00 << 16) | (0x9980 >> 2),
 229	0x00000000,
 230	(0x9c00 << 16) | (0x9984 >> 2),
 231	0x00000000,
 232	(0x9c00 << 16) | (0x9988 >> 2),
 233	0x00000000,
 234	(0x9c00 << 16) | (0x998c >> 2),
 235	0x00000000,
 236	(0x9c00 << 16) | (0x8c00 >> 2),
 237	0x00000000,
 238	(0x9c00 << 16) | (0x8c14 >> 2),
 239	0x00000000,
 240	(0x9c00 << 16) | (0x8c04 >> 2),
 241	0x00000000,
 242	(0x9c00 << 16) | (0x8c08 >> 2),
 243	0x00000000,
 244	(0x8000 << 16) | (0x9b7c >> 2),
 245	0x00000000,
 246	(0x8040 << 16) | (0x9b7c >> 2),
 247	0x00000000,
 248	(0x8000 << 16) | (0xe84 >> 2),
 249	0x00000000,
 250	(0x8040 << 16) | (0xe84 >> 2),
 251	0x00000000,
 252	(0x8000 << 16) | (0x89c0 >> 2),
 253	0x00000000,
 254	(0x8040 << 16) | (0x89c0 >> 2),
 255	0x00000000,
 256	(0x8000 << 16) | (0x914c >> 2),
 257	0x00000000,
 258	(0x8040 << 16) | (0x914c >> 2),
 259	0x00000000,
 260	(0x8000 << 16) | (0x8c20 >> 2),
 261	0x00000000,
 262	(0x8040 << 16) | (0x8c20 >> 2),
 263	0x00000000,
 264	(0x8000 << 16) | (0x9354 >> 2),
 265	0x00000000,
 266	(0x8040 << 16) | (0x9354 >> 2),
 267	0x00000000,
 268	(0x9c00 << 16) | (0x9060 >> 2),
 269	0x00000000,
 270	(0x9c00 << 16) | (0x9364 >> 2),
 271	0x00000000,
 272	(0x9c00 << 16) | (0x9100 >> 2),
 273	0x00000000,
 274	(0x9c00 << 16) | (0x913c >> 2),
 275	0x00000000,
 276	(0x8000 << 16) | (0x90e0 >> 2),
 277	0x00000000,
 278	(0x8000 << 16) | (0x90e4 >> 2),
 279	0x00000000,
 280	(0x8000 << 16) | (0x90e8 >> 2),
 281	0x00000000,
 282	(0x8040 << 16) | (0x90e0 >> 2),
 283	0x00000000,
 284	(0x8040 << 16) | (0x90e4 >> 2),
 285	0x00000000,
 286	(0x8040 << 16) | (0x90e8 >> 2),
 287	0x00000000,
 288	(0x9c00 << 16) | (0x8bcc >> 2),
 289	0x00000000,
 290	(0x9c00 << 16) | (0x8b24 >> 2),
 291	0x00000000,
 292	(0x9c00 << 16) | (0x88c4 >> 2),
 293	0x00000000,
 294	(0x9c00 << 16) | (0x8e50 >> 2),
 295	0x00000000,
 296	(0x9c00 << 16) | (0x8c0c >> 2),
 297	0x00000000,
 298	(0x9c00 << 16) | (0x8e58 >> 2),
 299	0x00000000,
 300	(0x9c00 << 16) | (0x8e5c >> 2),
 301	0x00000000,
 302	(0x9c00 << 16) | (0x9508 >> 2),
 303	0x00000000,
 304	(0x9c00 << 16) | (0x950c >> 2),
 305	0x00000000,
 306	(0x9c00 << 16) | (0x9494 >> 2),
 307	0x00000000,
 308	(0x9c00 << 16) | (0xac0c >> 2),
 309	0x00000000,
 310	(0x9c00 << 16) | (0xac10 >> 2),
 311	0x00000000,
 312	(0x9c00 << 16) | (0xac14 >> 2),
 313	0x00000000,
 314	(0x9c00 << 16) | (0xae00 >> 2),
 315	0x00000000,
 316	(0x9c00 << 16) | (0xac08 >> 2),
 317	0x00000000,
 318	(0x9c00 << 16) | (0x88d4 >> 2),
 319	0x00000000,
 320	(0x9c00 << 16) | (0x88c8 >> 2),
 321	0x00000000,
 322	(0x9c00 << 16) | (0x88cc >> 2),
 323	0x00000000,
 324	(0x9c00 << 16) | (0x89b0 >> 2),
 325	0x00000000,
 326	(0x9c00 << 16) | (0x8b10 >> 2),
 327	0x00000000,
 328	(0x9c00 << 16) | (0x8a14 >> 2),
 329	0x00000000,
 330	(0x9c00 << 16) | (0x9830 >> 2),
 331	0x00000000,
 332	(0x9c00 << 16) | (0x9834 >> 2),
 333	0x00000000,
 334	(0x9c00 << 16) | (0x9838 >> 2),
 335	0x00000000,
 336	(0x9c00 << 16) | (0x9a10 >> 2),
 337	0x00000000,
 338	(0x8000 << 16) | (0x9870 >> 2),
 339	0x00000000,
 340	(0x8000 << 16) | (0x9874 >> 2),
 341	0x00000000,
 342	(0x8001 << 16) | (0x9870 >> 2),
 343	0x00000000,
 344	(0x8001 << 16) | (0x9874 >> 2),
 345	0x00000000,
 346	(0x8040 << 16) | (0x9870 >> 2),
 347	0x00000000,
 348	(0x8040 << 16) | (0x9874 >> 2),
 349	0x00000000,
 350	(0x8041 << 16) | (0x9870 >> 2),
 351	0x00000000,
 352	(0x8041 << 16) | (0x9874 >> 2),
 353	0x00000000,
 354	0x00000000
 355};
 356
/*
 * "Golden" RLC register settings for Tahiti.
 * Triples of (register offset, bitmask, value); presumably applied as a
 * masked read-modify-write by a register-sequence helper — confirm
 * against the caller, which is not visible in this chunk.
 */
static const u32 tahiti_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4,
	0xf4a8, 0xffffffff, 0x00000000
};
 366
/*
 * "Golden" register settings for Tahiti.
 * Triples of (register offset, bitmask, value); presumably applied as a
 * masked read-modify-write — confirm against the register-sequence
 * helper, not visible in this chunk.
 */
static const u32 tahiti_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x277c, 0x00000003, 0x000007ff,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x00000200, 0x000002fb,
	0xac10, 0xffffffff, 0x0000543b,
	0xac0c, 0xffffffff, 0xa9210876,
	0x88d0, 0xffffffff, 0x000fff40,
	0x88d4, 0x0000001f, 0x00000010,
	0x1410, 0x20000000, 0x20fffed8,
	0x15c0, 0x000c0fc0, 0x000c0400
};
 401
/* Additional golden (offset, mask, value) triple for Tahiti only. */
static const u32 tahiti_golden_registers2[] =
{
	0xc64, 0x00000001, 0x00000001
};
 406
/* Golden RLC register (offset, mask, value) triples for Pitcairn. */
static const u32 pitcairn_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601004,
	0xc47c, 0xffffffff, 0x10102020,
	0xc488, 0xffffffff, 0x01000020,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000a4
};
 415
/* Golden register (offset, mask, value) triples for Pitcairn. */
static const u32 pitcairn_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f7,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x32761054,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
 446
/* Golden RLC register (offset, mask, value) triples for Verde. */
static const u32 verde_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x033f1005,
	0xc47c, 0xffffffff, 0x10808020,
	0xc488, 0xffffffff, 0x00800008,
	0xc314, 0xffffffff, 0x00001000,
	0xc30c, 0xffffffff, 0x80010014
};
 455
/*
 * Golden register (offset, mask, value) triples for Verde.
 * NOTE(review): several triples are exact duplicates (0xd030, 0xd830,
 * 0x2ae4, 0x240c, 0x8a14, 0x28350, 0x9100, 0x8e88, 0x8e84, 0xac14,
 * 0xac10, 0xac0c, 0x88d4 appear 2-3 times with identical values).
 * Re-applying an identical masked write should be idempotent, but the
 * duplicates look like copy/paste residue — candidate for cleanup after
 * hardware verification.
 */
static const u32 verde_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
 511
/* Golden RLC register (offset, mask, value) triples for Oland. */
static const u32 oland_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4
};
 520
/* Golden register (offset, mask, value) triples for Oland. */
static const u32 oland_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
 551
/* Golden register (offset, mask, value) triples for Hainan. */
static const u32 hainan_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd030, 0x000300c0, 0x00800040,
	0xd8c0, 0xff000fff, 0x00000100,
	0xd830, 0x000300c0, 0x00800040,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000000,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x03e00000, 0x03600000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f1,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
 580
/* Additional golden (offset, mask, value) triple for Hainan only. */
static const u32 hainan_golden_registers2[] =
{
	0x98f8, 0xffffffff, 0x02010001
};
 585
/*
 * Clock-gating init sequence for Tahiti (name suggests medium-grain /
 * coarse-grain clock gating): (offset, mask, value) triples.
 * NOTE(review): consumer not visible in this chunk — confirm how the
 * table is applied before editing values.
 */
static const u32 tahiti_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x91d8, 0xffffffff, 0x00070006,
	0x91dc, 0xffffffff, 0x00090008,
	0x91e0, 0xffffffff, 0x0000000c,
	0x91e4, 0xffffffff, 0x000b000a,
	0x91e8, 0xffffffff, 0x000e000d,
	0x91ec, 0xffffffff, 0x00080007,
	0x91f0, 0xffffffff, 0x000a0009,
	0x91f4, 0xffffffff, 0x0000000d,
	0x91f8, 0xffffffff, 0x000c000b,
	0x91fc, 0xffffffff, 0x000f000e,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9264, 0xffffffff, 0x000e000d,
	0x9268, 0xffffffff, 0x0010000f,
	0x926c, 0xffffffff, 0x00000013,
	0x9270, 0xffffffff, 0x00120011,
	0x9274, 0xffffffff, 0x00150014,
	0x9278, 0xffffffff, 0x000f000e,
	0x927c, 0xffffffff, 0x00110010,
	0x9280, 0xffffffff, 0x00000014,
	0x9284, 0xffffffff, 0x00130012,
	0x9288, 0xffffffff, 0x00160015,
	0x928c, 0xffffffff, 0x0010000f,
	0x9290, 0xffffffff, 0x00120011,
	0x9294, 0xffffffff, 0x00000015,
	0x9298, 0xffffffff, 0x00140013,
	0x929c, 0xffffffff, 0x00170016,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
 715
/*
 * Clock-gating init sequence for Pitcairn: (offset, mask, value)
 * triples.  NOTE(review): consumer not visible in this chunk — confirm
 * how the table is applied before editing values.
 */
static const u32 pitcairn_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
 813
/*
 * Clock-gating init sequence for Verde: (offset, mask, value) triples.
 * NOTE(review): consumer not visible in this chunk — confirm how the
 * table is applied before editing values.
 */
static const u32 verde_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
 913
/* MGCG/CGCG clock-gating init for OLAND: { reg offset, mask, value } triples
 * consumed by radeon_program_register_sequence() in si_init_golden_registers().
 */
static const u32 oland_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
 993
/* MGCG/CGCG clock-gating init for HAINAN: { reg offset, mask, value } triples
 * consumed by radeon_program_register_sequence() in si_init_golden_registers().
 */
static const u32 hainan_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
1070
1071static u32 verde_pg_init[] =
1072{
1073	0x353c, 0xffffffff, 0x40000,
1074	0x3538, 0xffffffff, 0x200010ff,
1075	0x353c, 0xffffffff, 0x0,
1076	0x353c, 0xffffffff, 0x0,
1077	0x353c, 0xffffffff, 0x0,
1078	0x353c, 0xffffffff, 0x0,
1079	0x353c, 0xffffffff, 0x0,
1080	0x353c, 0xffffffff, 0x7007,
1081	0x3538, 0xffffffff, 0x300010ff,
1082	0x353c, 0xffffffff, 0x0,
1083	0x353c, 0xffffffff, 0x0,
1084	0x353c, 0xffffffff, 0x0,
1085	0x353c, 0xffffffff, 0x0,
1086	0x353c, 0xffffffff, 0x0,
1087	0x353c, 0xffffffff, 0x400000,
1088	0x3538, 0xffffffff, 0x100010ff,
1089	0x353c, 0xffffffff, 0x0,
1090	0x353c, 0xffffffff, 0x0,
1091	0x353c, 0xffffffff, 0x0,
1092	0x353c, 0xffffffff, 0x0,
1093	0x353c, 0xffffffff, 0x0,
1094	0x353c, 0xffffffff, 0x120200,
1095	0x3538, 0xffffffff, 0x500010ff,
1096	0x353c, 0xffffffff, 0x0,
1097	0x353c, 0xffffffff, 0x0,
1098	0x353c, 0xffffffff, 0x0,
1099	0x353c, 0xffffffff, 0x0,
1100	0x353c, 0xffffffff, 0x0,
1101	0x353c, 0xffffffff, 0x1e1e16,
1102	0x3538, 0xffffffff, 0x600010ff,
1103	0x353c, 0xffffffff, 0x0,
1104	0x353c, 0xffffffff, 0x0,
1105	0x353c, 0xffffffff, 0x0,
1106	0x353c, 0xffffffff, 0x0,
1107	0x353c, 0xffffffff, 0x0,
1108	0x353c, 0xffffffff, 0x171f1e,
1109	0x3538, 0xffffffff, 0x700010ff,
1110	0x353c, 0xffffffff, 0x0,
1111	0x353c, 0xffffffff, 0x0,
1112	0x353c, 0xffffffff, 0x0,
1113	0x353c, 0xffffffff, 0x0,
1114	0x353c, 0xffffffff, 0x0,
1115	0x353c, 0xffffffff, 0x0,
1116	0x3538, 0xffffffff, 0x9ff,
1117	0x3500, 0xffffffff, 0x0,
1118	0x3504, 0xffffffff, 0x10000800,
1119	0x3504, 0xffffffff, 0xf,
1120	0x3504, 0xffffffff, 0xf,
1121	0x3500, 0xffffffff, 0x4,
1122	0x3504, 0xffffffff, 0x1000051e,
1123	0x3504, 0xffffffff, 0xffff,
1124	0x3504, 0xffffffff, 0xffff,
1125	0x3500, 0xffffffff, 0x8,
1126	0x3504, 0xffffffff, 0x80500,
1127	0x3500, 0xffffffff, 0x12,
1128	0x3504, 0xffffffff, 0x9050c,
1129	0x3500, 0xffffffff, 0x1d,
1130	0x3504, 0xffffffff, 0xb052c,
1131	0x3500, 0xffffffff, 0x2a,
1132	0x3504, 0xffffffff, 0x1053e,
1133	0x3500, 0xffffffff, 0x2d,
1134	0x3504, 0xffffffff, 0x10546,
1135	0x3500, 0xffffffff, 0x30,
1136	0x3504, 0xffffffff, 0xa054e,
1137	0x3500, 0xffffffff, 0x3c,
1138	0x3504, 0xffffffff, 0x1055f,
1139	0x3500, 0xffffffff, 0x3f,
1140	0x3504, 0xffffffff, 0x10567,
1141	0x3500, 0xffffffff, 0x42,
1142	0x3504, 0xffffffff, 0x1056f,
1143	0x3500, 0xffffffff, 0x45,
1144	0x3504, 0xffffffff, 0x10572,
1145	0x3500, 0xffffffff, 0x48,
1146	0x3504, 0xffffffff, 0x20575,
1147	0x3500, 0xffffffff, 0x4c,
1148	0x3504, 0xffffffff, 0x190801,
1149	0x3500, 0xffffffff, 0x67,
1150	0x3504, 0xffffffff, 0x1082a,
1151	0x3500, 0xffffffff, 0x6a,
1152	0x3504, 0xffffffff, 0x1b082d,
1153	0x3500, 0xffffffff, 0x87,
1154	0x3504, 0xffffffff, 0x310851,
1155	0x3500, 0xffffffff, 0xba,
1156	0x3504, 0xffffffff, 0x891,
1157	0x3500, 0xffffffff, 0xbc,
1158	0x3504, 0xffffffff, 0x893,
1159	0x3500, 0xffffffff, 0xbe,
1160	0x3504, 0xffffffff, 0x20895,
1161	0x3500, 0xffffffff, 0xc2,
1162	0x3504, 0xffffffff, 0x20899,
1163	0x3500, 0xffffffff, 0xc6,
1164	0x3504, 0xffffffff, 0x2089d,
1165	0x3500, 0xffffffff, 0xca,
1166	0x3504, 0xffffffff, 0x8a1,
1167	0x3500, 0xffffffff, 0xcc,
1168	0x3504, 0xffffffff, 0x8a3,
1169	0x3500, 0xffffffff, 0xce,
1170	0x3504, 0xffffffff, 0x308a5,
1171	0x3500, 0xffffffff, 0xd3,
1172	0x3504, 0xffffffff, 0x6d08cd,
1173	0x3500, 0xffffffff, 0x142,
1174	0x3504, 0xffffffff, 0x2000095a,
1175	0x3504, 0xffffffff, 0x1,
1176	0x3500, 0xffffffff, 0x144,
1177	0x3504, 0xffffffff, 0x301f095b,
1178	0x3500, 0xffffffff, 0x165,
1179	0x3504, 0xffffffff, 0xc094d,
1180	0x3500, 0xffffffff, 0x173,
1181	0x3504, 0xffffffff, 0xf096d,
1182	0x3500, 0xffffffff, 0x184,
1183	0x3504, 0xffffffff, 0x15097f,
1184	0x3500, 0xffffffff, 0x19b,
1185	0x3504, 0xffffffff, 0xc0998,
1186	0x3500, 0xffffffff, 0x1a9,
1187	0x3504, 0xffffffff, 0x409a7,
1188	0x3500, 0xffffffff, 0x1af,
1189	0x3504, 0xffffffff, 0xcdc,
1190	0x3500, 0xffffffff, 0x1b1,
1191	0x3504, 0xffffffff, 0x800,
1192	0x3508, 0xffffffff, 0x6c9b2000,
1193	0x3510, 0xfc00, 0x2000,
1194	0x3544, 0xffffffff, 0xfc0,
1195	0x28d4, 0x00000100, 0x100
1196};
1197
1198static void si_init_golden_registers(struct radeon_device *rdev)
1199{
1200	switch (rdev->family) {
1201	case CHIP_TAHITI:
1202		radeon_program_register_sequence(rdev,
1203						 tahiti_golden_registers,
1204						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
1205		radeon_program_register_sequence(rdev,
1206						 tahiti_golden_rlc_registers,
1207						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
1208		radeon_program_register_sequence(rdev,
1209						 tahiti_mgcg_cgcg_init,
1210						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
1211		radeon_program_register_sequence(rdev,
1212						 tahiti_golden_registers2,
1213						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
1214		break;
1215	case CHIP_PITCAIRN:
1216		radeon_program_register_sequence(rdev,
1217						 pitcairn_golden_registers,
1218						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
1219		radeon_program_register_sequence(rdev,
1220						 pitcairn_golden_rlc_registers,
1221						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
1222		radeon_program_register_sequence(rdev,
1223						 pitcairn_mgcg_cgcg_init,
1224						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
1225		break;
1226	case CHIP_VERDE:
1227		radeon_program_register_sequence(rdev,
1228						 verde_golden_registers,
1229						 (const u32)ARRAY_SIZE(verde_golden_registers));
1230		radeon_program_register_sequence(rdev,
1231						 verde_golden_rlc_registers,
1232						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
1233		radeon_program_register_sequence(rdev,
1234						 verde_mgcg_cgcg_init,
1235						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
1236		radeon_program_register_sequence(rdev,
1237						 verde_pg_init,
1238						 (const u32)ARRAY_SIZE(verde_pg_init));
1239		break;
1240	case CHIP_OLAND:
1241		radeon_program_register_sequence(rdev,
1242						 oland_golden_registers,
1243						 (const u32)ARRAY_SIZE(oland_golden_registers));
1244		radeon_program_register_sequence(rdev,
1245						 oland_golden_rlc_registers,
1246						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
1247		radeon_program_register_sequence(rdev,
1248						 oland_mgcg_cgcg_init,
1249						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
1250		break;
1251	case CHIP_HAINAN:
1252		radeon_program_register_sequence(rdev,
1253						 hainan_golden_registers,
1254						 (const u32)ARRAY_SIZE(hainan_golden_registers));
1255		radeon_program_register_sequence(rdev,
1256						 hainan_golden_registers2,
1257						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
1258		radeon_program_register_sequence(rdev,
1259						 hainan_mgcg_cgcg_init,
1260						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
1261		break;
1262	default:
1263		break;
1264	}
1265}
1266
1267/**
1268 * si_get_allowed_info_register - fetch the register for the info ioctl
1269 *
1270 * @rdev: radeon_device pointer
1271 * @reg: register offset in bytes
1272 * @val: register value
1273 *
1274 * Returns 0 for success or -EINVAL for an invalid register
1275 *
1276 */
1277int si_get_allowed_info_register(struct radeon_device *rdev,
1278				 u32 reg, u32 *val)
1279{
1280	switch (reg) {
1281	case GRBM_STATUS:
1282	case GRBM_STATUS2:
1283	case GRBM_STATUS_SE0:
1284	case GRBM_STATUS_SE1:
1285	case SRBM_STATUS:
1286	case SRBM_STATUS2:
1287	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
1288	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
1289	case UVD_STATUS:
1290		*val = RREG32(reg);
1291		return 0;
1292	default:
1293		return -EINVAL;
1294	}
1295}
1296
1297#define PCIE_BUS_CLK                10000
1298#define TCLK                        (PCIE_BUS_CLK / 10)
1299
1300/**
1301 * si_get_xclk - get the xclk
1302 *
1303 * @rdev: radeon_device pointer
1304 *
1305 * Returns the reference clock used by the gfx engine
1306 * (SI).
1307 */
1308u32 si_get_xclk(struct radeon_device *rdev)
1309{
1310	u32 reference_clock = rdev->clock.spll.reference_freq;
1311	u32 tmp;
1312
1313	tmp = RREG32(CG_CLKPIN_CNTL_2);
1314	if (tmp & MUX_TCLK_TO_XCLK)
1315		return TCLK;
1316
1317	tmp = RREG32(CG_CLKPIN_CNTL);
1318	if (tmp & XTALIN_DIVIDE)
1319		return reference_clock / 4;
1320
1321	return reference_clock;
1322}
1323
1324/* get temperature in millidegrees */
1325int si_get_temp(struct radeon_device *rdev)
1326{
1327	u32 temp;
1328	int actual_temp = 0;
1329
1330	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
1331		CTF_TEMP_SHIFT;
1332
1333	if (temp & 0x200)
1334		actual_temp = 255;
1335	else
1336		actual_temp = temp & 0x1ff;
1337
1338	actual_temp = (actual_temp * 1000);
1339
1340	return actual_temp;
1341}
1342
1343#define TAHITI_IO_MC_REGS_SIZE 36
1344
/* MC_SEQ_IO_DEBUG { index, data } pairs written by si_mc_load_microcode()
 * when loading the legacy (non-header) TAHITI MC firmware image.
 */
static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a77400}
};
1383
/* MC_SEQ_IO_DEBUG { index, data } pairs written by si_mc_load_microcode()
 * when loading the legacy (non-header) PITCAIRN MC firmware image.
 */
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};
1422
/* MC_SEQ_IO_DEBUG { index, data } pairs written by si_mc_load_microcode()
 * when loading the legacy (non-header) VERDE MC firmware image.
 */
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};
1461
/* MC_SEQ_IO_DEBUG { index, data } pairs written by si_mc_load_microcode()
 * when loading the legacy (non-header) OLAND MC firmware image.
 */
static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a17730}
};
1500
/* MC_SEQ_IO_DEBUG { index, data } pairs written by si_mc_load_microcode()
 * when loading the legacy (non-header) HAINAN MC firmware image.
 */
static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a07730}
};
1539
1540/* ucode loading */
1541int si_mc_load_microcode(struct radeon_device *rdev)
1542{
1543	const __be32 *fw_data = NULL;
1544	const __le32 *new_fw_data = NULL;
1545	u32 running, blackout = 0;
1546	u32 *io_mc_regs = NULL;
1547	const __le32 *new_io_mc_regs = NULL;
1548	int i, regs_size, ucode_size;
1549
1550	if (!rdev->mc_fw)
1551		return -EINVAL;
1552
1553	if (rdev->new_fw) {
1554		const struct mc_firmware_header_v1_0 *hdr =
1555			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1556
1557		radeon_ucode_print_mc_hdr(&hdr->header);
1558		regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
1559		new_io_mc_regs = (const __le32 *)
1560			(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1561		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1562		new_fw_data = (const __le32 *)
1563			(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1564	} else {
1565		ucode_size = rdev->mc_fw->size / 4;
1566
1567		switch (rdev->family) {
1568		case CHIP_TAHITI:
1569			io_mc_regs = (u32 *)&tahiti_io_mc_regs;
1570			regs_size = TAHITI_IO_MC_REGS_SIZE;
1571			break;
1572		case CHIP_PITCAIRN:
1573			io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
1574			regs_size = TAHITI_IO_MC_REGS_SIZE;
1575			break;
1576		case CHIP_VERDE:
1577		default:
1578			io_mc_regs = (u32 *)&verde_io_mc_regs;
1579			regs_size = TAHITI_IO_MC_REGS_SIZE;
1580			break;
1581		case CHIP_OLAND:
1582			io_mc_regs = (u32 *)&oland_io_mc_regs;
1583			regs_size = TAHITI_IO_MC_REGS_SIZE;
1584			break;
1585		case CHIP_HAINAN:
1586			io_mc_regs = (u32 *)&hainan_io_mc_regs;
1587			regs_size = TAHITI_IO_MC_REGS_SIZE;
1588			break;
1589		}
1590		fw_data = (const __be32 *)rdev->mc_fw->data;
1591	}
1592
1593	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
1594
1595	if (running == 0) {
1596		if (running) {
1597			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1598			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
1599		}
1600
1601		/* reset the engine and set to writable */
1602		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1603		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
1604
1605		/* load mc io regs */
1606		for (i = 0; i < regs_size; i++) {
1607			if (rdev->new_fw) {
1608				WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
1609				WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
1610			} else {
1611				WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1612				WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1613			}
1614		}
1615		/* load the MC ucode */
1616		for (i = 0; i < ucode_size; i++) {
1617			if (rdev->new_fw)
1618				WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
1619			else
1620				WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1621		}
1622
1623		/* put the engine back into the active state */
1624		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1625		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
1626		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
1627
1628		/* wait for training to complete */
1629		for (i = 0; i < rdev->usec_timeout; i++) {
1630			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
1631				break;
1632			udelay(1);
1633		}
1634		for (i = 0; i < rdev->usec_timeout; i++) {
1635			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
1636				break;
1637			udelay(1);
1638		}
1639
1640		if (running)
1641			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
1642	}
1643
1644	return 0;
1645}
1646
1647static int si_init_microcode(struct radeon_device *rdev)
1648{
 
1649	const char *chip_name;
1650	const char *new_chip_name;
1651	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1652	size_t smc_req_size, mc2_req_size;
1653	char fw_name[30];
1654	int err;
1655	int new_fw = 0;
1656
1657	DRM_DEBUG("\n");
1658
 
 
 
 
 
 
 
1659	switch (rdev->family) {
1660	case CHIP_TAHITI:
1661		chip_name = "TAHITI";
1662		new_chip_name = "tahiti";
1663		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1664		me_req_size = SI_PM4_UCODE_SIZE * 4;
1665		ce_req_size = SI_CE_UCODE_SIZE * 4;
1666		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1667		mc_req_size = SI_MC_UCODE_SIZE * 4;
1668		mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
1669		smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
1670		break;
1671	case CHIP_PITCAIRN:
1672		chip_name = "PITCAIRN";
1673		new_chip_name = "pitcairn";
1674		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1675		me_req_size = SI_PM4_UCODE_SIZE * 4;
1676		ce_req_size = SI_CE_UCODE_SIZE * 4;
1677		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1678		mc_req_size = SI_MC_UCODE_SIZE * 4;
1679		mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
1680		smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
1681		break;
1682	case CHIP_VERDE:
1683		chip_name = "VERDE";
1684		new_chip_name = "verde";
1685		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1686		me_req_size = SI_PM4_UCODE_SIZE * 4;
1687		ce_req_size = SI_CE_UCODE_SIZE * 4;
1688		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1689		mc_req_size = SI_MC_UCODE_SIZE * 4;
1690		mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
1691		smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
1692		break;
1693	case CHIP_OLAND:
1694		chip_name = "OLAND";
1695		new_chip_name = "oland";
1696		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1697		me_req_size = SI_PM4_UCODE_SIZE * 4;
1698		ce_req_size = SI_CE_UCODE_SIZE * 4;
1699		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1700		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
1701		smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
1702		break;
1703	case CHIP_HAINAN:
1704		chip_name = "HAINAN";
1705		new_chip_name = "hainan";
1706		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1707		me_req_size = SI_PM4_UCODE_SIZE * 4;
1708		ce_req_size = SI_CE_UCODE_SIZE * 4;
1709		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1710		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
1711		smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
1712		break;
1713	default: BUG();
1714	}
1715
1716	DRM_INFO("Loading %s Microcode\n", new_chip_name);
1717
1718	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
1719	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1720	if (err) {
1721		snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1722		err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1723		if (err)
1724			goto out;
1725		if (rdev->pfp_fw->size != pfp_req_size) {
1726			printk(KERN_ERR
1727			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1728			       rdev->pfp_fw->size, fw_name);
1729			err = -EINVAL;
1730			goto out;
1731		}
1732	} else {
1733		err = radeon_ucode_validate(rdev->pfp_fw);
1734		if (err) {
1735			printk(KERN_ERR
1736			       "si_cp: validation failed for firmware \"%s\"\n",
1737			       fw_name);
1738			goto out;
1739		} else {
1740			new_fw++;
1741		}
1742	}
1743
1744	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
1745	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1746	if (err) {
1747		snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1748		err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1749		if (err)
1750			goto out;
1751		if (rdev->me_fw->size != me_req_size) {
1752			printk(KERN_ERR
1753			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1754			       rdev->me_fw->size, fw_name);
1755			err = -EINVAL;
1756		}
1757	} else {
1758		err = radeon_ucode_validate(rdev->me_fw);
1759		if (err) {
1760			printk(KERN_ERR
1761			       "si_cp: validation failed for firmware \"%s\"\n",
1762			       fw_name);
1763			goto out;
1764		} else {
1765			new_fw++;
1766		}
1767	}
1768
1769	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
1770	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1771	if (err) {
1772		snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1773		err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1774		if (err)
1775			goto out;
1776		if (rdev->ce_fw->size != ce_req_size) {
1777			printk(KERN_ERR
1778			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1779			       rdev->ce_fw->size, fw_name);
1780			err = -EINVAL;
1781		}
1782	} else {
1783		err = radeon_ucode_validate(rdev->ce_fw);
1784		if (err) {
1785			printk(KERN_ERR
1786			       "si_cp: validation failed for firmware \"%s\"\n",
1787			       fw_name);
1788			goto out;
1789		} else {
1790			new_fw++;
1791		}
1792	}
1793
1794	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
1795	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1796	if (err) {
1797		snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1798		err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1799		if (err)
1800			goto out;
1801		if (rdev->rlc_fw->size != rlc_req_size) {
1802			printk(KERN_ERR
1803			       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1804			       rdev->rlc_fw->size, fw_name);
1805			err = -EINVAL;
1806		}
1807	} else {
1808		err = radeon_ucode_validate(rdev->rlc_fw);
1809		if (err) {
1810			printk(KERN_ERR
1811			       "si_cp: validation failed for firmware \"%s\"\n",
1812			       fw_name);
1813			goto out;
1814		} else {
1815			new_fw++;
1816		}
1817	}
1818
1819	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1820	err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1821	if (err) {
1822		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1823		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1824		if (err) {
1825			snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1826			err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1827			if (err)
1828				goto out;
1829		}
1830		if ((rdev->mc_fw->size != mc_req_size) &&
1831		    (rdev->mc_fw->size != mc2_req_size)) {
1832			printk(KERN_ERR
1833			       "si_mc: Bogus length %zu in firmware \"%s\"\n",
1834			       rdev->mc_fw->size, fw_name);
1835			err = -EINVAL;
1836		}
1837		DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1838	} else {
1839		err = radeon_ucode_validate(rdev->mc_fw);
1840		if (err) {
1841			printk(KERN_ERR
1842			       "si_cp: validation failed for firmware \"%s\"\n",
1843			       fw_name);
1844			goto out;
1845		} else {
1846			new_fw++;
1847		}
1848	}
1849
1850	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
1851	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1852	if (err) {
1853		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1854		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1855		if (err) {
1856			printk(KERN_ERR
1857			       "smc: error loading firmware \"%s\"\n",
1858			       fw_name);
1859			release_firmware(rdev->smc_fw);
1860			rdev->smc_fw = NULL;
1861			err = 0;
1862		} else if (rdev->smc_fw->size != smc_req_size) {
1863			printk(KERN_ERR
1864			       "si_smc: Bogus length %zu in firmware \"%s\"\n",
1865			       rdev->smc_fw->size, fw_name);
1866			err = -EINVAL;
1867		}
1868	} else {
1869		err = radeon_ucode_validate(rdev->smc_fw);
1870		if (err) {
1871			printk(KERN_ERR
1872			       "si_cp: validation failed for firmware \"%s\"\n",
1873			       fw_name);
1874			goto out;
1875		} else {
1876			new_fw++;
1877		}
1878	}
1879
1880	if (new_fw == 0) {
1881		rdev->new_fw = false;
1882	} else if (new_fw < 6) {
1883		printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
 
 
 
 
1884		err = -EINVAL;
1885	} else {
1886		rdev->new_fw = true;
1887	}
 
1888out:
 
 
1889	if (err) {
1890		if (err != -EINVAL)
1891			printk(KERN_ERR
1892			       "si_cp: Failed to load firmware \"%s\"\n",
1893			       fw_name);
1894		release_firmware(rdev->pfp_fw);
1895		rdev->pfp_fw = NULL;
1896		release_firmware(rdev->me_fw);
1897		rdev->me_fw = NULL;
1898		release_firmware(rdev->ce_fw);
1899		rdev->ce_fw = NULL;
1900		release_firmware(rdev->rlc_fw);
1901		rdev->rlc_fw = NULL;
1902		release_firmware(rdev->mc_fw);
1903		rdev->mc_fw = NULL;
1904		release_firmware(rdev->smc_fw);
1905		rdev->smc_fw = NULL;
1906	}
1907	return err;
1908}
1909
1910/* watermark setup */
1911static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1912				   struct radeon_crtc *radeon_crtc,
1913				   struct drm_display_mode *mode,
1914				   struct drm_display_mode *other_mode)
1915{
1916	u32 tmp, buffer_alloc, i;
1917	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1918	/*
1919	 * Line Buffer Setup
1920	 * There are 3 line buffers, each one shared by 2 display controllers.
1921	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1922	 * the display controllers.  The paritioning is done via one of four
1923	 * preset allocations specified in bits 21:20:
1924	 *  0 - half lb
1925	 *  2 - whole lb, other crtc must be disabled
1926	 */
1927	/* this can get tricky if we have two large displays on a paired group
1928	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1929	 * non-linked crtcs for maximum line buffer allocation.
1930	 */
1931	if (radeon_crtc->base.enabled && mode) {
1932		if (other_mode) {
1933			tmp = 0; /* 1/2 */
1934			buffer_alloc = 1;
1935		} else {
1936			tmp = 2; /* whole */
1937			buffer_alloc = 2;
1938		}
1939	} else {
1940		tmp = 0;
1941		buffer_alloc = 0;
1942	}
1943
1944	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1945	       DC_LB_MEMORY_CONFIG(tmp));
1946
1947	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1948	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1949	for (i = 0; i < rdev->usec_timeout; i++) {
1950		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1951		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
1952			break;
1953		udelay(1);
1954	}
1955
1956	if (radeon_crtc->base.enabled && mode) {
1957		switch (tmp) {
1958		case 0:
1959		default:
1960			return 4096 * 2;
1961		case 2:
1962			return 8192 * 2;
1963		}
1964	}
1965
1966	/* controller not enabled, so no lb used */
1967	return 0;
1968}
1969
1970static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
1971{
1972	u32 tmp = RREG32(MC_SHARED_CHMAP);
1973
1974	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1975	case 0:
1976	default:
1977		return 1;
1978	case 1:
1979		return 2;
1980	case 2:
1981		return 4;
1982	case 3:
1983		return 8;
1984	case 4:
1985		return 3;
1986	case 5:
1987		return 6;
1988	case 6:
1989		return 10;
1990	case 7:
1991		return 12;
1992	case 8:
1993		return 16;
1994	}
1995}
1996
/* Inputs to the DCE6 display watermark calculations: describes one
 * display head's mode timing plus the memory/engine clocks used for
 * the bandwidth estimates below. */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
2012
2013static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
2014{
2015	/* Calculate raw DRAM Bandwidth */
2016	fixed20_12 dram_efficiency; /* 0.7 */
2017	fixed20_12 yclk, dram_channels, bandwidth;
2018	fixed20_12 a;
2019
2020	a.full = dfixed_const(1000);
2021	yclk.full = dfixed_const(wm->yclk);
2022	yclk.full = dfixed_div(yclk, a);
2023	dram_channels.full = dfixed_const(wm->dram_channels * 4);
2024	a.full = dfixed_const(10);
2025	dram_efficiency.full = dfixed_const(7);
2026	dram_efficiency.full = dfixed_div(dram_efficiency, a);
2027	bandwidth.full = dfixed_mul(dram_channels, yclk);
2028	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
2029
2030	return dfixed_trunc(bandwidth);
2031}
2032
2033static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2034{
2035	/* Calculate DRAM Bandwidth and the part allocated to display. */
2036	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
2037	fixed20_12 yclk, dram_channels, bandwidth;
2038	fixed20_12 a;
2039
2040	a.full = dfixed_const(1000);
2041	yclk.full = dfixed_const(wm->yclk);
2042	yclk.full = dfixed_div(yclk, a);
2043	dram_channels.full = dfixed_const(wm->dram_channels * 4);
2044	a.full = dfixed_const(10);
2045	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
2046	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
2047	bandwidth.full = dfixed_mul(dram_channels, yclk);
2048	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
2049
2050	return dfixed_trunc(bandwidth);
2051}
2052
2053static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
2054{
2055	/* Calculate the display Data return Bandwidth */
2056	fixed20_12 return_efficiency; /* 0.8 */
2057	fixed20_12 sclk, bandwidth;
2058	fixed20_12 a;
2059
2060	a.full = dfixed_const(1000);
2061	sclk.full = dfixed_const(wm->sclk);
2062	sclk.full = dfixed_div(sclk, a);
2063	a.full = dfixed_const(10);
2064	return_efficiency.full = dfixed_const(8);
2065	return_efficiency.full = dfixed_div(return_efficiency, a);
2066	a.full = dfixed_const(32);
2067	bandwidth.full = dfixed_mul(a, sclk);
2068	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
2069
2070	return dfixed_trunc(bandwidth);
2071}
2072
/* DMIF request size in bytes; a fixed 32 on DCE6 regardless of the mode. */
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
	return 32;
}
2077
2078static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
2079{
2080	/* Calculate the DMIF Request Bandwidth */
2081	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2082	fixed20_12 disp_clk, sclk, bandwidth;
2083	fixed20_12 a, b1, b2;
2084	u32 min_bandwidth;
2085
2086	a.full = dfixed_const(1000);
2087	disp_clk.full = dfixed_const(wm->disp_clk);
2088	disp_clk.full = dfixed_div(disp_clk, a);
2089	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
2090	b1.full = dfixed_mul(a, disp_clk);
2091
2092	a.full = dfixed_const(1000);
2093	sclk.full = dfixed_const(wm->sclk);
2094	sclk.full = dfixed_div(sclk, a);
2095	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
2096	b2.full = dfixed_mul(a, sclk);
2097
2098	a.full = dfixed_const(10);
2099	disp_clk_request_efficiency.full = dfixed_const(8);
2100	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2101
2102	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
2103
2104	a.full = dfixed_const(min_bandwidth);
2105	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
2106
2107	return dfixed_trunc(bandwidth);
2108}
2109
2110static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
2111{
2112	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
2113	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
2114	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
2115	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
2116
2117	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
2118}
2119
2120static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
2121{
2122	/* Calculate the display mode Average Bandwidth
2123	 * DisplayMode should contain the source and destination dimensions,
2124	 * timing, etc.
2125	 */
2126	fixed20_12 bpp;
2127	fixed20_12 line_time;
2128	fixed20_12 src_width;
2129	fixed20_12 bandwidth;
2130	fixed20_12 a;
2131
2132	a.full = dfixed_const(1000);
2133	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2134	line_time.full = dfixed_div(line_time, a);
2135	bpp.full = dfixed_const(wm->bytes_per_pixel);
2136	src_width.full = dfixed_const(wm->src_width);
2137	bandwidth.full = dfixed_mul(src_width, bpp);
2138	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2139	bandwidth.full = dfixed_div(bandwidth, line_time);
2140
2141	return dfixed_trunc(bandwidth);
2142}
2143
/* Compute the latency watermark (in ns) for one head: the worst-case
 * time the display may have to wait for data, plus any extra line fill
 * time the line buffer cannot absorb. */
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	/* NOTE(review): these initializers divide by available_bandwidth
	 * and disp_clk before the num_heads == 0 early return below -
	 * assumes callers never hand in all-zero clocks; confirm. */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* time spent serving the other heads' data and cursor requests */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	/* heavily scaled or interlaced modes need up to 4 source lines
	 * per destination line; otherwise 2 suffice */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* a = this head's even share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* b = dmif_size / ((mc_latency + 512) / disp_clk) */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	/* tmp = the smaller of the two bandwidth limits above */
	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* b = (disp_clk / 1000) * bytes_per_pixel */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	/* rate at which the line buffer can actually be filled */
	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill one destination line's worth of source pixels
	 * at lb_fill_bw (converted from the /1000 scaled rate) */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line cannot be filled within the active period, the
	 * overrun adds directly to the latency to be hidden */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
2206
2207static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2208{
2209	if (dce6_average_bandwidth(wm) <=
2210	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2211		return true;
2212	else
2213		return false;
2214};
2215
2216static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
2217{
2218	if (dce6_average_bandwidth(wm) <=
2219	    (dce6_available_bandwidth(wm) / wm->num_heads))
2220		return true;
2221	else
2222		return false;
2223};
2224
2225static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2226{
2227	u32 lb_partitions = wm->lb_size / wm->src_width;
2228	u32 line_time = wm->active_time + wm->blank_time;
2229	u32 latency_tolerant_lines;
2230	u32 latency_hiding;
2231	fixed20_12 a;
2232
2233	a.full = dfixed_const(1);
2234	if (wm->vsc.full > a.full)
2235		latency_tolerant_lines = 1;
2236	else {
2237		if (lb_partitions <= (wm->vtaps + 1))
2238			latency_tolerant_lines = 1;
2239		else
2240			latency_tolerant_lines = 2;
2241	}
2242
2243	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2244
2245	if (dce6_latency_watermark(wm) <= latency_hiding)
2246		return true;
2247	else
2248		return false;
2249}
2250
/**
 * dce6_program_watermarks - compute and program display watermarks
 * @rdev: radeon_device pointer
 * @radeon_crtc: the crtc to program
 * @lb_size: line buffer size (in bytes) allocated to this crtc
 * @num_heads: total number of active display heads
 *
 * Calculates the latency watermarks for the high (wm A) and low (wm B)
 * clock states, writes both into the DPG arbitration registers, and
 * programs the priority marks used for display request arbitration.
 * Also saves line_time and both watermarks on the crtc for DPM.
 */
static void dce6_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	/* priorities default to OFF when the crtc is disabled */
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		/* pixel period in ns; line time clamped to the 16-bit
		 * watermark field maximum */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* ARUBA uses the evergreen-style DRAM channel decode */
		if (rdev->family == CHIP_ARUBA)
			dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			dram_channels = si_get_number_of_dram_channels(rdev);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			/* query the high-state clocks from the DPM code */
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			/* query the low-state clocks from the DPM code */
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce6_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce6_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A: latency_watermark_a * (clock/1000) * hsc
		 * / 1000 / 16 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B: same formula with latency_watermark_b */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		/* Save number of lines the linebuffer leads before the scanout */
		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
2413
2414void dce6_bandwidth_update(struct radeon_device *rdev)
2415{
2416	struct drm_display_mode *mode0 = NULL;
2417	struct drm_display_mode *mode1 = NULL;
2418	u32 num_heads = 0, lb_size;
2419	int i;
2420
2421	if (!rdev->mode_info.mode_config_initialized)
2422		return;
2423
2424	radeon_update_display_priority(rdev);
2425
2426	for (i = 0; i < rdev->num_crtc; i++) {
2427		if (rdev->mode_info.crtcs[i]->base.enabled)
2428			num_heads++;
2429	}
2430	for (i = 0; i < rdev->num_crtc; i += 2) {
2431		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2432		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2433		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2434		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2435		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2436		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2437	}
2438}
2439
2440/*
2441 * Core functions
2442 */
2443static void si_tiling_mode_table_init(struct radeon_device *rdev)
2444{
2445	u32 *tile = rdev->config.si.tile_mode_array;
2446	const u32 num_tile_mode_states =
2447			ARRAY_SIZE(rdev->config.si.tile_mode_array);
2448	u32 reg_offset, split_equal_to_row_size;
2449
2450	switch (rdev->config.si.mem_row_size_in_kb) {
2451	case 1:
2452		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2453		break;
2454	case 2:
2455	default:
2456		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2457		break;
2458	case 4:
2459		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2460		break;
2461	}
2462
2463	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2464		tile[reg_offset] = 0;
2465
2466	switch(rdev->family) {
2467	case CHIP_TAHITI:
2468	case CHIP_PITCAIRN:
2469		/* non-AA compressed depth or any compressed stencil */
2470		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2471			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2472			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2473			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2474			   NUM_BANKS(ADDR_SURF_16_BANK) |
2475			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2476			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2477			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2478		/* 2xAA/4xAA compressed depth only */
2479		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2480			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2481			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2482			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2483			   NUM_BANKS(ADDR_SURF_16_BANK) |
2484			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2485			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2486			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2487		/* 8xAA compressed depth only */
2488		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2489			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2490			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2491			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2492			   NUM_BANKS(ADDR_SURF_16_BANK) |
2493			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2494			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2495			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2496		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2497		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2498			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2499			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2500			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2501			   NUM_BANKS(ADDR_SURF_16_BANK) |
2502			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2503			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2504			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2505		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2506		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2507			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2508			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2509			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2510			   NUM_BANKS(ADDR_SURF_16_BANK) |
2511			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2512			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2513			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2514		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2515		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2516			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2517			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2518			   TILE_SPLIT(split_equal_to_row_size) |
2519			   NUM_BANKS(ADDR_SURF_16_BANK) |
2520			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2521			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2522			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2523		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2524		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2525			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2526			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2527			   TILE_SPLIT(split_equal_to_row_size) |
2528			   NUM_BANKS(ADDR_SURF_16_BANK) |
2529			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2530			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2531			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2532		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2533		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2534			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2535			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2536			   TILE_SPLIT(split_equal_to_row_size) |
2537			   NUM_BANKS(ADDR_SURF_16_BANK) |
2538			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2539			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2540			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2541		/* 1D and 1D Array Surfaces */
2542		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2543			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2544			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2545			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2546			   NUM_BANKS(ADDR_SURF_16_BANK) |
2547			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2548			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2549			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2550		/* Displayable maps. */
2551		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2552			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2553			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2554			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2555			   NUM_BANKS(ADDR_SURF_16_BANK) |
2556			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2557			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2558			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2559		/* Display 8bpp. */
2560		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2561			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2562			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2563			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2564			   NUM_BANKS(ADDR_SURF_16_BANK) |
2565			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2566			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2567			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2568		/* Display 16bpp. */
2569		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2570			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2571			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2572			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2573			   NUM_BANKS(ADDR_SURF_16_BANK) |
2574			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2575			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2576			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2577		/* Display 32bpp. */
2578		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2579			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2580			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2581			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2582			   NUM_BANKS(ADDR_SURF_16_BANK) |
2583			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2584			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2585			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2586		/* Thin. */
2587		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2588			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2589			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2590			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2591			   NUM_BANKS(ADDR_SURF_16_BANK) |
2592			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2593			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2594			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2595		/* Thin 8 bpp. */
2596		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2597			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2598			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2599			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2600			   NUM_BANKS(ADDR_SURF_16_BANK) |
2601			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2602			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2603			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2604		/* Thin 16 bpp. */
2605		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2606			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2607			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2608			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2609			   NUM_BANKS(ADDR_SURF_16_BANK) |
2610			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2611			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2612			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2613		/* Thin 32 bpp. */
2614		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2615			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2616			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2617			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2618			   NUM_BANKS(ADDR_SURF_16_BANK) |
2619			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2620			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2621			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2622		/* Thin 64 bpp. */
2623		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2624			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2625			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2626			   TILE_SPLIT(split_equal_to_row_size) |
2627			   NUM_BANKS(ADDR_SURF_16_BANK) |
2628			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2629			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2630			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2631		/* 8 bpp PRT. */
2632		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2633			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2634			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2635			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2636			   NUM_BANKS(ADDR_SURF_16_BANK) |
2637			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2638			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2639			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2640		/* 16 bpp PRT */
2641		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2642			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2643			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2644			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2645			   NUM_BANKS(ADDR_SURF_16_BANK) |
2646			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2647			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2648			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2649		/* 32 bpp PRT */
2650		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2651			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2652			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2653			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2654			   NUM_BANKS(ADDR_SURF_16_BANK) |
2655			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2656			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2657			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2658		/* 64 bpp PRT */
2659		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2660			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2661			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2662			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2663			   NUM_BANKS(ADDR_SURF_16_BANK) |
2664			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2665			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2666			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2667		/* 128 bpp PRT */
2668		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2669			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2670			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2671			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2672			   NUM_BANKS(ADDR_SURF_8_BANK) |
2673			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2674			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2675			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2676
2677		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2678			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2679		break;
2680
2681	case CHIP_VERDE:
2682	case CHIP_OLAND:
2683	case CHIP_HAINAN:
2684		/* non-AA compressed depth or any compressed stencil */
2685		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2686			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2687			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2688			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2689			   NUM_BANKS(ADDR_SURF_16_BANK) |
2690			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2691			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2692			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2693		/* 2xAA/4xAA compressed depth only */
2694		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2695			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2696			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2697			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2698			   NUM_BANKS(ADDR_SURF_16_BANK) |
2699			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2700			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2701			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2702		/* 8xAA compressed depth only */
2703		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2704			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2705			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2706			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2707			   NUM_BANKS(ADDR_SURF_16_BANK) |
2708			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2709			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2710			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2711		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2712		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2713			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2714			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2715			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2716			   NUM_BANKS(ADDR_SURF_16_BANK) |
2717			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2718			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2719			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2720		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2721		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2722			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2723			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2724			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2725			   NUM_BANKS(ADDR_SURF_16_BANK) |
2726			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2727			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2728			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2729		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2730		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2731			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2732			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2733			   TILE_SPLIT(split_equal_to_row_size) |
2734			   NUM_BANKS(ADDR_SURF_16_BANK) |
2735			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2736			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2737			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2738		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2739		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2740			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2741			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2742			   TILE_SPLIT(split_equal_to_row_size) |
2743			   NUM_BANKS(ADDR_SURF_16_BANK) |
2744			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2745			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2746			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2747		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2748		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2749			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2750			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2751			   TILE_SPLIT(split_equal_to_row_size) |
2752			   NUM_BANKS(ADDR_SURF_16_BANK) |
2753			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2754			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2755			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2756		/* 1D and 1D Array Surfaces */
2757		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2758			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2759			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2760			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2761			   NUM_BANKS(ADDR_SURF_16_BANK) |
2762			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2763			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2764			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2765		/* Displayable maps. */
2766		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2767			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2768			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2769			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2770			   NUM_BANKS(ADDR_SURF_16_BANK) |
2771			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2772			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2773			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2774		/* Display 8bpp. */
2775		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2776			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2777			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2778			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2779			   NUM_BANKS(ADDR_SURF_16_BANK) |
2780			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2781			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2782			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2783		/* Display 16bpp. */
2784		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2785			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2786			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2787			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2788			   NUM_BANKS(ADDR_SURF_16_BANK) |
2789			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2790			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2791			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2792		/* Display 32bpp. */
2793		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2794			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2795			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2796			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2797			   NUM_BANKS(ADDR_SURF_16_BANK) |
2798			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2799			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2800			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2801		/* Thin. */
2802		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2803			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2804			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2805			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2806			   NUM_BANKS(ADDR_SURF_16_BANK) |
2807			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2808			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2809			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2810		/* Thin 8 bpp. */
2811		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2812			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2813			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2814			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2815			   NUM_BANKS(ADDR_SURF_16_BANK) |
2816			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2817			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2818			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2819		/* Thin 16 bpp. */
2820		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2821			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2822			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2823			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2824			   NUM_BANKS(ADDR_SURF_16_BANK) |
2825			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2826			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2827			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2828		/* Thin 32 bpp. */
2829		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2830			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2831			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2832			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2833			   NUM_BANKS(ADDR_SURF_16_BANK) |
2834			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2835			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2836			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2837		/* Thin 64 bpp. */
2838		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2839			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2840			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2841			   TILE_SPLIT(split_equal_to_row_size) |
2842			   NUM_BANKS(ADDR_SURF_16_BANK) |
2843			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2844			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2845			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2846		/* 8 bpp PRT. */
2847		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2848			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2849			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2850			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2851			   NUM_BANKS(ADDR_SURF_16_BANK) |
2852			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2853			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2854			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2855		/* 16 bpp PRT */
2856		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2857			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2858			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2859			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2860			   NUM_BANKS(ADDR_SURF_16_BANK) |
2861			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2862			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2863			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2864		/* 32 bpp PRT */
2865		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2866			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2867			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2868			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2869			   NUM_BANKS(ADDR_SURF_16_BANK) |
2870			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2871			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2872			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2873		/* 64 bpp PRT */
2874		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2875			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2876			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2877			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2878			   NUM_BANKS(ADDR_SURF_16_BANK) |
2879			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2880			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2881			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2882		/* 128 bpp PRT */
2883		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2884			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2885			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2886			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2887			   NUM_BANKS(ADDR_SURF_8_BANK) |
2888			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2889			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2890			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2891
2892		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2893			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2894		break;
2895
2896	default:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2897		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
2898	}
2899}
2900
2901static void si_select_se_sh(struct radeon_device *rdev,
2902			    u32 se_num, u32 sh_num)
2903{
2904	u32 data = INSTANCE_BROADCAST_WRITES;
2905
2906	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
2907		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
2908	else if (se_num == 0xffffffff)
2909		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
2910	else if (sh_num == 0xffffffff)
2911		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
2912	else
2913		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
2914	WREG32(GRBM_GFX_INDEX, data);
2915}
2916
2917static u32 si_create_bitmask(u32 bit_width)
2918{
2919	u32 i, mask = 0;
2920
2921	for (i = 0; i < bit_width; i++) {
2922		mask <<= 1;
2923		mask |= 1;
2924	}
2925	return mask;
2926}
2927
/* si_get_cu_enabled - bitmask of usable CUs in the currently selected SH.
 *
 * Combines the INACTIVE_CUS fields of CC_GC_SHADER_ARRAY_CONFIG (only
 * honored when bit 0 of that register is set) and
 * GC_USER_SHADER_ARRAY_CONFIG, then inverts the merged "inactive" bits
 * and limits the result to @cu_per_sh bits.
 * NOTE(review): assumes the caller has already routed register access to
 * a single SE/SH via si_select_se_sh() - confirm at call sites.
 */
static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
{
	u32 data, mask;

	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
	if (data & 1)
		data &= INACTIVE_CUS_MASK;
	else
		data = 0;
	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);

	data >>= INACTIVE_CUS_SHIFT;

	mask = si_create_bitmask(cu_per_sh);

	/* registers track inactive CUs; invert to get the active set */
	return ~data & mask;
}
2945
/* si_setup_spi - program SPI static thread management.
 *
 * For every SE/SH pair, read the active-CU mask and clear the bit of the
 * first active CU found in SPI_STATIC_THREAD_MGMT_3 (removing one CU per
 * shader array from static thread allocation).  Broadcast routing is
 * restored before returning.
 */
static void si_setup_spi(struct radeon_device *rdev,
			 u32 se_num, u32 sh_per_se,
			 u32 cu_per_sh)
{
	int i, j, k;
	u32 data, mask, active_cu;

	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
			active_cu = si_get_cu_enabled(rdev, cu_per_sh);

			mask = 1;
			for (k = 0; k < 16; k++) {
				/* NOTE(review): "mask <<= k" only ever tests
				 * bits 0, 1, 3, 6, 10, 15 (the shift amount
				 * grows with k).  If the intent was to scan
				 * every CU bit, this should probably be
				 * "mask <<= 1" - confirm against the hardware
				 * docs before changing init behavior.
				 */
				mask <<= k;
				if (active_cu & mask) {
					data &= ~mask;
					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
					break;
				}
			}
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
2972
/* si_get_rb_disabled - bitmask of disabled render backends for the
 * currently selected SE/SH.
 *
 * Merges the BACKEND_DISABLE fields of CC_RB_BACKEND_DISABLE (only
 * honored when bit 0 of that register is set) and
 * GC_USER_RB_BACKEND_DISABLE, limited to the number of RBs per shader
 * array (max_rb_num_per_se / sh_per_se).
 */
static u32 si_get_rb_disabled(struct radeon_device *rdev,
			      u32 max_rb_num_per_se,
			      u32 sh_per_se)
{
	u32 data, mask;

	data = RREG32(CC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= BACKEND_DISABLE_MASK;
	else
		data = 0;
	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

	data >>= BACKEND_DISABLE_SHIFT;

	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);

	return data & mask;
}
2992
/* si_setup_rb - derive the enabled render-backend mask and program
 * PA_SC_RASTER_CONFIG.
 *
 * Gathers the per-SE/SH disabled-RB bits into a single bitmap, inverts
 * it into an enabled mask (cached in
 * rdev->config.si.backend_enable_mask), then writes a per-SE raster
 * config that maps rasterization to whichever RB of each pair is alive.
 */
static void si_setup_rb(struct radeon_device *rdev,
			u32 se_num, u32 sh_per_se,
			u32 max_rb_num_per_se)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* collect the disabled-RB bits from every shader array */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert into an enabled mask over all RBs on the chip */
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	rdev->config.si.backend_enable_mask = enabled_rbs;

	for (i = 0; i < se_num; i++) {
		si_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		/* consume two enabled-RB bits per SH and pick the RB_MAP
		 * variant matching which of the pair is present */
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
3042
/* si_gpu_init - one-time graphics engine setup for SI ASICs.
 *
 * Fills rdev->config.si with per-ASIC limits, derives gb_addr_config and
 * the userspace tiling dword from the MC RAM configuration, initializes
 * the tiling mode table, render backends and SPI, counts the active CUs,
 * and programs the 3D engine HW default registers.
 */
static void si_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* per-ASIC shader/pipe/cache limits and golden gb_addr_config */
	switch (rdev->family) {
	case CHIP_TAHITI:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 12;
		rdev->config.si.max_cu_per_sh = 8;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 12;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PITCAIRN:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 8;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 8;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VERDE:
	default:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_OLAND:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 6;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 2;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAINAN:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 1;
		rdev->config.si.max_texture_channel_caches = 2;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 1);
	WREG32(SRBM_INT_ACK, 1);

	evergreen_fix_pci_max_read_req_size(rdev);

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	/* NOTE(review): mc_shared_chmap is read here but never used below;
	 * candidate for removal. */
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
	rdev->config.si.mem_max_burst_length_bytes = 256;
	/* row size in KB from the MC column count, capped at 4KB */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.si.mem_row_size_in_kb > 4)
		rdev->config.si.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.si.shader_engine_tile_size = 32;
	rdev->config.si.num_gpus = 1;
	rdev->config.si.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.si.tile_config = 0;
	switch (rdev->config.si.num_tile_pipes) {
	case 1:
		rdev->config.si.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.si.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.si.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.si.tile_config |= (3 << 0);
		break;
	}
	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
	case 0: /* four banks */
		rdev->config.si.tile_config |= 0 << 4;
		break;
	case 1: /* eight banks */
		rdev->config.si.tile_config |= 1 << 4;
		break;
	case 2: /* sixteen banks */
	default:
		rdev->config.si.tile_config |= 2 << 4;
		break;
	}
	rdev->config.si.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.si.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* broadcast the address config to every block that needs it */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	if (rdev->has_uvd) {
		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
	}

	si_tiling_mode_table_init(rdev);

	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
		    rdev->config.si.max_sh_per_se,
		    rdev->config.si.max_backends_per_se);

	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
		     rdev->config.si.max_sh_per_se,
		     rdev->config.si.max_cu_per_sh);

	/* count the CUs actually present across all SE/SH pairs */
	rdev->config.si.active_cus = 0;
	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
			rdev->config.si.active_cus +=
				hweight32(si_get_cu_active_bitmap(rdev, i, j));
		}
	}

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	WREG32(SX_DEBUG_1, sx_debug_1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
	WREG32(CB_PERFCOUNTER3_SELECT1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}
3308
3309/*
3310 * GPU scratch registers helpers function.
3311 */
3312static void si_scratch_init(struct radeon_device *rdev)
3313{
3314	int i;
3315
3316	rdev->scratch.num_reg = 7;
3317	rdev->scratch.reg_base = SCRATCH_REG0;
3318	for (i = 0; i < rdev->scratch.num_reg; i++) {
3319		rdev->scratch.free[i] = true;
3320		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3321	}
3322}
3323
/* si_fence_ring_emit - emit a fence on a CP ring.
 *
 * Emits a cache flush over the GART (SURFACE_SYNC on TC/TCL1/SH I+K
 * caches) followed by an EVENT_WRITE_EOP that writes fence->seq to the
 * fence driver's GPU address and raises an interrupt (DATA_SEL(1),
 * INT_SEL(2)).  The caller must have the ring locked with enough space.
 */
void si_fence_ring_emit(struct radeon_device *rdev,
			struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);	/* CP_COHER_SIZE: full range */
	radeon_ring_write(ring, 0);		/* CP_COHER_BASE */
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
3350
3351/*
3352 * IB stuff
3353 */
/* si_ring_ib_execute - schedule an indirect buffer on a CP ring.
 *
 * For const IBs a SWITCH_BUFFER packet precedes the
 * INDIRECT_BUFFER_CONST.  For normal IBs the next read pointer is
 * published first (via the rptr save register, or writeback memory when
 * enabled); the literal offsets added to ring->wptr below must match the
 * number of dwords emitted by this function - keep them in sync when
 * editing.  Non-const IBs are followed by a GART cache flush scoped to
 * the IB's VM id.
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 header;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 dwords for this write + 4 for the IB packet + 8 for the flush */
			next_rptr = ring->wptr + 3 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_CONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			/* 5 dwords for this write + 4 for the IB packet + 8 for the flush */
			next_rptr = ring->wptr + 5 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, (1 << 8));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	if (!ib->is_const_ib) {
		/* flush read cache over gart for this vmid */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, vm_id);
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
				  PACKET3_TC_ACTION_ENA |
				  PACKET3_SH_KCACHE_ACTION_ENA |
				  PACKET3_SH_ICACHE_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
	}
}
3410
3411/*
3412 * CP.
3413 */
/* si_cp_enable - start or halt the CP micro engines (PFP/ME/CE).
 *
 * On disable, scratch-register writeback is masked off and the three CP
 * rings are flagged not ready; the TTM active VRAM size is restored
 * first when the GFX ring is the buffer-copy ring.
 */
static void si_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	}
	udelay(50);
}
3429
3430static int si_cp_load_microcode(struct radeon_device *rdev)
3431{
 
3432	int i;
3433
3434	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3435		return -EINVAL;
3436
3437	si_cp_enable(rdev, false);
3438
3439	if (rdev->new_fw) {
3440		const struct gfx_firmware_header_v1_0 *pfp_hdr =
3441			(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3442		const struct gfx_firmware_header_v1_0 *ce_hdr =
3443			(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3444		const struct gfx_firmware_header_v1_0 *me_hdr =
3445			(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3446		const __le32 *fw_data;
3447		u32 fw_size;
3448
3449		radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3450		radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3451		radeon_ucode_print_gfx_hdr(&me_hdr->header);
3452
3453		/* PFP */
3454		fw_data = (const __le32 *)
3455			(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3456		fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3457		WREG32(CP_PFP_UCODE_ADDR, 0);
3458		for (i = 0; i < fw_size; i++)
3459			WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3460		WREG32(CP_PFP_UCODE_ADDR, 0);
3461
3462		/* CE */
3463		fw_data = (const __le32 *)
3464			(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3465		fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3466		WREG32(CP_CE_UCODE_ADDR, 0);
3467		for (i = 0; i < fw_size; i++)
3468			WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3469		WREG32(CP_CE_UCODE_ADDR, 0);
3470
3471		/* ME */
3472		fw_data = (const __be32 *)
3473			(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3474		fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3475		WREG32(CP_ME_RAM_WADDR, 0);
3476		for (i = 0; i < fw_size; i++)
3477			WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3478		WREG32(CP_ME_RAM_WADDR, 0);
3479	} else {
3480		const __be32 *fw_data;
3481
3482		/* PFP */
3483		fw_data = (const __be32 *)rdev->pfp_fw->data;
3484		WREG32(CP_PFP_UCODE_ADDR, 0);
3485		for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3486			WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3487		WREG32(CP_PFP_UCODE_ADDR, 0);
3488
3489		/* CE */
3490		fw_data = (const __be32 *)rdev->ce_fw->data;
3491		WREG32(CP_CE_UCODE_ADDR, 0);
3492		for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3493			WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3494		WREG32(CP_CE_UCODE_ADDR, 0);
3495
3496		/* ME */
3497		fw_data = (const __be32 *)rdev->me_fw->data;
3498		WREG32(CP_ME_RAM_WADDR, 0);
3499		for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3500			WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3501		WREG32(CP_ME_RAM_WADDR, 0);
3502	}
3503
3504	WREG32(CP_PFP_UCODE_ADDR, 0);
3505	WREG32(CP_CE_UCODE_ADDR, 0);
3506	WREG32(CP_ME_RAM_WADDR, 0);
3507	WREG32(CP_ME_RAM_RADDR, 0);
3508	return 0;
3509}
3510
3511static int si_cp_start(struct radeon_device *rdev)
3512{
3513	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3514	int r, i;
3515
3516	r = radeon_ring_lock(rdev, ring, 7 + 4);
3517	if (r) {
3518		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3519		return r;
3520	}
3521	/* init the CP */
3522	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3523	radeon_ring_write(ring, 0x1);
3524	radeon_ring_write(ring, 0x0);
3525	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3526	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3527	radeon_ring_write(ring, 0);
3528	radeon_ring_write(ring, 0);
3529
3530	/* init the CE partitions */
3531	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3532	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3533	radeon_ring_write(ring, 0xc000);
3534	radeon_ring_write(ring, 0xe000);
3535	radeon_ring_unlock_commit(rdev, ring, false);
3536
3537	si_cp_enable(rdev, true);
3538
3539	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3540	if (r) {
3541		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3542		return r;
3543	}
3544
3545	/* setup clear context state */
3546	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3547	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3548
3549	for (i = 0; i < si_default_size; i++)
3550		radeon_ring_write(ring, si_default_state[i]);
3551
3552	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3553	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3554
3555	/* set clear context state */
3556	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3557	radeon_ring_write(ring, 0);
3558
3559	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3560	radeon_ring_write(ring, 0x00000316);
3561	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3562	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3563
3564	radeon_ring_unlock_commit(rdev, ring, false);
3565
3566	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3567		ring = &rdev->ring[i];
3568		r = radeon_ring_lock(rdev, ring, 2);
3569
3570		/* clear the compute context state */
3571		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3572		radeon_ring_write(ring, 0);
3573
3574		radeon_ring_unlock_commit(rdev, ring, false);
3575	}
3576
3577	return 0;
3578}
3579
3580static void si_cp_fini(struct radeon_device *rdev)
3581{
3582	struct radeon_ring *ring;
3583	si_cp_enable(rdev, false);
3584
3585	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3586	radeon_ring_fini(rdev, ring);
3587	radeon_scratch_free(rdev, ring->rptr_save_reg);
3588
3589	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3590	radeon_ring_fini(rdev, ring);
3591	radeon_scratch_free(rdev, ring->rptr_save_reg);
3592
3593	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3594	radeon_ring_fini(rdev, ring);
3595	radeon_scratch_free(rdev, ring->rptr_save_reg);
3596}
3597
/* si_cp_resume - bring up the CP ring buffers
 *
 * Programs the gfx ring (ring 0) and the two compute rings (rings 1/2):
 * ring buffer size, read/write pointers, writeback addresses and ring
 * base, then starts the CP and ring-tests each ring.
 *
 * Returns 0 on success.  A gfx ring-test failure is fatal: all three
 * rings are marked not ready and the error is returned.  A compute
 * ring-test failure only marks that particular ring not ready.
 */
static int si_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* keep GUI idle interrupts off while the rings are reprogrammed */
	si_enable_gui_idle_interrupt(rdev, false);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, 0);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	/* low byte: log2 of ring size in qwords; bits 15:8: fetch size field */
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		/* no writeback: CP must not update rptr in memory */
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);	/* drop RB_RPTR_WR_ENA again */

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	/* ring1  - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	/* start the rings */
	si_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		/* gfx ring failure is fatal for all CP rings */
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	}

	si_enable_gui_idle_interrupt(rdev, true);

	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
3723
/* si_gpu_check_soft_reset - determine which GPU blocks appear hung
 *
 * Samples the GRBM, SRBM, DMA and VM L2 status registers and builds a
 * RADEON_RESET_* bitmask of blocks that look like they need a soft
 * reset.  An MC busy indication is deliberately cleared from the mask,
 * since a busy MC is most likely not actually hung.  Returns the mask
 * (0 means no reset needed).
 */
u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   BCI_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(GRBM_STATUS2);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
3804
/* si_gpu_soft_reset - soft reset the blocks named in @reset_mask
 *
 * Quiesces the GPU first (PG/CG disabled, RLC stopped, CP halted, DMA
 * ring buffers disabled, MC stopped), maps the RADEON_RESET_* bits to
 * GRBM/SRBM soft-reset bits, pulses those bits, then restores the MC.
 * The ordering of these steps matters; do not reorder.
 *
 * No-op when @reset_mask is 0.
 */
static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable PG/CG */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* stop the rlc */
	si_rlc_stop(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	/* let in-flight work drain before stopping the MC */
	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* map the requested resets onto GRBM/SRBM soft-reset bits */
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_BCI |
			SOFT_RESET_SPI |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (reset_mask & RADEON_RESET_MC)
		srbm_soft_reset |= SOFT_RESET_MC;

	/* pulse the GRBM reset bits: set, wait, clear.  The read-backs
	 * after each write flush the posted write before the delay. */
	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	/* same pulse sequence for the SRBM reset bits */
	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
3936
/* si_set_clk_bypass_mode - put sclk/mclk into bypass mode
 *
 * Used before a PCI config reset: enables SPLL bypass, requests the
 * control change and polls for the SPLL to acknowledge it (bounded by
 * rdev->usec_timeout), then clears the request and deselects the MPLL
 * as the mclk source.
 */
static void si_set_clk_bypass_mode(struct radeon_device *rdev)
{
	u32 tmp, i;

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_BYPASS_EN;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
	tmp |= SPLL_CTLREQ_CHG;
	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);

	/* wait for the SPLL to acknowledge the change request */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}

	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);

	tmp = RREG32(MPLL_CNTL_MODE);
	tmp &= ~MPLL_MCLK_SEL;
	WREG32(MPLL_CNTL_MODE, tmp);
}
3963
/* si_spll_powerdown - power down the SPLL
 *
 * Takes software control of the SPLL, asserts its reset and sleep bits,
 * then returns control.  Called after si_set_clk_bypass_mode() on the
 * PCI config reset path, so the sclk is already running from bypass.
 */
static void si_spll_powerdown(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(SPLL_CNTL_MODE);
	tmp |= SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_RESET;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_SLEEP;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(SPLL_CNTL_MODE);
	tmp &= ~SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);
}
3984
/* si_gpu_pci_config_reset - reset the ASIC via PCI config space
 *
 * Heavier fallback used when the soft reset did not clear the hang:
 * quiesces the engines, stops memory access, switches the clocks to
 * bypass and powers down the SPLL, disables bus mastering, then issues
 * the PCI config reset and polls CONFIG_MEMSIZE (bounded by
 * rdev->usec_timeout) until the ASIC responds again.
 */
static void si_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
	/* dma0 */
	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	/* dma1 */
	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	si_rlc_stop(rdev);

	/* let in-flight work drain */
	udelay(50);

	/* disable mem access */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
	}

	/* set mclk/sclk to bypass */
	si_set_clk_bypass_mode(rdev);
	/* powerdown spll */
	si_spll_powerdown(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* reset */
	radeon_pci_config_reset(rdev);
	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* all-ones reads mean the device is still in reset */
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
4036
4037int si_asic_reset(struct radeon_device *rdev)
4038{
4039	u32 reset_mask;
4040
4041	reset_mask = si_gpu_check_soft_reset(rdev);
4042
4043	if (reset_mask)
4044		r600_set_bios_scratch_engine_hung(rdev, true);
4045
4046	/* try soft reset */
4047	si_gpu_soft_reset(rdev, reset_mask);
4048
4049	reset_mask = si_gpu_check_soft_reset(rdev);
4050
4051	/* try pci config reset */
4052	if (reset_mask && radeon_hard_reset)
4053		si_gpu_pci_config_reset(rdev);
4054
4055	reset_mask = si_gpu_check_soft_reset(rdev);
4056
4057	if (!reset_mask)
4058		r600_set_bios_scratch_engine_hung(rdev, false);
4059
4060	return 0;
4061}
4062
4063/**
4064 * si_gfx_is_lockup - Check if the GFX engine is locked up
4065 *
4066 * @rdev: radeon_device pointer
4067 * @ring: radeon_ring structure holding ring information
4068 *
4069 * Check if the GFX engine is locked up.
4070 * Returns true if the engine appears to be locked up, false if not.
4071 */
4072bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4073{
4074	u32 reset_mask = si_gpu_check_soft_reset(rdev);
4075
4076	if (!(reset_mask & (RADEON_RESET_GFX |
4077			    RADEON_RESET_COMPUTE |
4078			    RADEON_RESET_CP))) {
4079		radeon_ring_lockup_update(rdev, ring);
4080		return false;
4081	}
4082	return radeon_ring_test_lockup(rdev, ring);
4083}
4084
4085/* MC */
/* si_mc_program - program the memory controller apertures
 *
 * Clears the HDP registers, stops the MC, programs the system aperture,
 * FB location and (disabled) AGP apertures, then resumes the MC and
 * turns off the VGA renderer so it cannot scribble over VRAM objects.
 */
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	/* NOTE(review): 0x2c14..0x2c24 stride 0x18 look like per-surface
	 * HDP registers - confirm against the register headers */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	/* MC must be idle while the apertures are reprogrammed */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	if (!ASIC_IS_NODCE(rdev))
		/* Lockout access through VGA aperture*/
		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB location: top 16 bits = end >> 24, low 16 bits = start >> 24 */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* AGP aperture disabled: BOT above TOP */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	if (!ASIC_IS_NODCE(rdev)) {
		/* we need to own VRAM, so turn off the VGA renderer here
		 * to stop it overwriting our objects */
		rv515_vga_render_disable(rdev);
	}
}
4136
4137void si_vram_gtt_location(struct radeon_device *rdev,
4138			  struct radeon_mc *mc)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4139{
4140	if (mc->mc_vram_size > 0xFFC0000000ULL) {
4141		/* leave room for at least 1024M GTT */
4142		dev_warn(rdev->dev, "limiting VRAM\n");
4143		mc->real_vram_size = 0xFFC0000000ULL;
4144		mc->mc_vram_size = 0xFFC0000000ULL;
4145	}
4146	radeon_vram_location(rdev, &rdev->mc, 0);
4147	rdev->mc.gtt_base_align = 0;
4148	radeon_gtt_location(rdev, mc);
4149}
4150
4151static int si_mc_init(struct radeon_device *rdev)
4152{
4153	u32 tmp;
4154	int chansize, numchan;
4155
4156	/* Get VRAM informations */
4157	rdev->mc.vram_is_ddr = true;
4158	tmp = RREG32(MC_ARB_RAMCFG);
4159	if (tmp & CHANSIZE_OVERRIDE) {
4160		chansize = 16;
4161	} else if (tmp & CHANSIZE_MASK) {
4162		chansize = 64;
4163	} else {
4164		chansize = 32;
4165	}
4166	tmp = RREG32(MC_SHARED_CHMAP);
4167	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
4168	case 0:
4169	default:
4170		numchan = 1;
4171		break;
4172	case 1:
4173		numchan = 2;
4174		break;
4175	case 2:
4176		numchan = 4;
4177		break;
4178	case 3:
4179		numchan = 8;
4180		break;
4181	case 4:
4182		numchan = 3;
4183		break;
4184	case 5:
4185		numchan = 6;
4186		break;
4187	case 6:
4188		numchan = 10;
4189		break;
4190	case 7:
4191		numchan = 12;
4192		break;
4193	case 8:
4194		numchan = 16;
4195		break;
4196	}
4197	rdev->mc.vram_width = numchan * chansize;
4198	/* Could aper size report 0 ? */
4199	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4200	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4201	/* size in MB on si */
4202	tmp = RREG32(CONFIG_MEMSIZE);
4203	/* some boards may have garbage in the upper 16 bits */
4204	if (tmp & 0xffff0000) {
4205		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
4206		if (tmp & 0xffff)
4207			tmp &= 0xffff;
4208	}
4209	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4210	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
4211	rdev->mc.visible_vram_size = rdev->mc.aper_size;
4212	si_vram_gtt_location(rdev, &rdev->mc);
4213	radeon_update_bandwidth_info(rdev);
4214
4215	return 0;
4216}
4217
4218/*
4219 * GART
4220 */
/* si_pcie_gart_tlb_flush - flush the HDP cache and the GART TLB.
 * Only VM context 0 (the GART mapping) is invalidated here. */
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
4229
/* si_pcie_gart_enable - program and enable the PCIE GART
 *
 * Pins the GART table in VRAM, configures the L1 TLB and VM L2 cache,
 * points VM context 0 at the GART range and table, restores the saved
 * page-table base addresses for contexts 1-15 (saved at disable time)
 * and enables those contexts with full protection-fault reporting,
 * then flushes the TLB.  Returns 0 on success or a negative error code
 * if the table could not be pinned.
 */
static int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(4) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* NOTE(review): undocumented registers; purpose not visible here */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		/* contexts 0-7 and 8-15 live in two separate register banks */
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->vm_manager.saved_table_addr[i]);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	si_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
4317
/* si_pcie_gart_disable - disable the PCIE GART
 *
 * Saves the context 1-15 page-table base addresses (restored by
 * si_pcie_gart_enable() on resume), disables all VM contexts, puts the
 * TLB/L2 into pass-through mode, and unpins the GART table.
 */
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	/* save per-context page table addresses for the next enable */
	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	radeon_gart_table_vram_unpin(rdev);
}
4347
/* si_pcie_gart_fini - full GART teardown: disable the hardware, free
 * the table memory, then release the core GART state. */
static void si_pcie_gart_fini(struct radeon_device *rdev)
{
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
4354
4355/* vm parser */
4356static bool si_vm_reg_valid(u32 reg)
4357{
4358	/* context regs are fine */
4359	if (reg >= 0x28000)
4360		return true;
4361
4362	/* check config regs */
4363	switch (reg) {
4364	case GRBM_GFX_INDEX:
4365	case CP_STRMOUT_CNTL:
4366	case VGT_VTX_VECT_EJECT_REG:
4367	case VGT_CACHE_INVALIDATION:
4368	case VGT_ESGS_RING_SIZE:
4369	case VGT_GSVS_RING_SIZE:
4370	case VGT_GS_VERTEX_REUSE:
4371	case VGT_PRIMITIVE_TYPE:
4372	case VGT_INDEX_TYPE:
4373	case VGT_NUM_INDICES:
4374	case VGT_NUM_INSTANCES:
4375	case VGT_TF_RING_SIZE:
4376	case VGT_HS_OFFCHIP_PARAM:
4377	case VGT_TF_MEMORY_BASE:
4378	case PA_CL_ENHANCE:
4379	case PA_SU_LINE_STIPPLE_VALUE:
4380	case PA_SC_LINE_STIPPLE_STATE:
4381	case PA_SC_ENHANCE:
4382	case SQC_CACHES:
4383	case SPI_STATIC_THREAD_MGMT_1:
4384	case SPI_STATIC_THREAD_MGMT_2:
4385	case SPI_STATIC_THREAD_MGMT_3:
4386	case SPI_PS_MAX_WAVE_ID:
4387	case SPI_CONFIG_CNTL:
4388	case SPI_CONFIG_CNTL_1:
4389	case TA_CNTL_AUX:
4390		return true;
4391	default:
4392		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
4393		return false;
4394	}
4395}
4396
4397static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4398				  u32 *ib, struct radeon_cs_packet *pkt)
4399{
4400	switch (pkt->opcode) {
4401	case PACKET3_NOP:
4402	case PACKET3_SET_BASE:
4403	case PACKET3_SET_CE_DE_COUNTERS:
4404	case PACKET3_LOAD_CONST_RAM:
4405	case PACKET3_WRITE_CONST_RAM:
4406	case PACKET3_WRITE_CONST_RAM_OFFSET:
4407	case PACKET3_DUMP_CONST_RAM:
4408	case PACKET3_INCREMENT_CE_COUNTER:
4409	case PACKET3_WAIT_ON_DE_COUNTER:
4410	case PACKET3_CE_WRITE:
4411		break;
4412	default:
4413		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4414		return -EINVAL;
4415	}
4416	return 0;
4417}
4418
4419static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4420{
4421	u32 start_reg, reg, i;
4422	u32 command = ib[idx + 4];
4423	u32 info = ib[idx + 1];
4424	u32 idx_value = ib[idx];
4425	if (command & PACKET3_CP_DMA_CMD_SAS) {
4426		/* src address space is register */
4427		if (((info & 0x60000000) >> 29) == 0) {
4428			start_reg = idx_value << 2;
4429			if (command & PACKET3_CP_DMA_CMD_SAIC) {
4430				reg = start_reg;
4431				if (!si_vm_reg_valid(reg)) {
4432					DRM_ERROR("CP DMA Bad SRC register\n");
4433					return -EINVAL;
4434				}
4435			} else {
4436				for (i = 0; i < (command & 0x1fffff); i++) {
4437					reg = start_reg + (4 * i);
4438					if (!si_vm_reg_valid(reg)) {
4439						DRM_ERROR("CP DMA Bad SRC register\n");
4440						return -EINVAL;
4441					}
4442				}
4443			}
4444		}
4445	}
4446	if (command & PACKET3_CP_DMA_CMD_DAS) {
4447		/* dst address space is register */
4448		if (((info & 0x00300000) >> 20) == 0) {
4449			start_reg = ib[idx + 2];
4450			if (command & PACKET3_CP_DMA_CMD_DAIC) {
4451				reg = start_reg;
4452				if (!si_vm_reg_valid(reg)) {
4453					DRM_ERROR("CP DMA Bad DST register\n");
4454					return -EINVAL;
4455				}
4456			} else {
4457				for (i = 0; i < (command & 0x1fffff); i++) {
4458					reg = start_reg + (4 * i);
4459				if (!si_vm_reg_valid(reg)) {
4460						DRM_ERROR("CP DMA Bad DST register\n");
4461						return -EINVAL;
4462					}
4463				}
4464			}
4465		}
4466	}
4467	return 0;
4468}
4469
/* si_vm_packet3_gfx_check - validate a PACKET3 from a gfx-ring VM IB
 *
 * Harmless opcodes are whitelisted outright; packets that can write
 * registers (COPY_DATA, WRITE_DATA, COND_WRITE, COPY_DW,
 * SET_CONFIG_REG, CP_DMA) have their destination registers checked
 * against si_vm_reg_valid().  Returns 0 if the packet is allowed,
 * -EINVAL otherwise.
 */
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
				   u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	/* opcodes that cannot write registers: always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDIRECT_MULTI:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* field 0x0f00 == 0: dst presumably is a register - confirm
		 * against the PM4 packet spec */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			/* bit 16 set: single register, else a run of them */
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		/* the whole register run must lie inside the config range */
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4587
/* si_vm_packet3_compute_check - validate a PACKET3 from a compute-ring
 * VM IB
 *
 * Same scheme as si_vm_packet3_gfx_check() but with the smaller
 * compute opcode whitelist (no draw/index packets).  Register-writing
 * packets have their destination registers checked against
 * si_vm_reg_valid().  Returns 0 if allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
				       u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, reg, i;

	switch (pkt->opcode) {
	/* opcodes that cannot write registers: always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* field 0x0f00 == 0: dst presumably is a register - confirm
		 * against the PM4 packet spec */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			/* bit 16 set: single register, else a run of them */
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4675
4676int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4677{
4678	int ret = 0;
4679	u32 idx = 0, i;
4680	struct radeon_cs_packet pkt;
4681
4682	do {
4683		pkt.idx = idx;
4684		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
4685		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
4686		pkt.one_reg_wr = 0;
4687		switch (pkt.type) {
4688		case RADEON_PACKET_TYPE0:
4689			dev_err(rdev->dev, "Packet0 not allowed!\n");
4690			ret = -EINVAL;
4691			break;
4692		case RADEON_PACKET_TYPE2:
4693			idx += 1;
4694			break;
4695		case RADEON_PACKET_TYPE3:
4696			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
4697			if (ib->is_const_ib)
4698				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
4699			else {
4700				switch (ib->ring) {
4701				case RADEON_RING_TYPE_GFX_INDEX:
4702					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
4703					break;
4704				case CAYMAN_RING_TYPE_CP1_INDEX:
4705				case CAYMAN_RING_TYPE_CP2_INDEX:
4706					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
4707					break;
4708				default:
4709					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
4710					ret = -EINVAL;
4711					break;
4712				}
4713			}
4714			idx += pkt.count + 2;
4715			break;
4716		default:
4717			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
4718			ret = -EINVAL;
4719			break;
4720		}
4721		if (ret) {
4722			for (i = 0; i < ib->length_dw; i++) {
4723				if (i == idx)
4724					printk("\t0x%08x <---\n", ib->ptr[i]);
4725				else
4726					printk("\t0x%08x\n", ib->ptr[i]);
4727			}
4728			break;
4729		}
4730	} while (idx < ib->length_dw);
4731
4732	return ret;
4733}
4734
4735/*
4736 * vm
4737 */
4738int si_vm_init(struct radeon_device *rdev)
4739{
4740	/* number of VMs */
4741	rdev->vm_manager.nvm = 16;
4742	/* base offset of vram pages */
4743	rdev->vm_manager.vram_base_offset = 0;
4744
4745	return 0;
4746}
4747
void si_vm_fini(struct radeon_device *rdev)
{
	/* intentionally empty: si_vm_init() allocates nothing to tear down */
}
4751
4752/**
4753 * si_vm_decode_fault - print human readable fault info
4754 *
4755 * @rdev: radeon_device pointer
4756 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4757 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4758 *
4759 * Print human readable fault information (SI).
4760 */
4761static void si_vm_decode_fault(struct radeon_device *rdev,
4762			       u32 status, u32 addr)
4763{
4764	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4765	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4766	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4767	char *block;
4768
4769	if (rdev->family == CHIP_TAHITI) {
4770		switch (mc_id) {
4771		case 160:
4772		case 144:
4773		case 96:
4774		case 80:
4775		case 224:
4776		case 208:
4777		case 32:
4778		case 16:
4779			block = "CB";
4780			break;
4781		case 161:
4782		case 145:
4783		case 97:
4784		case 81:
4785		case 225:
4786		case 209:
4787		case 33:
4788		case 17:
4789			block = "CB_FMASK";
4790			break;
4791		case 162:
4792		case 146:
4793		case 98:
4794		case 82:
4795		case 226:
4796		case 210:
4797		case 34:
4798		case 18:
4799			block = "CB_CMASK";
4800			break;
4801		case 163:
4802		case 147:
4803		case 99:
4804		case 83:
4805		case 227:
4806		case 211:
4807		case 35:
4808		case 19:
4809			block = "CB_IMMED";
4810			break;
4811		case 164:
4812		case 148:
4813		case 100:
4814		case 84:
4815		case 228:
4816		case 212:
4817		case 36:
4818		case 20:
4819			block = "DB";
4820			break;
4821		case 165:
4822		case 149:
4823		case 101:
4824		case 85:
4825		case 229:
4826		case 213:
4827		case 37:
4828		case 21:
4829			block = "DB_HTILE";
4830			break;
4831		case 167:
4832		case 151:
4833		case 103:
4834		case 87:
4835		case 231:
4836		case 215:
4837		case 39:
4838		case 23:
4839			block = "DB_STEN";
4840			break;
4841		case 72:
4842		case 68:
4843		case 64:
4844		case 8:
4845		case 4:
4846		case 0:
4847		case 136:
4848		case 132:
4849		case 128:
4850		case 200:
4851		case 196:
4852		case 192:
4853			block = "TC";
4854			break;
4855		case 112:
4856		case 48:
4857			block = "CP";
4858			break;
4859		case 49:
4860		case 177:
4861		case 50:
4862		case 178:
4863			block = "SH";
4864			break;
4865		case 53:
4866		case 190:
4867			block = "VGT";
4868			break;
4869		case 117:
4870			block = "IH";
4871			break;
4872		case 51:
4873		case 115:
4874			block = "RLC";
4875			break;
4876		case 119:
4877		case 183:
4878			block = "DMA0";
4879			break;
4880		case 61:
4881			block = "DMA1";
4882			break;
4883		case 248:
4884		case 120:
4885			block = "HDP";
4886			break;
4887		default:
4888			block = "unknown";
4889			break;
4890		}
4891	} else {
4892		switch (mc_id) {
4893		case 32:
4894		case 16:
4895		case 96:
4896		case 80:
4897		case 160:
4898		case 144:
4899		case 224:
4900		case 208:
4901			block = "CB";
4902			break;
4903		case 33:
4904		case 17:
4905		case 97:
4906		case 81:
4907		case 161:
4908		case 145:
4909		case 225:
4910		case 209:
4911			block = "CB_FMASK";
4912			break;
4913		case 34:
4914		case 18:
4915		case 98:
4916		case 82:
4917		case 162:
4918		case 146:
4919		case 226:
4920		case 210:
4921			block = "CB_CMASK";
4922			break;
4923		case 35:
4924		case 19:
4925		case 99:
4926		case 83:
4927		case 163:
4928		case 147:
4929		case 227:
4930		case 211:
4931			block = "CB_IMMED";
4932			break;
4933		case 36:
4934		case 20:
4935		case 100:
4936		case 84:
4937		case 164:
4938		case 148:
4939		case 228:
4940		case 212:
4941			block = "DB";
4942			break;
4943		case 37:
4944		case 21:
4945		case 101:
4946		case 85:
4947		case 165:
4948		case 149:
4949		case 229:
4950		case 213:
4951			block = "DB_HTILE";
4952			break;
4953		case 39:
4954		case 23:
4955		case 103:
4956		case 87:
4957		case 167:
4958		case 151:
4959		case 231:
4960		case 215:
4961			block = "DB_STEN";
4962			break;
4963		case 72:
4964		case 68:
4965		case 8:
4966		case 4:
4967		case 136:
4968		case 132:
4969		case 200:
4970		case 196:
4971			block = "TC";
4972			break;
4973		case 112:
4974		case 48:
4975			block = "CP";
4976			break;
4977		case 49:
4978		case 177:
4979		case 50:
4980		case 178:
4981			block = "SH";
4982			break;
4983		case 53:
4984			block = "VGT";
4985			break;
4986		case 117:
4987			block = "IH";
4988			break;
4989		case 51:
4990		case 115:
4991			block = "RLC";
4992			break;
4993		case 119:
4994		case 183:
4995			block = "DMA0";
4996			break;
4997		case 61:
4998			block = "DMA1";
4999			break;
5000		case 248:
5001		case 120:
5002			block = "HDP";
5003			break;
5004		default:
5005			block = "unknown";
5006			break;
5007		}
5008	}
5009
5010	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
5011	       protections, vmid, addr,
5012	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
5013	       block, mc_id);
5014}
5015
/**
 * si_vm_flush - flush the TLB for a VM via the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring to emit the flush packets on
 * @vm_id: VM context id (0-15) to invalidate
 * @pd_addr: new page directory base address
 *
 * Emits the PM4 packet sequence that updates the page table base for
 * @vm_id, flushes the HDP cache, requests a TLB invalidate and waits
 * for it to complete.  The packet order is part of the programming
 * sequence and must not be changed.
 */
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		 unsigned vm_id, uint64_t pd_addr)
{
	/* write new base address */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));

	/* contexts 0-7 and 8-15 live in two separate register banks */
	if (vm_id < 8) {
		radeon_ring_write(ring,
				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring,
				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}
5064
5065/*
5066 *  Power and clock gating
5067 */
5068static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
5069{
5070	int i;
5071
5072	for (i = 0; i < rdev->usec_timeout; i++) {
5073		if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
5074			break;
5075		udelay(1);
5076	}
5077
5078	for (i = 0; i < rdev->usec_timeout; i++) {
5079		if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
5080			break;
5081		udelay(1);
5082	}
5083}
5084
5085static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
5086					 bool enable)
5087{
5088	u32 tmp = RREG32(CP_INT_CNTL_RING0);
5089	u32 mask;
5090	int i;
5091
5092	if (enable)
5093		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5094	else
5095		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5096	WREG32(CP_INT_CNTL_RING0, tmp);
5097
5098	if (!enable) {
5099		/* read a gfx register */
5100		tmp = RREG32(DB_DEPTH_INFO);
5101
5102		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
5103		for (i = 0; i < rdev->usec_timeout; i++) {
5104			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
5105				break;
5106			udelay(1);
5107		}
5108	}
5109}
5110
5111static void si_set_uvd_dcm(struct radeon_device *rdev,
5112			   bool sw_mode)
5113{
5114	u32 tmp, tmp2;
5115
5116	tmp = RREG32(UVD_CGC_CTRL);
5117	tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
5118	tmp |= DCM | CG_DT(1) | CLK_OD(4);
5119
5120	if (sw_mode) {
5121		tmp &= ~0x7ffff800;
5122		tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
5123	} else {
5124		tmp |= 0x7ffff800;
5125		tmp2 = 0;
5126	}
5127
5128	WREG32(UVD_CGC_CTRL, tmp);
5129	WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
5130}
5131
/* Initialize UVD internal clock gating.  hw_mode is hardwired to true,
 * so the else branch (manual DCM disable) is currently dead code kept
 * as a reference for the alternative programming path.
 */
void si_init_uvd_internal_cg(struct radeon_device *rdev)
{
	bool hw_mode = true;

	if (hw_mode) {
		si_set_uvd_dcm(rdev, false);
	} else {
		u32 tmp = RREG32(UVD_CGC_CTRL);
		tmp &= ~DCM;
		WREG32(UVD_CGC_CTRL, tmp);
	}
}
5144
5145static u32 si_halt_rlc(struct radeon_device *rdev)
5146{
5147	u32 data, orig;
5148
5149	orig = data = RREG32(RLC_CNTL);
5150
5151	if (data & RLC_ENABLE) {
5152		data &= ~RLC_ENABLE;
5153		WREG32(RLC_CNTL, data);
5154
5155		si_wait_for_rlc_serdes(rdev);
5156	}
5157
5158	return orig;
5159}
5160
5161static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
5162{
5163	u32 tmp;
5164
5165	tmp = RREG32(RLC_CNTL);
5166	if (tmp != rlc)
5167		WREG32(RLC_CNTL, rlc);
5168}
5169
5170static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
5171{
5172	u32 data, orig;
5173
5174	orig = data = RREG32(DMA_PG);
5175	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
5176		data |= PG_CNTL_ENABLE;
5177	else
5178		data &= ~PG_CNTL_ENABLE;
5179	if (orig != data)
5180		WREG32(DMA_PG, data);
5181}
5182
5183static void si_init_dma_pg(struct radeon_device *rdev)
5184{
5185	u32 tmp;
5186
5187	WREG32(DMA_PGFSM_WRITE,  0x00002000);
5188	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
5189
5190	for (tmp = 0; tmp < 5; tmp++)
5191		WREG32(DMA_PGFSM_WRITE, 0);
5192}
5193
/* Enable/disable gfx clock/power gating (only when GFX_PG is supported). */
static void si_enable_gfx_cgpg(struct radeon_device *rdev,
			       bool enable)
{
	u32 tmp;

	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
		/* program the power up/down delay thresholds */
		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
		WREG32(RLC_TTOP_D, tmp);

		tmp = RREG32(RLC_PG_CNTL);
		tmp |= GFX_PG_ENABLE;
		WREG32(RLC_PG_CNTL, tmp);

		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp |= AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);
	} else {
		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp &= ~AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);

		/* dummy gfx register read; presumably forces the gfx block
		 * awake after disabling auto powergating - TODO confirm
		 */
		tmp = RREG32(DB_RENDER_CONTROL);
	}
}
5218
/* One-time gfx clock/power gating setup: point the RLC at the
 * save/restore and clear-state buffers and tune the auto-PG timer.
 */
static void si_init_gfx_cgpg(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);

	tmp = RREG32(RLC_PG_CNTL);
	tmp |= GFX_PG_SRC;
	WREG32(RLC_PG_CNTL, tmp);

	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);

	tmp = RREG32(RLC_AUTO_PG_CTRL);

	/* idle-interval threshold before auto powergating kicks in */
	tmp &= ~GRBM_REG_SGIT_MASK;
	tmp |= GRBM_REG_SGIT(0x700);
	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
	WREG32(RLC_AUTO_PG_CTRL, tmp);
}
5238
5239static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5240{
5241	u32 mask = 0, tmp, tmp1;
5242	int i;
5243
5244	si_select_se_sh(rdev, se, sh);
5245	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5246	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5247	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5248
5249	tmp &= 0xffff0000;
5250
5251	tmp |= tmp1;
5252	tmp >>= 16;
5253
5254	for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
5255		mask <<= 1;
5256		mask |= 1;
5257	}
5258
5259	return (~tmp) & mask;
5260}
5261
5262static void si_init_ao_cu_mask(struct radeon_device *rdev)
5263{
5264	u32 i, j, k, active_cu_number = 0;
5265	u32 mask, counter, cu_bitmap;
5266	u32 tmp = 0;
5267
5268	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5269		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5270			mask = 1;
5271			cu_bitmap = 0;
5272			counter  = 0;
5273			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
5274				if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
5275					if (counter < 2)
5276						cu_bitmap |= mask;
5277					counter++;
5278				}
5279				mask <<= 1;
5280			}
5281
5282			active_cu_number += counter;
5283			tmp |= (cu_bitmap << (i * 16 + j * 8));
5284		}
5285	}
5286
5287	WREG32(RLC_PG_AO_CU_MASK, tmp);
5288
5289	tmp = RREG32(RLC_MAX_PG_CU);
5290	tmp &= ~MAX_PU_CU_MASK;
5291	tmp |= MAX_PU_CU(active_cu_number);
5292	WREG32(RLC_MAX_PG_CU, tmp);
5293}
5294
/* Enable/disable coarse-grain clock gating (CGCG/CGLS).  The serdes
 * broadcast writes must happen with the RLC halted; the sequence order
 * below is required.
 */
static void si_enable_cgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp;

	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
		si_enable_gui_idle_interrupt(rdev, true);

		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);

		/* halt the RLC, broadcast the serdes config, then restore */
		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);

		si_wait_for_rlc_serdes(rdev);

		si_update_rlc(rdev, tmp);

		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);

		data |= CGCG_EN | CGLS_EN;
	} else {
		si_enable_gui_idle_interrupt(rdev, false);

		/* dummy reads to settle the CB clock-gating state */
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);

		data &= ~(CGCG_EN | CGLS_EN);
	}

	if (orig != data)
		WREG32(RLC_CGCG_CGLS_CTRL, data);
}
5334
/* Enable/disable medium-grain clock gating (MGCG) and, when supported,
 * CP memory light sleep.  Serdes writes are done with the RLC halted.
 */
static void si_enable_mgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data = 0x96940200;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
			orig = data = RREG32(CP_MEM_SLP_CNTL);
			data |= CP_MEM_LS_EN;
			if (orig != data)
				WREG32(CP_MEM_SLP_CNTL, data);
		}

		/* clear the low override bits to allow gating */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data &= 0xffffffc0;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);

		si_update_rlc(rdev, tmp);
	} else {
		/* set the override bits to force clocks on */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(CP_MEM_SLP_CNTL);
		if (data & CP_MEM_LS_EN) {
			data &= ~CP_MEM_LS_EN;
			WREG32(CP_MEM_SLP_CNTL, data);
		}
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data |= LS_OVERRIDE | OVERRIDE;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);

		si_update_rlc(rdev, tmp);
	}
}
5390
/* Enable/disable UVD medium-grain clock gating via the UVD context
 * registers, UVD_CGC_CTRL and the SMC-indirect CGTT locals.
 */
static void si_enable_uvd_mgcg(struct radeon_device *rdev,
			       bool enable)
{
	u32 orig, data, tmp;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp |= 0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data |= DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* clear the CGTT local overrides to allow gating */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
	} else {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp &= ~0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data &= ~DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* set all CGTT local overrides to force clocks on */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
	}
}
5422
/* Memory-controller registers that all carry the MC_LS_ENABLE /
 * MC_CG_ENABLE bits; walked by si_enable_mc_ls() and si_enable_mc_mgcg().
 */
static const u32 mc_cg_registers[] =
{
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};
5435
5436static void si_enable_mc_ls(struct radeon_device *rdev,
5437			    bool enable)
5438{
5439	int i;
5440	u32 orig, data;
5441
5442	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5443		orig = data = RREG32(mc_cg_registers[i]);
5444		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5445			data |= MC_LS_ENABLE;
5446		else
5447			data &= ~MC_LS_ENABLE;
5448		if (data != orig)
5449			WREG32(mc_cg_registers[i], data);
5450	}
5451}
5452
5453static void si_enable_mc_mgcg(struct radeon_device *rdev,
5454			       bool enable)
5455{
5456	int i;
5457	u32 orig, data;
 
 
5458
5459	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5460		orig = data = RREG32(mc_cg_registers[i]);
5461		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5462			data |= MC_CG_ENABLE;
5463		else
5464			data &= ~MC_CG_ENABLE;
5465		if (data != orig)
5466			WREG32(mc_cg_registers[i], data);
5467	}
5468}
5469
5470static void si_enable_dma_mgcg(struct radeon_device *rdev,
5471			       bool enable)
5472{
5473	u32 orig, data, offset;
5474	int i;
 
 
5475
5476	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5477		for (i = 0; i < 2; i++) {
5478			if (i == 0)
5479				offset = DMA0_REGISTER_OFFSET;
5480			else
5481				offset = DMA1_REGISTER_OFFSET;
5482			orig = data = RREG32(DMA_POWER_CNTL + offset);
5483			data &= ~MEM_POWER_OVERRIDE;
5484			if (data != orig)
5485				WREG32(DMA_POWER_CNTL + offset, data);
5486			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5487		}
5488	} else {
5489		for (i = 0; i < 2; i++) {
5490			if (i == 0)
5491				offset = DMA0_REGISTER_OFFSET;
5492			else
5493				offset = DMA1_REGISTER_OFFSET;
5494			orig = data = RREG32(DMA_POWER_CNTL + offset);
5495			data |= MEM_POWER_OVERRIDE;
5496			if (data != orig)
5497				WREG32(DMA_POWER_CNTL + offset, data);
5498
5499			orig = data = RREG32(DMA_CLK_CTRL + offset);
5500			data = 0xff000000;
5501			if (data != orig)
5502				WREG32(DMA_CLK_CTRL + offset, data);
5503		}
5504	}
5505}
5506
5507static void si_enable_bif_mgls(struct radeon_device *rdev,
5508			       bool enable)
5509{
5510	u32 orig, data;
5511
5512	orig = data = RREG32_PCIE(PCIE_CNTL2);
5513
5514	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5515		data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5516			REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5517	else
5518		data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5519			  REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5520
5521	if (orig != data)
5522		WREG32_PCIE(PCIE_CNTL2, data);
5523}
5524
5525static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5526			       bool enable)
5527{
5528	u32 orig, data;
5529
5530	orig = data = RREG32(HDP_HOST_PATH_CNTL);
5531
5532	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5533		data &= ~CLOCK_GATING_DIS;
5534	else
5535		data |= CLOCK_GATING_DIS;
5536
5537	if (orig != data)
5538		WREG32(HDP_HOST_PATH_CNTL, data);
5539}
5540
5541static void si_enable_hdp_ls(struct radeon_device *rdev,
5542			     bool enable)
5543{
5544	u32 orig, data;
5545
5546	orig = data = RREG32(HDP_MEM_POWER_LS);
5547
5548	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5549		data |= HDP_LS_ENABLE;
5550	else
5551		data &= ~HDP_LS_ENABLE;
5552
5553	if (orig != data)
5554		WREG32(HDP_MEM_POWER_LS, data);
5555}
5556
/* Enable/disable clock gating for the blocks selected by @block (a mask
 * of RADEON_CG_BLOCK_* flags).  Each helper additionally honours the
 * matching rdev->cg_flags capability bit.
 */
static void si_update_cg(struct radeon_device *rdev,
			 u32 block, bool enable)
{
	if (block & RADEON_CG_BLOCK_GFX) {
		si_enable_gui_idle_interrupt(rdev, false);
		/* order matters! MGCG before CGCG when enabling, the
		 * reverse when disabling
		 */
		if (enable) {
			si_enable_mgcg(rdev, true);
			si_enable_cgcg(rdev, true);
		} else {
			si_enable_cgcg(rdev, false);
			si_enable_mgcg(rdev, false);
		}
		si_enable_gui_idle_interrupt(rdev, true);
	}

	if (block & RADEON_CG_BLOCK_MC) {
		si_enable_mc_mgcg(rdev, enable);
		si_enable_mc_ls(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_SDMA) {
		si_enable_dma_mgcg(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_BIF) {
		si_enable_bif_mgls(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_UVD) {
		if (rdev->has_uvd) {
			si_enable_uvd_mgcg(rdev, enable);
		}
	}

	if (block & RADEON_CG_BLOCK_HDP) {
		si_enable_hdp_mgcg(rdev, enable);
		si_enable_hdp_ls(rdev, enable);
	}
}
5597
5598static void si_init_cg(struct radeon_device *rdev)
5599{
5600	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5601			    RADEON_CG_BLOCK_MC |
5602			    RADEON_CG_BLOCK_SDMA |
5603			    RADEON_CG_BLOCK_BIF |
5604			    RADEON_CG_BLOCK_HDP), true);
5605	if (rdev->has_uvd) {
5606		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
5607		si_init_uvd_internal_cg(rdev);
5608	}
5609}
5610
5611static void si_fini_cg(struct radeon_device *rdev)
5612{
5613	if (rdev->has_uvd) {
5614		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
5615	}
5616	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5617			    RADEON_CG_BLOCK_MC |
5618			    RADEON_CG_BLOCK_SDMA |
5619			    RADEON_CG_BLOCK_BIF |
5620			    RADEON_CG_BLOCK_HDP), false);
5621}
5622
5623u32 si_get_csb_size(struct radeon_device *rdev)
5624{
5625	u32 count = 0;
5626	const struct cs_section_def *sect = NULL;
5627	const struct cs_extent_def *ext = NULL;
5628
5629	if (rdev->rlc.cs_data == NULL)
5630		return 0;
5631
5632	/* begin clear state */
5633	count += 2;
5634	/* context control state */
5635	count += 3;
5636
5637	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5638		for (ext = sect->section; ext->extent != NULL; ++ext) {
5639			if (sect->id == SECT_CONTEXT)
5640				count += 2 + ext->reg_count;
5641			else
5642				return 0;
5643		}
5644	}
5645	/* pa_sc_raster_config */
5646	count += 3;
5647	/* end clear state */
5648	count += 2;
5649	/* clear state */
5650	count += 2;
5651
5652	return count;
5653}
5654
/* Fill @buffer with the PM4 clear-state stream.  The layout must match
 * the dword count computed by si_get_csb_size() exactly.
 */
void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (rdev->rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	/* begin clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	/* context control */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	/* one SET_CONTEXT_REG packet per extent of each context section */
	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	/* per-family PA_SC_RASTER_CONFIG value */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		buffer[count++] = cpu_to_le32(0x2a00126a);
		break;
	case CHIP_VERDE:
		buffer[count++] = cpu_to_le32(0x0000124a);
		break;
	case CHIP_OLAND:
		buffer[count++] = cpu_to_le32(0x00000082);
		break;
	case CHIP_HAINAN:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	/* end clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
5714
5715static void si_init_pg(struct radeon_device *rdev)
5716{
5717	if (rdev->pg_flags) {
5718		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5719			si_init_dma_pg(rdev);
5720		}
5721		si_init_ao_cu_mask(rdev);
5722		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5723			si_init_gfx_cgpg(rdev);
5724		} else {
5725			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5726			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5727		}
5728		si_enable_dma_pg(rdev, true);
5729		si_enable_gfx_cgpg(rdev, true);
5730	} else {
5731		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5732		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5733	}
5734}
5735
5736static void si_fini_pg(struct radeon_device *rdev)
5737{
5738	if (rdev->pg_flags) {
5739		si_enable_dma_pg(rdev, false);
5740		si_enable_gfx_cgpg(rdev, false);
5741	}
5742}
5743
5744/*
5745 * RLC
5746 */
/* Pulse the RLC soft-reset bit in GRBM_SOFT_RESET: assert, wait 50us,
 * deassert, wait 50us for the block to come back.
 */
void si_rlc_reset(struct radeon_device *rdev)
{
	u32 tmp = RREG32(GRBM_SOFT_RESET);

	tmp |= SOFT_RESET_RLC;
	WREG32(GRBM_SOFT_RESET, tmp);
	udelay(50);
	tmp &= ~SOFT_RESET_RLC;
	WREG32(GRBM_SOFT_RESET, tmp);
	udelay(50);
}
5758
/* Stop the RLC: disable it, mask the gui-idle interrupt and wait for
 * the serdes links to drain.
 */
static void si_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);

	si_enable_gui_idle_interrupt(rdev, false);

	si_wait_for_rlc_serdes(rdev);
}
5767
/* Start the RLC, re-enable the gui-idle interrupt and give it 50us to
 * come up.
 */
static void si_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);

	si_enable_gui_idle_interrupt(rdev, true);

	udelay(50);
}
5776
5777static bool si_lbpw_supported(struct radeon_device *rdev)
5778{
5779	u32 tmp;
5780
5781	/* Enable LBPW only for DDR3 */
5782	tmp = RREG32(MC_SEQ_MISC0);
5783	if ((tmp & 0xF0000000) == 0xB0000000)
5784		return true;
5785	return false;
5786}
5787
5788static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5789{
5790	u32 tmp;
5791
5792	tmp = RREG32(RLC_LB_CNTL);
5793	if (enable)
5794		tmp |= LOAD_BALANCE_ENABLE;
5795	else
5796		tmp &= ~LOAD_BALANCE_ENABLE;
5797	WREG32(RLC_LB_CNTL, tmp);
5798
5799	if (!enable) {
5800		si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5801		WREG32(SPI_LB_CU_MASK, 0x00ff);
5802	}
5803}
5804
/* Bring up the RLC: reset it, (re)initialize power/clock gating, clear
 * its runtime registers, upload the RLC microcode and start it.
 * Returns 0 on success, -EINVAL when no RLC firmware is loaded.
 */
static int si_rlc_resume(struct radeon_device *rdev)
{
	u32 i;

	if (!rdev->rlc_fw)
		return -EINVAL;

	si_rlc_stop(rdev);

	si_rlc_reset(rdev);

	si_init_pg(rdev);

	si_init_cg(rdev);

	/* clear the RLC ring/load-balance state */
	WREG32(RLC_RL_BASE, 0);
	WREG32(RLC_RL_SIZE, 0);
	WREG32(RLC_LB_CNTL, 0);
	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(RLC_LB_CNTR_INIT, 0);
	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	if (rdev->new_fw) {
		/* new-style firmware: little-endian payload behind a header */
		const struct rlc_firmware_header_v1_0 *hdr =
			(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
		u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		const __le32 *fw_data = (const __le32 *)
			(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		radeon_ucode_print_rlc_hdr(&hdr->header);

		for (i = 0; i < fw_size; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
		}
	} else {
		/* legacy firmware: raw big-endian words, fixed size */
		const __be32 *fw_data =
			(const __be32 *)rdev->rlc_fw->data;
		for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	si_enable_lbpw(rdev, si_lbpw_supported(rdev));

	si_rlc_start(rdev);

	return 0;
}
5859
5860static void si_enable_interrupts(struct radeon_device *rdev)
5861{
5862	u32 ih_cntl = RREG32(IH_CNTL);
5863	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5864
5865	ih_cntl |= ENABLE_INTR;
5866	ih_rb_cntl |= IH_RB_ENABLE;
5867	WREG32(IH_CNTL, ih_cntl);
5868	WREG32(IH_RB_CNTL, ih_rb_cntl);
5869	rdev->ih.enabled = true;
5870}
5871
5872static void si_disable_interrupts(struct radeon_device *rdev)
5873{
5874	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5875	u32 ih_cntl = RREG32(IH_CNTL);
5876
5877	ih_rb_cntl &= ~IH_RB_ENABLE;
5878	ih_cntl &= ~ENABLE_INTR;
5879	WREG32(IH_RB_CNTL, ih_rb_cntl);
5880	WREG32(IH_CNTL, ih_cntl);
5881	/* set rptr, wptr to 0 */
5882	WREG32(IH_RB_RPTR, 0);
5883	WREG32(IH_RB_WPTR, 0);
5884	rdev->ih.enabled = false;
 
5885	rdev->ih.rptr = 0;
5886}
5887
/* Mask every interrupt source: CP rings, DMA engines, GRBM/SRBM, the
 * per-crtc vblank and pageflip sources and (on parts with display) the
 * DAC and hotplug lines.  HPD polarity bits are preserved.
 */
static void si_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	/* keep only the gui-idle bits; everything else off */
	tmp = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
	WREG32(CP_INT_CNTL_RING1, 0);
	WREG32(CP_INT_CNTL_RING2, 0);
	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(SRBM_INT_CNTL, 0);
	/* crtc interrupt masks, only for crtcs that exist on this asic */
	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);

		/* mask the hotplug interrupts but keep the polarity bit */
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
5946
/*
 * si_irq_init - set up the interrupt handler (IH) ring and enable irqs
 *
 * Allocates the IH ring, resumes the RLC, programs the IH ring buffer
 * (base, size, writeback address) and IH_CNTL defaults, forces all
 * interrupt sources to a disabled state, and finally enables the IH.
 * Returns 0 on success or a negative error code (the IH ring is freed
 * again if the RLC fails to resume).
 */
static int si_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	si_disable_interrupts(rdev);

	/* init rlc */
	ret = si_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	/* ring size is encoded as log2 of the size in dwords */
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	si_disable_interrupt_state(rdev);

	/* bus mastering must be on for MSI/interrupt delivery */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	si_enable_interrupts(rdev);

	return ret;
}
6017
/*
 * si_irq_set - program interrupt enables from the driver's current state
 *
 * Builds enable masks for CP rings, the two DMA engines, per-CRTC vblank
 * and pageflip, HPD pins, and the thermal block from the flags in
 * rdev->irq, then writes them to the hardware in one pass.  If the IH is
 * not enabled, everything is forced off instead.  Returns 0 on success,
 * -EINVAL if no irq handler is installed.
 */
int si_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 dma_cntl, dma_cntl1;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		si_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		si_disable_interrupt_state(rdev);
		return 0;
	}

	/* start from the current registers with the enable bits cleared */
	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);

	if (!ASIC_IS_NODCE(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
	}

	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;

	thermal_int = RREG32(CG_THERMAL_INT) &
		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);

	/* enable CP interrupts on all rings */
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int gfx\n");
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp1\n");
		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp2\n");
		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int dma1\n");
		dma_cntl1 |= TRAP_ENABLE;
	}
	/* vblank is wanted either for the legacy vblank path or a pending flip */
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("si_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("si_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    atomic_read(&rdev->irq.pflip[2])) {
		DRM_DEBUG("si_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    atomic_read(&rdev->irq.pflip[3])) {
		DRM_DEBUG("si_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("si_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("si_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	/* HPD: enable both connect/disconnect and RX (DP short pulse) irqs */
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("si_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("si_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("si_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("si_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("si_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("si_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
	}

	/* write out the accumulated masks */
	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);

	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);

	/* grbm_int_cntl is never modified above; this always writes 0 */
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	/* pageflip interrupts stay unconditionally enabled per crtc */
	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
		       GRPH_PFLIP_INT_MASK);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
		       GRPH_PFLIP_INT_MASK);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       GRPH_PFLIP_INT_MASK);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       GRPH_PFLIP_INT_MASK);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       GRPH_PFLIP_INT_MASK);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       GRPH_PFLIP_INT_MASK);
	}

	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		WREG32(DC_HPD5_INT_CONTROL, hpd5);
		WREG32(DC_HPD6_INT_CONTROL, hpd6);
	}

	WREG32(CG_THERMAL_INT, thermal_int);

	/* posting read */
	RREG32(SRBM_STATUS);

	return 0;
}
6197
6198static inline void si_irq_ack(struct radeon_device *rdev)
6199{
6200	u32 tmp;
6201
6202	if (ASIC_IS_NODCE(rdev))
6203		return;
6204
6205	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
6206	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
6207	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
6208	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
6209	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
6210	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
6211	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
6212	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
6213	if (rdev->num_crtc >= 4) {
6214		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
6215		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
6216	}
6217	if (rdev->num_crtc >= 6) {
6218		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
6219		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
6220	}
6221
6222	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
6223		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6224	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
6225		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6226	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
6227		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
6228	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
6229		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
6230	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
6231		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
6232	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
6233		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
6234
6235	if (rdev->num_crtc >= 4) {
6236		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
6237			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6238		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
6239			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6240		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
6241			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
6242		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
6243			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
6244		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
6245			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
6246		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
6247			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
6248	}
6249
6250	if (rdev->num_crtc >= 6) {
6251		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
6252			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6253		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
6254			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6255		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
6256			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
6257		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
6258			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
6259		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
6260			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
6261		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
6262			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
6263	}
6264
6265	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6266		tmp = RREG32(DC_HPD1_INT_CONTROL);
6267		tmp |= DC_HPDx_INT_ACK;
6268		WREG32(DC_HPD1_INT_CONTROL, tmp);
6269	}
6270	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6271		tmp = RREG32(DC_HPD2_INT_CONTROL);
6272		tmp |= DC_HPDx_INT_ACK;
6273		WREG32(DC_HPD2_INT_CONTROL, tmp);
6274	}
6275	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6276		tmp = RREG32(DC_HPD3_INT_CONTROL);
6277		tmp |= DC_HPDx_INT_ACK;
6278		WREG32(DC_HPD3_INT_CONTROL, tmp);
6279	}
6280	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6281		tmp = RREG32(DC_HPD4_INT_CONTROL);
6282		tmp |= DC_HPDx_INT_ACK;
6283		WREG32(DC_HPD4_INT_CONTROL, tmp);
6284	}
6285	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6286		tmp = RREG32(DC_HPD5_INT_CONTROL);
6287		tmp |= DC_HPDx_INT_ACK;
6288		WREG32(DC_HPD5_INT_CONTROL, tmp);
6289	}
6290	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6291		tmp = RREG32(DC_HPD5_INT_CONTROL);
6292		tmp |= DC_HPDx_INT_ACK;
6293		WREG32(DC_HPD6_INT_CONTROL, tmp);
6294	}
6295
6296	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
6297		tmp = RREG32(DC_HPD1_INT_CONTROL);
6298		tmp |= DC_HPDx_RX_INT_ACK;
6299		WREG32(DC_HPD1_INT_CONTROL, tmp);
6300	}
6301	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
6302		tmp = RREG32(DC_HPD2_INT_CONTROL);
6303		tmp |= DC_HPDx_RX_INT_ACK;
6304		WREG32(DC_HPD2_INT_CONTROL, tmp);
6305	}
6306	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
6307		tmp = RREG32(DC_HPD3_INT_CONTROL);
6308		tmp |= DC_HPDx_RX_INT_ACK;
6309		WREG32(DC_HPD3_INT_CONTROL, tmp);
6310	}
6311	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
6312		tmp = RREG32(DC_HPD4_INT_CONTROL);
6313		tmp |= DC_HPDx_RX_INT_ACK;
6314		WREG32(DC_HPD4_INT_CONTROL, tmp);
6315	}
6316	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
6317		tmp = RREG32(DC_HPD5_INT_CONTROL);
6318		tmp |= DC_HPDx_RX_INT_ACK;
6319		WREG32(DC_HPD5_INT_CONTROL, tmp);
6320	}
6321	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
6322		tmp = RREG32(DC_HPD5_INT_CONTROL);
6323		tmp |= DC_HPDx_RX_INT_ACK;
6324		WREG32(DC_HPD6_INT_CONTROL, tmp);
6325	}
6326}
6327
/* Fully quiesce interrupts: stop the IH, give in-flight irqs time to land,
 * ack anything pending, then mask every source.
 */
static void si_irq_disable(struct radeon_device *rdev)
{
	si_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	si_irq_ack(rdev);
	si_disable_interrupt_state(rdev);
}
6336
/* Suspend-time teardown: disable irqs, then stop the RLC. */
static void si_irq_suspend(struct radeon_device *rdev)
{
	si_irq_disable(rdev);
	si_rlc_stop(rdev);
}
6342
/* Final teardown: quiesce the irq path and free the IH ring buffer. */
static void si_irq_fini(struct radeon_device *rdev)
{
	si_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
6348
6349static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6350{
6351	u32 wptr, tmp;
6352
6353	if (rdev->wb.enabled)
6354		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6355	else
6356		wptr = RREG32(IH_RB_WPTR);
6357
6358	if (wptr & RB_OVERFLOW) {
6359		wptr &= ~RB_OVERFLOW;
6360		/* When a ring buffer overflow happen start parsing interrupt
6361		 * from the last not overwritten vector (wptr + 16). Hopefully
6362		 * this should allow us to catchup.
6363		 */
6364		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
6365			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
6366		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6367		tmp = RREG32(IH_RB_CNTL);
6368		tmp |= IH_WPTR_OVERFLOW_CLEAR;
6369		WREG32(IH_RB_CNTL, tmp);
6370	}
6371	return (wptr & rdev->ih.ptr_mask);
6372}
6373
6374/*        SI IV Ring
6375 * Each IV ring entry is 128 bits:
6376 * [7:0]    - interrupt source id
6377 * [31:8]   - reserved
6378 * [59:32]  - interrupt source data
6379 * [63:60]  - reserved
6380 * [71:64]  - RINGID
6381 * [79:72]  - VMID
6382 * [127:80] - reserved
6383 */
/*
 * si_irq_process - drain and dispatch the IH ring
 *
 * Walks the interrupt handler ring from rptr to wptr, decoding each
 * 16-byte IV entry (src_id, src_data, ring_id) and dispatching it:
 * vblank/vline per crtc, pageflips, HPD and HPD-RX hotplug, SRBM read
 * errors, UVD, VM protection faults, CP/DMA fence completions, thermal
 * events and GUI idle.  Deferred work (hotplug, DP, thermal) is queued
 * after the loop.  Re-entry is prevented with rdev->ih.lock; if wptr
 * moved while we were processing, we restart.  Returns IRQ_HANDLED or
 * IRQ_NONE.
 */
int si_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data, ring_id;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_dp = false;
	bool queue_thermal = false;
	u32 status, addr;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = si_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	si_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[0]) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[0]))
					radeon_crtc_handle_vblank(rdev, 0);
				/* clear the latched status bit so it is not re-handled */
				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D1 vblank\n");

				break;
			case 1: /* D1 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D1 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[1]) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[1]))
					radeon_crtc_handle_vblank(rdev, 1);
				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D2 vblank\n");

				break;
			case 1: /* D2 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D2 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[2]) {
					drm_handle_vblank(rdev->ddev, 2);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[2]))
					radeon_crtc_handle_vblank(rdev, 2);
				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D3 vblank\n");

				break;
			case 1: /* D3 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D3 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[3]) {
					drm_handle_vblank(rdev->ddev, 3);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[3]))
					radeon_crtc_handle_vblank(rdev, 3);
				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D4 vblank\n");

				break;
			case 1: /* D4 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D4 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[4]) {
					drm_handle_vblank(rdev->ddev, 4);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[4]))
					radeon_crtc_handle_vblank(rdev, 4);
				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D5 vblank\n");

				break;
			case 1: /* D5 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D5 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[5]) {
					drm_handle_vblank(rdev->ddev, 5);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[5]))
					radeon_crtc_handle_vblank(rdev, 5);
				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D6 vblank\n");

				break;
			case 1: /* D6 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D6 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 8: /* D1 page flip */
		case 10: /* D2 page flip */
		case 12: /* D3 page flip */
		case 14: /* D4 page flip */
		case 16: /* D5 page flip */
		case 18: /* D6 page flip */
			/* src_id 8,10,...,18 maps to crtc 0..5 */
			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
			break;
		case 42: /* HPD hotplug */
			/* src_data 0-5: HPD pins; 6-11: HPD RX (DP short pulse) */
			switch (src_data) {
			case 0:
				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD1\n");

				break;
			case 1:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD2\n");

				break;
			case 2:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD3\n");

				break;
			case 3:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD4\n");

				break;
			case 4:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD5\n");

				break;
			case 5:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD6\n");

				break;
			case 6:
				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 1\n");

				break;
			case 7:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 2\n");

				break;
			case 8:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 3\n");

				break;
			case 9:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 4\n");

				break;
			case 10:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 5\n");

				break;
			case 11:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 6\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 96: /* SRBM read error */
			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
			WREG32(SRBM_INT_ACK, 0x1);
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 146: /* VM protection faults */
		case 147:
			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
			/* reset addr and status */
			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
			if (addr == 0x0 && status == 0x0)
				break;
			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
				addr);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
				status);
			si_vm_decode_fault(rdev, status, addr);
			break;
		case 176: /* RINGID0 CP_INT */
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 177: /* RINGID1 CP_INT */
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
			break;
		case 178: /* RINGID2 CP_INT */
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			switch (ring_id) {
			case 0:
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
				break;
			case 1:
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
				break;
			case 2:
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
				break;
			}
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		case 244: /* DMA1 trap event */
			DRM_DEBUG("IH: DMA1 trap\n");
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	/* run deferred work outside the ring-draining loop */
	if (queue_dp)
		schedule_work(&rdev->dp_work);
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = si_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
6820
6821/*
6822 * startup/shutdown callbacks
6823 */
/**
 * si_startup - bring the SI GPU into a fully operational state
 *
 * @rdev: radeon_device pointer
 *
 * Programs the PCIe link/ASPM, memory controller, GART, RLC, writeback,
 * fence rings, IRQs, CP/DMA rings and optional UVD/VCE engines, then the
 * IB pool, VM manager and audio.  The order of these steps is
 * hardware-mandated; do not reorder.  Called from si_init() and
 * si_resume().  Returns 0 on success, negative error code on failure.
 */
static int si_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2/3 link */
	si_pcie_gen3_enable(rdev);
	/* enable aspm */
	si_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	si_mc_program(rdev);

	/* when DPM is enabled the MC ucode was already loaded by the
	 * power-management code; avoid loading it twice */
	if (!rdev->pm.dpm_enabled) {
		r = si_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = si_pcie_gart_enable(rdev);
	if (r)
		return r;
	si_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->family == CHIP_VERDE) {
		/* only Verde has a save/restore register list for the RLC */
		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
	}
	rdev->rlc.cs_data = si_cs_data;
	r = sumo_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* start the fence driver for each of the five core rings:
	 * GFX, two compute CPs and two DMA engines */
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* UVD is optional: on failure just disable its ring and keep going */
	if (rdev->has_uvd) {
		r = uvd_v2_2_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev,
							   R600_RING_TYPE_UVD_INDEX);
			if (r)
				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
		}
		if (r)
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
	}

	/* VCE is likewise optional; disable both VCE rings on any failure */
	r = radeon_vce_resume(rdev);
	if (!r) {
		r = vce_v1_0_resume(rdev);
		if (!r)
			r = radeon_fence_driver_start_ring(rdev,
							   TN_RING_TYPE_VCE1_INDEX);
		if (!r)
			r = radeon_fence_driver_start_ring(rdev,
							   TN_RING_TYPE_VCE2_INDEX);
	}
	if (r) {
		dev_err(rdev->dev, "VCE init error (%d).\n", r);
		rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
		rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = si_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	si_irq_set(rdev);

	/* initialize the CP rings (GFX + two compute) and the two DMA rings */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	r = si_cp_load_microcode(rdev);
	if (r)
		return r;
	r = si_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	/* ring_size == 0 means UVD was disabled above */
	if (rdev->has_uvd) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		if (ring->ring_size) {
			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
					     RADEON_CP_PACKET2);
			if (!r)
				r = uvd_v1_0_init(rdev);
			if (r)
				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
		}
	}

	/* -ENOENT flags "no VCE rings present"; it is distinguished from a
	 * real init error in the check below */
	r = -ENOENT;

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	if (ring->ring_size)
		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
				     VCE_CMD_NO_OP);

	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	if (ring->ring_size)
		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
				     VCE_CMD_NO_OP);

	if (!r)
		r = vce_v1_0_init(rdev);
	else if (r != -ENOENT)
		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
7033
/**
 * si_resume - resume the GPU after suspend
 *
 * @rdev: radeon_device pointer
 *
 * Re-posts the card via the ATOM BIOS, restores the golden register
 * settings, resumes power management and then re-runs the full
 * si_startup() bring-up.  Returns 0 on success, negative error code
 * on failure (with rdev->accel_working cleared).
 */
int si_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	si_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	/* set accel_working before startup so IB tests can run */
	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		DRM_ERROR("si startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;

}
7062
/**
 * si_suspend - prepare the GPU for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Quiesces the engines in reverse order of si_startup(): PM and audio
 * first, then the VM manager, CP/DMA rings, optional UVD/VCE blocks,
 * powergating/clockgating, interrupts, writeback and finally the GART.
 * Always returns 0.
 */
int si_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	si_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
		/* NOTE(review): VCE suspend is gated on has_uvd here,
		 * mirroring the has_uvd check in si_startup()/si_fini() */
		radeon_vce_suspend(rdev);
	}
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	si_pcie_gart_disable(rdev);
	return 0;
}
7082
7083/* Plan is to move initialization in that function and use
7084 * helper function so that radeon_device_init pretty much
7085 * do nothing more than calling asic specific function. This
7086 * should also allow to remove a bunch of callback function
7087 * like vram_info.
7088 */
/**
 * si_init - one-time driver initialization for SI parts
 *
 * @rdev: radeon_device pointer
 *
 * Reads and validates the (ATOM) BIOS, posts the card if needed, sets up
 * clocks, fences, the memory controller and buffer manager, loads the
 * CP/RLC/MC microcode, sizes all rings, initializes the IH ring and GART,
 * and finally calls si_startup().  A startup failure disables
 * acceleration but is not fatal here; a missing MC ucode is.
 * Returns 0 on success, negative error code on failure.
 */
int si_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	si_init_golden_registers(rdev);
	/* Initialize scratch registers */
	si_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);

	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = si_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	/* load microcode only if not already present (e.g. after GPU reset) */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
	    !rdev->rlc_fw || !rdev->mc_fw) {
		r = si_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	/* size the rings: 1 MiB for the three CP rings, 64 KiB per DMA ring */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	/* UVD/VCE rings are small (4 KiB) and only set up if the engine
	 * initialized successfully */
	if (rdev->has_uvd) {
		r = radeon_uvd_init(rdev);
		if (!r) {
			ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
			ring->ring_obj = NULL;
			r600_ring_init(rdev, ring, 4096);
		}
	}

	r = radeon_vce_init(rdev);
	if (!r) {
		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
		ring->ring_obj = NULL;
		r600_ring_init(rdev, ring, 4096);

		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
		ring->ring_obj = NULL;
		r600_ring_init(rdev, ring, 4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		/* a failed startup is non-fatal: tear everything back down
		 * and continue without acceleration */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		si_cp_fini(rdev);
		cayman_dma_fini(rdev);
		si_irq_fini(rdev);
		sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		si_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not suffient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
7226
/**
 * si_fini - final driver teardown for SI parts
 *
 * @rdev: radeon_device pointer
 *
 * Releases everything set up by si_init()/si_startup() in reverse
 * dependency order and frees the cached BIOS image.
 */
void si_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	si_cp_fini(rdev);
	cayman_dma_fini(rdev);
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_fini(rdev);
	sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
		radeon_vce_fini(rdev);
	}
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	/* free the BIOS copy made during init */
	kfree(rdev->bios);
	rdev->bios = NULL;
}
7254
7255/**
7256 * si_get_gpu_clock_counter - return GPU clock counter snapshot
7257 *
7258 * @rdev: radeon_device pointer
7259 *
7260 * Fetches a GPU clock counter snapshot (SI).
7261 * Returns the 64 bit clock counter snapshot.
7262 */
7263uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
7264{
7265	uint64_t clock;
7266
7267	mutex_lock(&rdev->gpu_clock_mutex);
7268	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
7269	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
7270		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
7271	mutex_unlock(&rdev->gpu_clock_mutex);
7272	return clock;
7273}
7274
/**
 * si_set_uvd_clocks - program the UVD PLL for the requested clocks
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock (10 kHz units); 0 leaves PLL bypassed
 * @dclk: requested UVD decode clock (10 kHz units); 0 leaves PLL bypassed
 *
 * Switches the UVD clocks to bypass, reprograms the UPLL dividers for the
 * requested rates, then switches back to the PLL outputs.  The register
 * write sequence and the settle delays are hardware-mandated.
 * Returns 0 on success, negative error code on failure.
 */
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	if (!vclk || !dclk) {
		/* keep the Bypass mode */
		return 0;
	}

	/* compute feedback and post dividers for the requested rates */
	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* disable sleep mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	/* ISPARE9 selects the VCO range based on the feedback divider */
	if (fb_div < 307200)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
7363
/**
 * si_pcie_gen3_enable - raise the PCIe link speed to gen2/gen3 if possible
 *
 * @rdev: radeon_device pointer
 *
 * Checks the common speed capability of the GPU and its upstream bridge
 * and, when a faster rate is available, retrains the link.  For gen3 a
 * redo-equalization handshake is performed first (with hardware
 * autonomous width disabled on both ends during the retry loop).
 * Silently returns when the link cannot or need not be changed
 * (root bus, IGP, non-PCIE, module parameter disabled, already at
 * target speed, or no PCIe capability found).
 */
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (pci_is_root_bus(rdev->pdev->bus))
		return;

	/* radeon.pcie_gen2=0 disables any speed change */
	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	/* current_data_rate: 0 = gen1, 1 = gen2, 2 = gen3 */
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	/* config-space offsets of the PCIe capability on both ends */
	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			/* disable HW autonomous width on both ends while we
			 * fiddle with the link (restored after the loop) */
			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			/* widen the link back to the maximum detected width
			 * before the speed change, if renegotiation works */
			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			/* redo-equalization retry loop (up to 10 attempts) */
			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl: restore the saved HAWD bits */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2: restore enter-compliance (bit 4) and
				 * compliance de-emphasis (bits 9-11) */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* program the target link speed field of LNKCTL2 on the GPU */
	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	/* kick off the speed change and wait for HW to ack it */
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
7523
/**
 * si_program_aspm - configure PCIe Active State Power Management
 *
 * @rdev: radeon_device pointer
 *
 * Enables L0s/L1 link power states and, where supported, PLL powerdown
 * in L1 and CLKREQ-based clocking.  All writes are conditional
 * (read-modify-write, skipped when the value is unchanged).  Disabled
 * entirely by radeon.aspm=0 or on non-PCIE parts.  The disable_*
 * locals are compile-time policy knobs, all currently false.
 */
static void si_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* override the N_FTS (fast training sequence) count */
	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE(PCIE_P_CNTL, data);

	/* set L0s/L1 inactivity timers; LC_PMI_TO_L1_DIS is cleared again
	 * below when L1 is enabled */
	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			/* allow both PHY PLL pairs to power down in L1/TXS2 */
			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);

			/* zero PLL ramp-up times on everything but Oland/Hainan */
			if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
				data &= ~PLL_RAMP_UP_TIME_0_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);

				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
				data &= ~PLL_RAMP_UP_TIME_1_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);

				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
				data &= ~PLL_RAMP_UP_TIME_2_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);

				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
				data &= ~PLL_RAMP_UP_TIME_3_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
				data &= ~PLL_RAMP_UP_TIME_0_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
				data &= ~PLL_RAMP_UP_TIME_1_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
				data &= ~PLL_RAMP_UP_TIME_2_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
				data &= ~PLL_RAMP_UP_TIME_3_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
			}
			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			/* Oland/Hainan need a longer LS2 exit time */
			orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
			data &= ~LS2_EXIT_TIME_MASK;
			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
				data |= LS2_EXIT_TIME(5);
			if (orig != data)
				WREG32_PIF_PHY0(PB0_PIF_CNTL, data);

			orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
			data &= ~LS2_EXIT_TIME_MASK;
			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
				data |= LS2_EXIT_TIME(5);
			if (orig != data)
				WREG32_PIF_PHY1(PB1_PIF_CNTL, data);

			/* CLKREQ is only usable if the upstream bridge
			 * advertises clock power management */
			if (!disable_clkreq &&
			    !pci_is_root_bus(rdev->pdev->bus)) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				/* move internal clocks off the reference clock
				 * so it can be gated via CLKREQ */
				orig = data = RREG32(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32(THM_CLK_CNTL, data);

				orig = data = RREG32(MISC_CLK_CNTL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32(MISC_CLK_CNTL, data);

				orig = data = RREG32(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32(CG_CLKPIN_CNTL, data);

				orig = data = RREG32(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32(MPLL_BYPASSCLK_SEL, data);

				orig = data = RREG32(SPLL_CNTL_MODE);
				data &= ~SPLL_REFCLK_SEL_MASK;
				if (orig != data)
					WREG32(SPLL_CNTL_MODE, data);
			}
		}
	} else {
		/* L1 disabled: commit the L0s/PMI settings computed above */
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	/* enable memory light sleep in the BIF */
	orig = data = RREG32_PCIE(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE(PCIE_CNTL2, data);

	/* turn L0s back off if the link partner cannot handle our
	 * N_FTS count in reverse-lane configurations */
	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
			}
		}
	}
}
7728
7729int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
7730{
7731	unsigned i;
7732
7733	/* make sure VCEPLL_CTLREQ is deasserted */
7734	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
7735
7736	mdelay(10);
7737
7738	/* assert UPLL_CTLREQ */
7739	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
7740
7741	/* wait for CTLACK and CTLACK2 to get asserted */
7742	for (i = 0; i < 100; ++i) {
7743		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
7744		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
7745			break;
7746		mdelay(10);
7747	}
7748
7749	/* deassert UPLL_CTLREQ */
7750	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
7751
7752	if (i == 100) {
7753		DRM_ERROR("Timeout setting UVD clocks!\n");
7754		return -ETIMEDOUT;
7755	}
7756
7757	return 0;
7758}
7759
/**
 * si_set_vce_clocks - program the VCE PLL for the requested clocks
 *
 * @rdev: radeon_device pointer
 * @evclk: requested VCE encode clock (10 kHz units); 0 puts PLL to sleep
 * @ecclk: requested VCE core clock (10 kHz units); 0 puts PLL to sleep
 *
 * Mirrors si_set_uvd_clocks() for the VCE PLL: bypass, reprogram
 * dividers, handshake via si_vce_send_vcepll_ctlreq(), then switch the
 * clock sources back to the PLL.  The write order and delays are
 * hardware-mandated.  Returns 0 on success, negative error on failure.
 */
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
	int r;

	/* bypass evclk and ecclk with bclk */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
		     ~VCEPLL_BYPASS_EN_MASK);

	if (!evclk || !ecclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
			     ~VCEPLL_SLEEP_MASK);
		return 0;
	}

	/* compute feedback and post dividers for the requested rates */
	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &evclk_div, &ecclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
		     ~VCEPLL_VCO_MODE_MASK);

	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
		     ~VCEPLL_SLEEP_MASK);
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);

	/* deassert VCEPLL_RESET */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);

	mdelay(1);

	r = si_vce_send_vcepll_ctlreq(rdev);
	if (r)
		return r;

	/* assert VCEPLL_RESET again */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);

	/* set PDIV_A and PDIV_B */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);

	r = si_vce_send_vcepll_ctlreq(rdev);
	if (r)
		return r;

	/* switch EVCLK and ECCLK back to the PLL outputs */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
v3.5.6
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24#include <linux/firmware.h>
  25#include <linux/platform_device.h>
  26#include <linux/slab.h>
  27#include <linux/module.h>
  28#include "drmP.h"
  29#include "radeon.h"
  30#include "radeon_asic.h"
  31#include "radeon_drm.h"
 
  32#include "sid.h"
  33#include "atom.h"
  34#include "si_blit_shaders.h"
 
 
  35
  36#define SI_PFP_UCODE_SIZE 2144
  37#define SI_PM4_UCODE_SIZE 2144
  38#define SI_CE_UCODE_SIZE 2144
  39#define SI_RLC_UCODE_SIZE 2048
  40#define SI_MC_UCODE_SIZE 7769
  41
  42MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
  43MODULE_FIRMWARE("radeon/TAHITI_me.bin");
  44MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
  45MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
 
  46MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
 
 
 
 
 
 
 
 
 
  47MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
  48MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
  49MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
  50MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
 
  51MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
 
 
 
 
 
 
 
 
 
  52MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
  53MODULE_FIRMWARE("radeon/VERDE_me.bin");
  54MODULE_FIRMWARE("radeon/VERDE_ce.bin");
  55MODULE_FIRMWARE("radeon/VERDE_mc.bin");
 
  56MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
 
  57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  58extern int r600_ih_ring_alloc(struct radeon_device *rdev);
  59extern void r600_ih_ring_fini(struct radeon_device *rdev);
  60extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
  61extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
  62extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
  63extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  64
  65/* get temperature in millidegrees */
  66int si_get_temp(struct radeon_device *rdev)
  67{
  68	u32 temp;
  69	int actual_temp = 0;
  70
  71	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
  72		CTF_TEMP_SHIFT;
  73
  74	if (temp & 0x200)
  75		actual_temp = 255;
  76	else
  77		actual_temp = temp & 0x1ff;
  78
  79	actual_temp = (actual_temp * 1000);
  80
  81	return actual_temp;
  82}
  83
#define TAHITI_IO_MC_REGS_SIZE 36

/* MC IO init sequence for Tahiti: {MC_SEQ_IO_DEBUG index, data} pairs,
 * written through MC_SEQ_IO_DEBUG_INDEX/DATA by si_mc_load_microcode()
 * before the MC ucode is streamed in.
 */
static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a77400}
};
 124
/* MC IO init sequence for Pitcairn: identical to the Tahiti table except
 * for the final {0x9f, ...} entry.  Consumed by si_mc_load_microcode().
 */
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};
 163
/* MC IO init sequence for Verde: identical to the Tahiti table except
 * for the final {0x9f, ...} entry.  Consumed by si_mc_load_microcode().
 */
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};
 202
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 203/* ucode loading */
 204static int si_mc_load_microcode(struct radeon_device *rdev)
 205{
 206	const __be32 *fw_data;
 
 207	u32 running, blackout = 0;
 208	u32 *io_mc_regs;
 209	int i, ucode_size, regs_size;
 
 210
 211	if (!rdev->mc_fw)
 212		return -EINVAL;
 213
 214	switch (rdev->family) {
 215	case CHIP_TAHITI:
 216		io_mc_regs = (u32 *)&tahiti_io_mc_regs;
 217		ucode_size = SI_MC_UCODE_SIZE;
 218		regs_size = TAHITI_IO_MC_REGS_SIZE;
 219		break;
 220	case CHIP_PITCAIRN:
 221		io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
 222		ucode_size = SI_MC_UCODE_SIZE;
 223		regs_size = TAHITI_IO_MC_REGS_SIZE;
 224		break;
 225	case CHIP_VERDE:
 226	default:
 227		io_mc_regs = (u32 *)&verde_io_mc_regs;
 228		ucode_size = SI_MC_UCODE_SIZE;
 229		regs_size = TAHITI_IO_MC_REGS_SIZE;
 230		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 231	}
 232
 233	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 234
 235	if (running == 0) {
 236		if (running) {
 237			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
 238			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
 239		}
 240
 241		/* reset the engine and set to writable */
 242		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
 243		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
 244
 245		/* load mc io regs */
 246		for (i = 0; i < regs_size; i++) {
 247			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
 248			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
 
 
 
 
 
 249		}
 250		/* load the MC ucode */
 251		fw_data = (const __be32 *)rdev->mc_fw->data;
 252		for (i = 0; i < ucode_size; i++)
 253			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
 
 
 
 254
 255		/* put the engine back into the active state */
 256		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
 257		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
 258		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
 259
 260		/* wait for training to complete */
 261		for (i = 0; i < rdev->usec_timeout; i++) {
 262			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
 263				break;
 264			udelay(1);
 265		}
 266		for (i = 0; i < rdev->usec_timeout; i++) {
 267			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
 268				break;
 269			udelay(1);
 270		}
 271
 272		if (running)
 273			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
 274	}
 275
 276	return 0;
 277}
 278
 279static int si_init_microcode(struct radeon_device *rdev)
 280{
 281	struct platform_device *pdev;
 282	const char *chip_name;
 283	const char *rlc_chip_name;
 284	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
 
 285	char fw_name[30];
 286	int err;
 
 287
 288	DRM_DEBUG("\n");
 289
 290	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
 291	err = IS_ERR(pdev);
 292	if (err) {
 293		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
 294		return -EINVAL;
 295	}
 296
 297	switch (rdev->family) {
 298	case CHIP_TAHITI:
 299		chip_name = "TAHITI";
 300		rlc_chip_name = "TAHITI";
 301		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
 302		me_req_size = SI_PM4_UCODE_SIZE * 4;
 303		ce_req_size = SI_CE_UCODE_SIZE * 4;
 304		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
 305		mc_req_size = SI_MC_UCODE_SIZE * 4;
 
 
 306		break;
 307	case CHIP_PITCAIRN:
 308		chip_name = "PITCAIRN";
 309		rlc_chip_name = "PITCAIRN";
 310		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
 311		me_req_size = SI_PM4_UCODE_SIZE * 4;
 312		ce_req_size = SI_CE_UCODE_SIZE * 4;
 313		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
 314		mc_req_size = SI_MC_UCODE_SIZE * 4;
 
 
 315		break;
 316	case CHIP_VERDE:
 317		chip_name = "VERDE";
 318		rlc_chip_name = "VERDE";
 319		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
 320		me_req_size = SI_PM4_UCODE_SIZE * 4;
 321		ce_req_size = SI_CE_UCODE_SIZE * 4;
 322		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
 323		mc_req_size = SI_MC_UCODE_SIZE * 4;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 324		break;
 325	default: BUG();
 326	}
 327
 328	DRM_INFO("Loading %s Microcode\n", chip_name);
 329
 330	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
 331	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
 332	if (err)
 333		goto out;
 334	if (rdev->pfp_fw->size != pfp_req_size) {
 335		printk(KERN_ERR
 336		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
 337		       rdev->pfp_fw->size, fw_name);
 338		err = -EINVAL;
 339		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 340	}
 341
 342	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
 343	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
 344	if (err)
 345		goto out;
 346	if (rdev->me_fw->size != me_req_size) {
 347		printk(KERN_ERR
 348		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
 349		       rdev->me_fw->size, fw_name);
 350		err = -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 351	}
 352
 353	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
 354	err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
 355	if (err)
 356		goto out;
 357	if (rdev->ce_fw->size != ce_req_size) {
 358		printk(KERN_ERR
 359		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
 360		       rdev->ce_fw->size, fw_name);
 361		err = -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 362	}
 363
 364	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
 365	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
 366	if (err)
 367		goto out;
 368	if (rdev->rlc_fw->size != rlc_req_size) {
 369		printk(KERN_ERR
 370		       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
 371		       rdev->rlc_fw->size, fw_name);
 372		err = -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 373	}
 374
 375	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 376	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
 377	if (err)
 378		goto out;
 379	if (rdev->mc_fw->size != mc_req_size) {
 380		printk(KERN_ERR
 381		       "si_mc: Bogus length %zu in firmware \"%s\"\n",
 382		       rdev->mc_fw->size, fw_name);
 383		err = -EINVAL;
 
 
 384	}
 385
 386out:
 387	platform_device_unregister(pdev);
 388
 389	if (err) {
 390		if (err != -EINVAL)
 391			printk(KERN_ERR
 392			       "si_cp: Failed to load firmware \"%s\"\n",
 393			       fw_name);
 394		release_firmware(rdev->pfp_fw);
 395		rdev->pfp_fw = NULL;
 396		release_firmware(rdev->me_fw);
 397		rdev->me_fw = NULL;
 398		release_firmware(rdev->ce_fw);
 399		rdev->ce_fw = NULL;
 400		release_firmware(rdev->rlc_fw);
 401		rdev->rlc_fw = NULL;
 402		release_firmware(rdev->mc_fw);
 403		rdev->mc_fw = NULL;
 
 
 404	}
 405	return err;
 406}
 407
 408/* watermark setup */
 409static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
 410				   struct radeon_crtc *radeon_crtc,
 411				   struct drm_display_mode *mode,
 412				   struct drm_display_mode *other_mode)
 413{
 414	u32 tmp;
 
 415	/*
 416	 * Line Buffer Setup
 417	 * There are 3 line buffers, each one shared by 2 display controllers.
 418	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 419	 * the display controllers.  The paritioning is done via one of four
 420	 * preset allocations specified in bits 21:20:
 421	 *  0 - half lb
 422	 *  2 - whole lb, other crtc must be disabled
 423	 */
 424	/* this can get tricky if we have two large displays on a paired group
 425	 * of crtcs.  Ideally for multiple large displays we'd assign them to
 426	 * non-linked crtcs for maximum line buffer allocation.
 427	 */
 428	if (radeon_crtc->base.enabled && mode) {
 429		if (other_mode)
 430			tmp = 0; /* 1/2 */
 431		else
 
 432			tmp = 2; /* whole */
 433	} else
 
 
 434		tmp = 0;
 
 
 435
 436	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
 437	       DC_LB_MEMORY_CONFIG(tmp));
 438
 
 
 
 
 
 
 
 
 
 439	if (radeon_crtc->base.enabled && mode) {
 440		switch (tmp) {
 441		case 0:
 442		default:
 443			return 4096 * 2;
 444		case 2:
 445			return 8192 * 2;
 446		}
 447	}
 448
 449	/* controller not enabled, so no lb used */
 450	return 0;
 451}
 452
 453static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
 454{
 455	u32 tmp = RREG32(MC_SHARED_CHMAP);
 456
 457	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
 458	case 0:
 459	default:
 460		return 1;
 461	case 1:
 462		return 2;
 463	case 2:
 464		return 4;
 465	case 3:
 466		return 8;
 467	case 4:
 468		return 3;
 469	case 5:
 470		return 6;
 471	case 6:
 472		return 10;
 473	case 7:
 474		return 12;
 475	case 8:
 476		return 16;
 477	}
 478}
 479
/* Input parameters for the DCE6 display watermark calculations
 * (the dce6_*_bandwidth/latency helpers below).
 */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
 495
 496static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
 497{
 498	/* Calculate raw DRAM Bandwidth */
 499	fixed20_12 dram_efficiency; /* 0.7 */
 500	fixed20_12 yclk, dram_channels, bandwidth;
 501	fixed20_12 a;
 502
 503	a.full = dfixed_const(1000);
 504	yclk.full = dfixed_const(wm->yclk);
 505	yclk.full = dfixed_div(yclk, a);
 506	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 507	a.full = dfixed_const(10);
 508	dram_efficiency.full = dfixed_const(7);
 509	dram_efficiency.full = dfixed_div(dram_efficiency, a);
 510	bandwidth.full = dfixed_mul(dram_channels, yclk);
 511	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 512
 513	return dfixed_trunc(bandwidth);
 514}
 515
 516static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 517{
 518	/* Calculate DRAM Bandwidth and the part allocated to display. */
 519	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 520	fixed20_12 yclk, dram_channels, bandwidth;
 521	fixed20_12 a;
 522
 523	a.full = dfixed_const(1000);
 524	yclk.full = dfixed_const(wm->yclk);
 525	yclk.full = dfixed_div(yclk, a);
 526	dram_channels.full = dfixed_const(wm->dram_channels * 4);
 527	a.full = dfixed_const(10);
 528	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
 529	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 530	bandwidth.full = dfixed_mul(dram_channels, yclk);
 531	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 532
 533	return dfixed_trunc(bandwidth);
 534}
 535
 536static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
 537{
 538	/* Calculate the display Data return Bandwidth */
 539	fixed20_12 return_efficiency; /* 0.8 */
 540	fixed20_12 sclk, bandwidth;
 541	fixed20_12 a;
 542
 543	a.full = dfixed_const(1000);
 544	sclk.full = dfixed_const(wm->sclk);
 545	sclk.full = dfixed_div(sclk, a);
 546	a.full = dfixed_const(10);
 547	return_efficiency.full = dfixed_const(8);
 548	return_efficiency.full = dfixed_div(return_efficiency, a);
 549	a.full = dfixed_const(32);
 550	bandwidth.full = dfixed_mul(a, sclk);
 551	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 552
 553	return dfixed_trunc(bandwidth);
 554}
 555
 556static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
 557{
 558	return 32;
 559}
 560
/* DMIF request bandwidth: the smaller of (bytes_per_request/2 * disp_clk)
 * and (bytes_per_request * sclk), scaled by the 0.8 request efficiency.
 * All math is 20.12 fixed point; clocks are converted from kHz to MHz.
 */
static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, sclk, bandwidth;
	fixed20_12 a, b1, b2;
	u32 min_bandwidth;

	/* candidate 1: half a request per display clock */
	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
	b1.full = dfixed_mul(a, disp_clk);

	/* candidate 2: one request per engine clock */
	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
	b2.full = dfixed_mul(a, sclk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));

	a.full = dfixed_const(min_bandwidth);
	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
 592
 593static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
 594{
 595	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
 596	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
 597	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
 598	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
 599
 600	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 601}
 602
 603static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
 604{
 605	/* Calculate the display mode Average Bandwidth
 606	 * DisplayMode should contain the source and destination dimensions,
 607	 * timing, etc.
 608	 */
 609	fixed20_12 bpp;
 610	fixed20_12 line_time;
 611	fixed20_12 src_width;
 612	fixed20_12 bandwidth;
 613	fixed20_12 a;
 614
 615	a.full = dfixed_const(1000);
 616	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 617	line_time.full = dfixed_div(line_time, a);
 618	bpp.full = dfixed_const(wm->bytes_per_pixel);
 619	src_width.full = dfixed_const(wm->src_width);
 620	bandwidth.full = dfixed_mul(src_width, bpp);
 621	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 622	bandwidth.full = dfixed_div(bandwidth, line_time);
 623
 624	return dfixed_trunc(bandwidth);
 625}
 626
/* Compute the latency watermark (ns) for a head: the worst-case memory
 * latency seen by the display pipe, padded by the extra line-fill time
 * when the line buffer cannot be refilled within the active period.
 */
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	/* NOTE(review): the divisions below run before the num_heads == 0
	 * guard; if available_bandwidth can ever be 0 this divides by
	 * zero — confirm against callers. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling / many taps / interlace needs 4 source lines
	 * per destination line, otherwise 2 */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* this head's share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* dmif-size-limited fill rate */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* display-clock-limited fill rate */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to refill one destination line's worth of source data */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
 689
 690static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
 691{
 692	if (dce6_average_bandwidth(wm) <=
 693	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
 694		return true;
 695	else
 696		return false;
 697};
 698
 699static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
 700{
 701	if (dce6_average_bandwidth(wm) <=
 702	    (dce6_available_bandwidth(wm) / wm->num_heads))
 703		return true;
 704	else
 705		return false;
 706};
 707
 708static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
 709{
 710	u32 lb_partitions = wm->lb_size / wm->src_width;
 711	u32 line_time = wm->active_time + wm->blank_time;
 712	u32 latency_tolerant_lines;
 713	u32 latency_hiding;
 714	fixed20_12 a;
 715
 716	a.full = dfixed_const(1);
 717	if (wm->vsc.full > a.full)
 718		latency_tolerant_lines = 1;
 719	else {
 720		if (lb_partitions <= (wm->vtaps + 1))
 721			latency_tolerant_lines = 1;
 722		else
 723			latency_tolerant_lines = 2;
 724	}
 725
 726	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 727
 728	if (dce6_latency_watermark(wm) <= latency_hiding)
 729		return true;
 730	else
 731		return false;
 732}
 733
/**
 * dce6_program_watermarks - program display watermarks for one crtc
 * @rdev: radeon_device pointer
 * @radeon_crtc: crtc to program
 * @lb_size: line buffer size granted to this crtc
 * @num_heads: number of active crtcs
 *
 * Fills a dce6_wm_params from the crtc's current mode, computes the
 * latency watermarks and priority marks, then programs the arbitration
 * and priority registers.  Watermark A and B are written with the same
 * clocks here (no low-clock values are substituted for B).
 */
static void dce6_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* describe the mode for the watermark helpers */
		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		if (rdev->family == CHIP_ARUBA)
			wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			wm.dram_channels = si_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !dce6_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A: watermark_a * pixel clock * hsc / 16 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B: same computation with watermark_b */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}
 843
 844void dce6_bandwidth_update(struct radeon_device *rdev)
 845{
 846	struct drm_display_mode *mode0 = NULL;
 847	struct drm_display_mode *mode1 = NULL;
 848	u32 num_heads = 0, lb_size;
 849	int i;
 850
 
 
 
 851	radeon_update_display_priority(rdev);
 852
 853	for (i = 0; i < rdev->num_crtc; i++) {
 854		if (rdev->mode_info.crtcs[i]->base.enabled)
 855			num_heads++;
 856	}
 857	for (i = 0; i < rdev->num_crtc; i += 2) {
 858		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
 859		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
 860		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
 861		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
 862		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
 863		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
 864	}
 865}
 866
 867/*
 868 * Core functions
 869 */
/*
 * si_tiling_mode_table_init - program the GB_TILE_MODE registers
 *
 * Fills all 32 GB_TILE_MODE slots with per-surface-type tiling
 * parameters (array mode, micro tile mode, pipe config, tile split,
 * bank geometry, macro tile aspect).  The values are fixed tables
 * chosen per ASIC family; only the "split equal to row size" entries
 * depend on the probed memory row size.  Slots not covered by a case
 * label (18-20, 26-31) are written as 0.
 */
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
	const u32 num_tile_mode_states = 32;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;

	/* tile split for the "split equal to row size" entries (cases 5-7, 17)
	 * tracks the DRAM row size computed in si_gpu_init */
	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	/* Tahiti/Pitcairn use the 8-pipe (P8_32x32_8x16) config */
	if ((rdev->family == CHIP_TAHITI) ||
	    (rdev->family == CHIP_PITCAIRN)) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:  /* non-AA compressed depth or any compressed stencil */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 1:  /* 2xAA/4xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 2:  /* 8xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 8:  /* 1D and 1D Array Surfaces */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 9:  /* Displayable maps. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 10:  /* Display 8bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 11:  /* Display 16bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 12:  /* Display 32bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 13:  /* Thin. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 14:  /* Thin 8 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 15:  /* Thin 16 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 16:  /* Thin 32 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 17:  /* Thin 64 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			case 21:  /* 8 bpp PRT. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 22:  /* 16 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 23:  /* 32 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 24:  /* 64 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 25:  /* 128 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
						 NUM_BANKS(ADDR_SURF_8_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else if (rdev->family == CHIP_VERDE) {
		/* Verde is a 4-pipe part (P4_8x16), except for the PRT
		 * entries (21-25) which keep the 8-pipe config */
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:  /* non-AA compressed depth or any compressed stencil */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 1:  /* 2xAA/4xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 2:  /* 8xAA compressed depth only */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 8:  /* 1D and 1D Array Surfaces */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 9:  /* Displayable maps. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 10:  /* Display 8bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 11:  /* Display 16bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 12:  /* Display 32bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 13:  /* Thin. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 14:  /* Thin 8 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 15:  /* Thin 16 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 16:  /* Thin 32 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 17:  /* Thin 64 bpp. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 21:  /* 8 bpp PRT. */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 22:  /* 16 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
				break;
			case 23:  /* 32 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 24:  /* 64 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 NUM_BANKS(ADDR_SURF_16_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
				break;
			case 25:  /* 128 bpp PRT */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
						 NUM_BANKS(ADDR_SURF_8_BANK) |
						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
	} else
		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}
1370
1371static void si_select_se_sh(struct radeon_device *rdev,
1372			    u32 se_num, u32 sh_num)
1373{
1374	u32 data = INSTANCE_BROADCAST_WRITES;
1375
1376	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1377		data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1378	else if (se_num == 0xffffffff)
1379		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1380	else if (sh_num == 0xffffffff)
1381		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1382	else
1383		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1384	WREG32(GRBM_GFX_INDEX, data);
1385}
1386
1387static u32 si_create_bitmask(u32 bit_width)
1388{
1389	u32 i, mask = 0;
1390
1391	for (i = 0; i < bit_width; i++) {
1392		mask <<= 1;
1393		mask |= 1;
1394	}
1395	return mask;
1396}
1397
1398static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
1399{
1400	u32 data, mask;
1401
1402	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1403	if (data & 1)
1404		data &= INACTIVE_CUS_MASK;
1405	else
1406		data = 0;
1407	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1408
1409	data >>= INACTIVE_CUS_SHIFT;
1410
1411	mask = si_create_bitmask(cu_per_sh);
1412
1413	return ~data & mask;
1414}
1415
1416static void si_setup_spi(struct radeon_device *rdev,
1417			 u32 se_num, u32 sh_per_se,
1418			 u32 cu_per_sh)
1419{
1420	int i, j, k;
1421	u32 data, mask, active_cu;
1422
1423	for (i = 0; i < se_num; i++) {
1424		for (j = 0; j < sh_per_se; j++) {
1425			si_select_se_sh(rdev, i, j);
1426			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
1427			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
1428
1429			mask = 1;
1430			for (k = 0; k < 16; k++) {
1431				mask <<= k;
1432				if (active_cu & mask) {
1433					data &= ~mask;
1434					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
1435					break;
1436				}
1437			}
1438		}
1439	}
1440	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1441}
1442
1443static u32 si_get_rb_disabled(struct radeon_device *rdev,
1444			      u32 max_rb_num, u32 se_num,
1445			      u32 sh_per_se)
1446{
1447	u32 data, mask;
1448
1449	data = RREG32(CC_RB_BACKEND_DISABLE);
1450	if (data & 1)
1451		data &= BACKEND_DISABLE_MASK;
1452	else
1453		data = 0;
1454	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1455
1456	data >>= BACKEND_DISABLE_SHIFT;
1457
1458	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
1459
1460	return data & mask;
1461}
1462
/*
 * si_setup_rb - program the raster config for the enabled render backends
 *
 * Builds a packed bitmap of disabled RBs across all SE/SH pairs,
 * inverts it into an enabled-RB bitmap, and then programs
 * PA_SC_RASTER_CONFIG per shader engine with an RB mapping derived
 * from which of each pair of RBs is usable.
 */
static void si_setup_rb(struct radeon_device *rdev,
			u32 se_num, u32 sh_per_se,
			u32 max_rb_num)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* collect per-SH disabled bits into one bitmap, one field of
	 * TAHITI_RB_BITMAP_WIDTH_PER_SH bits per SE/SH pair */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert: set a bit for every RB that is NOT disabled */
	mask = 1;
	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	/* choose an RB map per SH based on which of its two RBs are
	 * enabled (2 bits of enabled_rbs consumed per SH) */
	for (i = 0; i < se_num; i++) {
		si_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
1510
1511static void si_gpu_init(struct radeon_device *rdev)
1512{
1513	u32 gb_addr_config = 0;
1514	u32 mc_shared_chmap, mc_arb_ramcfg;
1515	u32 sx_debug_1;
1516	u32 hdp_host_path_cntl;
1517	u32 tmp;
1518	int i, j;
1519
1520	switch (rdev->family) {
1521	case CHIP_TAHITI:
1522		rdev->config.si.max_shader_engines = 2;
1523		rdev->config.si.max_tile_pipes = 12;
1524		rdev->config.si.max_cu_per_sh = 8;
1525		rdev->config.si.max_sh_per_se = 2;
1526		rdev->config.si.max_backends_per_se = 4;
1527		rdev->config.si.max_texture_channel_caches = 12;
1528		rdev->config.si.max_gprs = 256;
1529		rdev->config.si.max_gs_threads = 32;
1530		rdev->config.si.max_hw_contexts = 8;
1531
1532		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
1533		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1534		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1535		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1536		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1537		break;
1538	case CHIP_PITCAIRN:
1539		rdev->config.si.max_shader_engines = 2;
1540		rdev->config.si.max_tile_pipes = 8;
1541		rdev->config.si.max_cu_per_sh = 5;
1542		rdev->config.si.max_sh_per_se = 2;
1543		rdev->config.si.max_backends_per_se = 4;
1544		rdev->config.si.max_texture_channel_caches = 8;
1545		rdev->config.si.max_gprs = 256;
1546		rdev->config.si.max_gs_threads = 32;
1547		rdev->config.si.max_hw_contexts = 8;
1548
1549		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
1550		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1551		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1552		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1553		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1554		break;
1555	case CHIP_VERDE:
1556	default:
1557		rdev->config.si.max_shader_engines = 1;
1558		rdev->config.si.max_tile_pipes = 4;
1559		rdev->config.si.max_cu_per_sh = 2;
1560		rdev->config.si.max_sh_per_se = 2;
1561		rdev->config.si.max_backends_per_se = 4;
1562		rdev->config.si.max_texture_channel_caches = 4;
1563		rdev->config.si.max_gprs = 256;
1564		rdev->config.si.max_gs_threads = 32;
1565		rdev->config.si.max_hw_contexts = 8;
1566
1567		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
1568		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
1569		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1570		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1571		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1572		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1573	}
1574
1575	/* Initialize HDP */
1576	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1577		WREG32((0x2c14 + j), 0x00000000);
1578		WREG32((0x2c18 + j), 0x00000000);
1579		WREG32((0x2c1c + j), 0x00000000);
1580		WREG32((0x2c20 + j), 0x00000000);
1581		WREG32((0x2c24 + j), 0x00000000);
1582	}
1583
1584	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
 
1585
1586	evergreen_fix_pci_max_read_req_size(rdev);
1587
1588	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1589
1590	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1591	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1592
1593	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
1594	rdev->config.si.mem_max_burst_length_bytes = 256;
1595	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
1596	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1597	if (rdev->config.si.mem_row_size_in_kb > 4)
1598		rdev->config.si.mem_row_size_in_kb = 4;
1599	/* XXX use MC settings? */
1600	rdev->config.si.shader_engine_tile_size = 32;
1601	rdev->config.si.num_gpus = 1;
1602	rdev->config.si.multi_gpu_tile_size = 64;
1603
1604	/* fix up row size */
1605	gb_addr_config &= ~ROW_SIZE_MASK;
1606	switch (rdev->config.si.mem_row_size_in_kb) {
1607	case 1:
1608	default:
1609		gb_addr_config |= ROW_SIZE(0);
1610		break;
1611	case 2:
1612		gb_addr_config |= ROW_SIZE(1);
1613		break;
1614	case 4:
1615		gb_addr_config |= ROW_SIZE(2);
1616		break;
1617	}
1618
1619	/* setup tiling info dword.  gb_addr_config is not adequate since it does
1620	 * not have bank info, so create a custom tiling dword.
1621	 * bits 3:0   num_pipes
1622	 * bits 7:4   num_banks
1623	 * bits 11:8  group_size
1624	 * bits 15:12 row_size
1625	 */
1626	rdev->config.si.tile_config = 0;
1627	switch (rdev->config.si.num_tile_pipes) {
1628	case 1:
1629		rdev->config.si.tile_config |= (0 << 0);
1630		break;
1631	case 2:
1632		rdev->config.si.tile_config |= (1 << 0);
1633		break;
1634	case 4:
1635		rdev->config.si.tile_config |= (2 << 0);
1636		break;
1637	case 8:
1638	default:
1639		/* XXX what about 12? */
1640		rdev->config.si.tile_config |= (3 << 0);
1641		break;
1642	}
1643	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
1644	case 0: /* four banks */
1645		rdev->config.si.tile_config |= 0 << 4;
1646		break;
1647	case 1: /* eight banks */
1648		rdev->config.si.tile_config |= 1 << 4;
1649		break;
1650	case 2: /* sixteen banks */
1651	default:
1652		rdev->config.si.tile_config |= 2 << 4;
1653		break;
1654	}
1655	rdev->config.si.tile_config |=
1656		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1657	rdev->config.si.tile_config |=
1658		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
1659
1660	WREG32(GB_ADDR_CONFIG, gb_addr_config);
1661	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 
1662	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
 
 
 
 
 
 
1663
1664	si_tiling_mode_table_init(rdev);
1665
1666	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
1667		    rdev->config.si.max_sh_per_se,
1668		    rdev->config.si.max_backends_per_se);
1669
1670	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
1671		     rdev->config.si.max_sh_per_se,
1672		     rdev->config.si.max_cu_per_sh);
1673
 
 
 
 
 
 
 
1674
1675	/* set HW defaults for 3D engine */
1676	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
1677				     ROQ_IB2_START(0x2b)));
1678	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
1679
1680	sx_debug_1 = RREG32(SX_DEBUG_1);
1681	WREG32(SX_DEBUG_1, sx_debug_1);
1682
1683	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
1684
1685	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
1686				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
1687				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
1688				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
1689
1690	WREG32(VGT_NUM_INSTANCES, 1);
1691
1692	WREG32(CP_PERFMON_CNTL, 0);
1693
1694	WREG32(SQ_CONFIG, 0);
1695
1696	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
1697					  FORCE_EOV_MAX_REZ_CNT(255)));
1698
1699	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
1700	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
1701
1702	WREG32(VGT_GS_VERTEX_REUSE, 16);
1703	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1704
1705	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
1706	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
1707	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
1708	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
1709	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
1710	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
1711	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
1712	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
1713
1714	tmp = RREG32(HDP_MISC_CNTL);
1715	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
1716	WREG32(HDP_MISC_CNTL, tmp);
1717
1718	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1719	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1720
1721	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
1722
1723	udelay(50);
1724}
1725
1726/*
1727 * GPU scratch registers helpers function.
1728 */
1729static void si_scratch_init(struct radeon_device *rdev)
1730{
1731	int i;
1732
1733	rdev->scratch.num_reg = 7;
1734	rdev->scratch.reg_base = SCRATCH_REG0;
1735	for (i = 0; i < rdev->scratch.num_reg; i++) {
1736		rdev->scratch.free[i] = true;
1737		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
1738	}
1739}
1740
/**
 * si_fence_ring_emit - emit a fence sequence number on the gfx ring
 * @rdev: radeon device
 * @fence: fence to emit
 *
 * Flushes GPU caches and then emits an EVENT_WRITE_EOP packet that writes
 * the fence sequence number to the fence driver's GPU address and raises
 * an interrupt.  Packet order and dword layout are fixed by the CP PM4
 * format; do not reorder.
 */
void si_fence_ring_emit(struct radeon_device *rdev,
			struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF); /* CP_COHER_SIZE: full range */
	radeon_ring_write(ring, 0);          /* CP_COHER_BASE */
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	/* DATA_SEL(1)/INT_SEL(2): presumably "write 32-bit data, int on confirm" — TODO confirm against PM4 spec */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
1767
1768/*
1769 * IB stuff
1770 */
/**
 * si_ring_ib_execute - schedule an indirect buffer on the ring
 * @rdev: radeon device
 * @ib: indirect buffer to execute
 *
 * Emits an INDIRECT_BUFFER (or INDIRECT_BUFFER_CONST for const IBs)
 * packet pointing at the IB's GPU address, then flushes the read
 * caches for the IB's VM id.  Packet order is fixed; do not reorder.
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	u32 header;

	/* const IBs go to the CE, regular IBs to the DE */
	if (ib->is_const_ib)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC)); /* address must be dword aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm_id);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF); /* CP_COHER_SIZE: full range */
	radeon_ring_write(ring, 0);          /* CP_COHER_BASE */
	radeon_ring_write(ring, 10); /* poll interval */
}
1803
1804/*
1805 * CP.
1806 */
1807static void si_cp_enable(struct radeon_device *rdev, bool enable)
1808{
1809	if (enable)
1810		WREG32(CP_ME_CNTL, 0);
1811	else {
1812		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
1813		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
1814		WREG32(SCRATCH_UMSK, 0);
 
 
 
1815	}
1816	udelay(50);
1817}
1818
/**
 * si_cp_load_microcode - load the CP microcode onto the chip
 * @rdev: radeon device
 *
 * Halts the CP, then streams the PFP, CE and ME firmware images (big
 * endian dwords) into the respective ucode RAMs.  Each write address
 * register auto-increments, so it is reset to 0 before and after each
 * upload.  Returns 0 on success, -EINVAL if firmware is not loaded.
 */
static int si_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	/* firmware must have been fetched by si_init_microcode() first */
	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	si_cp_enable(rdev, false);

	/* PFP */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* CE */
	fw_data = (const __be32 *)rdev->ce_fw->data;
	WREG32(CP_CE_UCODE_ADDR, 0);
	for (i = 0; i < SI_CE_UCODE_SIZE; i++)
		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_CE_UCODE_ADDR, 0);

	/* ME */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_ME_RAM_WADDR, 0);

	/* leave all ucode address registers zeroed */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_CE_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
1856
1857static int si_cp_start(struct radeon_device *rdev)
1858{
1859	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1860	int r, i;
1861
1862	r = radeon_ring_lock(rdev, ring, 7 + 4);
1863	if (r) {
1864		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1865		return r;
1866	}
1867	/* init the CP */
1868	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1869	radeon_ring_write(ring, 0x1);
1870	radeon_ring_write(ring, 0x0);
1871	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
1872	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1873	radeon_ring_write(ring, 0);
1874	radeon_ring_write(ring, 0);
1875
1876	/* init the CE partitions */
1877	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
1878	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
1879	radeon_ring_write(ring, 0xc000);
1880	radeon_ring_write(ring, 0xe000);
1881	radeon_ring_unlock_commit(rdev, ring);
1882
1883	si_cp_enable(rdev, true);
1884
1885	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
1886	if (r) {
1887		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1888		return r;
1889	}
1890
1891	/* setup clear context state */
1892	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1893	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1894
1895	for (i = 0; i < si_default_size; i++)
1896		radeon_ring_write(ring, si_default_state[i]);
1897
1898	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1899	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1900
1901	/* set clear context state */
1902	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1903	radeon_ring_write(ring, 0);
1904
1905	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
1906	radeon_ring_write(ring, 0x00000316);
1907	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1908	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
1909
1910	radeon_ring_unlock_commit(rdev, ring);
1911
1912	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
1913		ring = &rdev->ring[i];
1914		r = radeon_ring_lock(rdev, ring, 2);
1915
1916		/* clear the compute context state */
1917		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
1918		radeon_ring_write(ring, 0);
1919
1920		radeon_ring_unlock_commit(rdev, ring);
1921	}
1922
1923	return 0;
1924}
1925
/**
 * si_cp_fini - tear down the CP rings
 * @rdev: radeon device
 *
 * Halts the CP first, then frees the gfx ring and both compute rings.
 */
static void si_cp_fini(struct radeon_device *rdev)
{
	si_cp_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
}
1933
/**
 * si_cp_resume - program and start the three CP ring buffers
 * @rdev: radeon device
 *
 * Soft-resets the CP-related gfx blocks, then programs the ring buffer
 * size, read/write pointers, write-back addresses and base for the gfx
 * ring (RB0) and the two compute rings (RB1/RB2), starts the CP via
 * si_cp_start() and ring-tests all three rings.
 *
 * Returns 0 on success (compute ring test failures only mark that ring
 * not ready), or the gfx ring test error, which is fatal.
 */
static int si_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET); /* read back to post the write */
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, 0);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address wether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp); /* drop RB_RPTR_WR_ENA again */

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1  - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address wether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address wether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	si_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		/* gfx ring failure is fatal */
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}
	/* compute ring failures just disable the affected ring */
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	}

	return 0;
}
2069
/**
 * si_gpu_is_lockup - check whether a ring appears locked up
 * @rdev: radeon device
 * @ring: ring to check
 *
 * Samples the GRBM/SRBM status registers.  Only GRBM_STATUS.GUI_ACTIVE
 * is actually consulted here; the other reads are unused locally —
 * NOTE(review): presumably kept for their read side effects or as
 * debug aids, verify before removing.
 *
 * Returns true if the ring is considered locked up, false otherwise.
 */
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status, grbm_status2;
	u32 grbm_status_se0, grbm_status_se1;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status2 = RREG32(GRBM_STATUS2);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		/* GPU idle: refresh the lockup tracking state and report no hang */
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
2089
/**
 * si_gpu_soft_reset - soft reset the gfx blocks
 * @rdev: radeon device
 *
 * If the GPU is busy (GUI_ACTIVE set), stops the MC, halts the CP,
 * pulses GRBM_SOFT_RESET across all gfx blocks and resumes the MC.
 * Status registers are dumped before and after for diagnostics.
 *
 * Returns 0 (no-op if the GPU was already idle).
 */
static int si_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	/* nothing to do when the gfx engine is idle */
	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	/* quiesce memory traffic before resetting */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_BCI |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET); /* read back to post the write */
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
2152
/**
 * si_asic_reset - reset the ASIC
 * @rdev: radeon device
 *
 * Currently only a gfx soft reset is implemented for SI.
 * Returns 0 on success or if the GPU was already idle.
 */
int si_asic_reset(struct radeon_device *rdev)
{
	return si_gpu_soft_reset(rdev);
}
2157
2158/* MC */
/**
 * si_mc_program - program the memory controller's VRAM layout
 * @rdev: radeon device
 *
 * Stops the MC, programs the system aperture and FB location from
 * rdev->mc, disables the AGP aperture (unused on SI) and resumes the
 * MC.  Also disables VGA memory access and the VGA render engine so
 * the driver owns VRAM.  Register order is significant.
 */
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB location: top 16 bits = end, low 16 bits = start (in 16MB units) */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* AGP is unused: bottom above top disables the aperture */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
2206
2207/* SI MC address space is 40 bits */
2208static void si_vram_location(struct radeon_device *rdev,
2209			     struct radeon_mc *mc, u64 base)
2210{
2211	mc->vram_start = base;
2212	if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
2213		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
2214		mc->real_vram_size = mc->aper_size;
2215		mc->mc_vram_size = mc->aper_size;
2216	}
2217	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2218	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
2219			mc->mc_vram_size >> 20, mc->vram_start,
2220			mc->vram_end, mc->real_vram_size >> 20);
2221}
2222
2223static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
2224{
2225	u64 size_af, size_bf;
2226
2227	size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
2228	size_bf = mc->vram_start & ~mc->gtt_base_align;
2229	if (size_bf > size_af) {
2230		if (mc->gtt_size > size_bf) {
2231			dev_warn(rdev->dev, "limiting GTT\n");
2232			mc->gtt_size = size_bf;
2233		}
2234		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
2235	} else {
2236		if (mc->gtt_size > size_af) {
2237			dev_warn(rdev->dev, "limiting GTT\n");
2238			mc->gtt_size = size_af;
2239		}
2240		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
2241	}
2242	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
2243	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
2244			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
2245}
2246
2247static void si_vram_gtt_location(struct radeon_device *rdev,
2248				 struct radeon_mc *mc)
2249{
2250	if (mc->mc_vram_size > 0xFFC0000000ULL) {
2251		/* leave room for at least 1024M GTT */
2252		dev_warn(rdev->dev, "limiting VRAM\n");
2253		mc->real_vram_size = 0xFFC0000000ULL;
2254		mc->mc_vram_size = 0xFFC0000000ULL;
2255	}
2256	si_vram_location(rdev, &rdev->mc, 0);
2257	rdev->mc.gtt_base_align = 0;
2258	si_gtt_location(rdev, mc);
2259}
2260
2261static int si_mc_init(struct radeon_device *rdev)
2262{
2263	u32 tmp;
2264	int chansize, numchan;
2265
2266	/* Get VRAM informations */
2267	rdev->mc.vram_is_ddr = true;
2268	tmp = RREG32(MC_ARB_RAMCFG);
2269	if (tmp & CHANSIZE_OVERRIDE) {
2270		chansize = 16;
2271	} else if (tmp & CHANSIZE_MASK) {
2272		chansize = 64;
2273	} else {
2274		chansize = 32;
2275	}
2276	tmp = RREG32(MC_SHARED_CHMAP);
2277	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2278	case 0:
2279	default:
2280		numchan = 1;
2281		break;
2282	case 1:
2283		numchan = 2;
2284		break;
2285	case 2:
2286		numchan = 4;
2287		break;
2288	case 3:
2289		numchan = 8;
2290		break;
2291	case 4:
2292		numchan = 3;
2293		break;
2294	case 5:
2295		numchan = 6;
2296		break;
2297	case 6:
2298		numchan = 10;
2299		break;
2300	case 7:
2301		numchan = 12;
2302		break;
2303	case 8:
2304		numchan = 16;
2305		break;
2306	}
2307	rdev->mc.vram_width = numchan * chansize;
2308	/* Could aper size report 0 ? */
2309	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2310	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2311	/* size in MB on si */
2312	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2313	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 
 
 
 
 
 
 
2314	rdev->mc.visible_vram_size = rdev->mc.aper_size;
2315	si_vram_gtt_location(rdev, &rdev->mc);
2316	radeon_update_bandwidth_info(rdev);
2317
2318	return 0;
2319}
2320
2321/*
2322 * GART
2323 */
/**
 * si_pcie_gart_tlb_flush - flush the GART TLBs
 * @rdev: radeon device
 *
 * Flushes the HDP cache and invalidates VM context 0's TLB.
 */
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
2332
/**
 * si_pcie_gart_enable - set up the PCIE GART and VM contexts
 * @rdev: radeon device
 *
 * Pins the GART page table in VRAM, configures the L1 TLB and L2 VM
 * cache, programs VM context 0 as the system GART aperture, points VM
 * contexts 1-15 at the same page table, and flushes the TLBs.
 *
 * Returns 0 on success, -EINVAL if no GART table object exists, or a
 * pin error.
 */
int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) | /* NOTE(review): presumably a fragment/size field — TODO confirm against register spec */
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	/* faults in context0 land on the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* NOTE(review): undocumented registers — purpose unknown, left as-is */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	for (i = 1; i < 16; i++) {
		/* base addr regs for contexts 0-7 and 8-15 live in two banks */
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	si_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
2405
/**
 * si_pcie_gart_disable - disable the PCIE GART
 * @rdev: radeon device
 *
 * Disables all VM contexts, turns off the L1 TLB and L2 cache, and
 * unpins the GART table from VRAM.
 */
void si_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	radeon_gart_table_vram_unpin(rdev);
}
2424
/**
 * si_pcie_gart_fini - tear down the PCIE GART
 * @rdev: radeon device
 *
 * Disables the GART, then frees the table VRAM and the GART structures.
 */
void si_pcie_gart_fini(struct radeon_device *rdev)
{
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
2431
2432/* vm parser */
/**
 * si_vm_reg_valid - check whether a register may be written from a VM IB
 * @reg: register offset (bytes)
 *
 * Userspace command streams running under a VM may only touch context
 * registers and an explicit whitelist of config registers.  Anything
 * else is rejected (and logged) to protect global GPU state.
 *
 * Returns true if the register is safe to write, false otherwise.
 */
static bool si_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
	switch (reg) {
	case GRBM_GFX_INDEX:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_ESGS_RING_SIZE:
	case VGT_GSVS_RING_SIZE:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_TF_RING_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case VGT_TF_MEMORY_BASE:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQC_CACHES:
	case SPI_STATIC_THREAD_MGMT_1:
	case SPI_STATIC_THREAD_MGMT_2:
	case SPI_STATIC_THREAD_MGMT_3:
	case SPI_PS_MAX_WAVE_ID:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}
2472
/**
 * si_vm_packet3_ce_check - validate a PACKET3 on the constant engine
 * @rdev: radeon device
 * @ib: indirect buffer dwords
 * @pkt: parsed packet header
 *
 * Only a fixed set of CE packet opcodes is allowed from userspace;
 * none carry register writes, so no field inspection is needed.
 *
 * Returns 0 if the opcode is allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_ce_check(struct radeon_device *rdev,
				  u32 *ib, struct radeon_cs_packet *pkt)
{
	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_SET_CE_DE_COUNTERS:
	case PACKET3_LOAD_CONST_RAM:
	case PACKET3_WRITE_CONST_RAM:
	case PACKET3_WRITE_CONST_RAM_OFFSET:
	case PACKET3_DUMP_CONST_RAM:
	case PACKET3_INCREMENT_CE_COUNTER:
	case PACKET3_WAIT_ON_DE_COUNTER:
	case PACKET3_CE_WRITE:
		break;
	default:
		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
2494
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
 * si_vm_packet3_gfx_check - validate a PACKET3 on the gfx ring
 * @rdev: radeon device
 * @ib: indirect buffer dwords
 * @pkt: parsed packet header (pkt->idx indexes the header dword)
 *
 * Most gfx opcodes are allowed outright; packets that can write
 * registers (COPY_DATA, WRITE_DATA, COND_WRITE, COPY_DW,
 * SET_CONFIG_REG) have their destination register(s) checked against
 * the si_vm_reg_valid() whitelist.
 *
 * Returns 0 if the packet is allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
				   u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;       /* first dword after the header */
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDIRECT_MULTI:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* dest-select field 0 means a register destination */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4; /* dword offset -> byte offset */
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			if (idx_value & 0x10000) {
				/* single-register mode */
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				/* sequential registers: check each one */
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		/* bit 8: write destination is a register */
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		/* bit 1: destination is a register */
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
2606
2607static int si_vm_packet3_compute_check(struct radeon_device *rdev,
2608				       u32 *ib, struct radeon_cs_packet *pkt)
2609{
 
2610	u32 idx = pkt->idx + 1;
2611	u32 idx_value = ib[idx];
2612	u32 start_reg, reg, i;
2613
2614	switch (pkt->opcode) {
2615	case PACKET3_NOP:
2616	case PACKET3_SET_BASE:
2617	case PACKET3_CLEAR_STATE:
2618	case PACKET3_DISPATCH_DIRECT:
2619	case PACKET3_DISPATCH_INDIRECT:
2620	case PACKET3_ALLOC_GDS:
2621	case PACKET3_WRITE_GDS_RAM:
2622	case PACKET3_ATOMIC_GDS:
2623	case PACKET3_ATOMIC:
2624	case PACKET3_OCCLUSION_QUERY:
2625	case PACKET3_SET_PREDICATION:
2626	case PACKET3_COND_EXEC:
2627	case PACKET3_PRED_EXEC:
2628	case PACKET3_CONTEXT_CONTROL:
2629	case PACKET3_STRMOUT_BUFFER_UPDATE:
2630	case PACKET3_WAIT_REG_MEM:
2631	case PACKET3_MEM_WRITE:
2632	case PACKET3_PFP_SYNC_ME:
2633	case PACKET3_SURFACE_SYNC:
2634	case PACKET3_EVENT_WRITE:
2635	case PACKET3_EVENT_WRITE_EOP:
2636	case PACKET3_EVENT_WRITE_EOS:
2637	case PACKET3_SET_CONTEXT_REG:
2638	case PACKET3_SET_CONTEXT_REG_INDIRECT:
2639	case PACKET3_SET_SH_REG:
2640	case PACKET3_SET_SH_REG_OFFSET:
2641	case PACKET3_INCREMENT_DE_COUNTER:
2642	case PACKET3_WAIT_ON_CE_COUNTER:
2643	case PACKET3_WAIT_ON_AVAIL_BUFFER:
2644	case PACKET3_ME_WRITE:
2645		break;
2646	case PACKET3_COPY_DATA:
2647		if ((idx_value & 0xf00) == 0) {
2648			reg = ib[idx + 3] * 4;
2649			if (!si_vm_reg_valid(reg))
2650				return -EINVAL;
2651		}
2652		break;
2653	case PACKET3_WRITE_DATA:
2654		if ((idx_value & 0xf00) == 0) {
2655			start_reg = ib[idx + 1] * 4;
2656			if (idx_value & 0x10000) {
2657				if (!si_vm_reg_valid(start_reg))
2658					return -EINVAL;
2659			} else {
2660				for (i = 0; i < (pkt->count - 2); i++) {
2661					reg = start_reg + (4 * i);
2662					if (!si_vm_reg_valid(reg))
2663						return -EINVAL;
2664				}
2665			}
2666		}
2667		break;
2668	case PACKET3_COND_WRITE:
2669		if (idx_value & 0x100) {
2670			reg = ib[idx + 5] * 4;
2671			if (!si_vm_reg_valid(reg))
2672				return -EINVAL;
2673		}
2674		break;
2675	case PACKET3_COPY_DW:
2676		if (idx_value & 0x2) {
2677			reg = ib[idx + 3] * 4;
2678			if (!si_vm_reg_valid(reg))
2679				return -EINVAL;
2680		}
2681		break;
 
 
 
 
 
2682	default:
2683		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
2684		return -EINVAL;
2685	}
2686	return 0;
2687}
2688
2689int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
2690{
2691	int ret = 0;
2692	u32 idx = 0;
2693	struct radeon_cs_packet pkt;
2694
2695	do {
2696		pkt.idx = idx;
2697		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
2698		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
2699		pkt.one_reg_wr = 0;
2700		switch (pkt.type) {
2701		case PACKET_TYPE0:
2702			dev_err(rdev->dev, "Packet0 not allowed!\n");
2703			ret = -EINVAL;
2704			break;
2705		case PACKET_TYPE2:
2706			idx += 1;
2707			break;
2708		case PACKET_TYPE3:
2709			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
2710			if (ib->is_const_ib)
2711				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
2712			else {
2713				switch (ib->fence->ring) {
2714				case RADEON_RING_TYPE_GFX_INDEX:
2715					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
2716					break;
2717				case CAYMAN_RING_TYPE_CP1_INDEX:
2718				case CAYMAN_RING_TYPE_CP2_INDEX:
2719					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
2720					break;
2721				default:
2722					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring);
2723					ret = -EINVAL;
2724					break;
2725				}
2726			}
2727			idx += pkt.count + 2;
2728			break;
2729		default:
2730			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
2731			ret = -EINVAL;
2732			break;
2733		}
2734		if (ret)
 
 
 
 
 
 
2735			break;
 
2736	} while (idx < ib->length_dw);
2737
2738	return ret;
2739}
2740
2741/*
2742 * vm
2743 */
2744int si_vm_init(struct radeon_device *rdev)
2745{
2746	/* number of VMs */
2747	rdev->vm_manager.nvm = 16;
2748	/* base offset of vram pages */
2749	rdev->vm_manager.vram_base_offset = 0;
2750
2751	return 0;
2752}
2753
/**
 * si_vm_fini - tear down the VM manager
 * @rdev: radeon_device pointer
 *
 * Nothing to do on SI; si_vm_init() allocates no resources.
 */
void si_vm_fini(struct radeon_device *rdev)
{
}
2757
2758int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
 
 
 
 
 
 
 
 
 
 
2759{
2760	if (id < 8)
2761		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
2762	else
2763		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2),
2764		       vm->pt_gpu_addr >> 12);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2765	/* flush hdp cache */
2766	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 
 
 
 
 
 
2767	/* bits 0-15 are the VM contexts0-15 */
2768	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
2769	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2770}
2771
2772void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
2773{
2774	if (vm->id < 8)
2775		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
 
 
 
2776	else
2777		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
2778	/* flush hdp cache */
2779	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2780	/* bits 0-15 are the VM contexts0-15 */
2781	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2782}
2783
2784void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
 
2785{
2786	if (vm->id == -1)
2787		return;
 
 
 
 
 
 
 
 
 
2788
2789	/* flush hdp cache */
2790	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2791	/* bits 0-15 are the VM contexts0-15 */
2792	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
 
 
 
 
 
 
 
 
 
 
 
2793}
2794
2795/*
2796 * RLC
2797 */
2798void si_rlc_fini(struct radeon_device *rdev)
 
 
 
 
 
 
 
 
 
 
 
2799{
2800	int r;
 
 
 
 
 
 
 
 
 
 
 
 
2801
2802	/* save restore block */
2803	if (rdev->rlc.save_restore_obj) {
2804		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
2805		if (unlikely(r != 0))
2806			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
2807		radeon_bo_unpin(rdev->rlc.save_restore_obj);
2808		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
2809
2810		radeon_bo_unref(&rdev->rlc.save_restore_obj);
2811		rdev->rlc.save_restore_obj = NULL;
 
 
 
 
 
 
2812	}
 
2813
2814	/* clear state block */
2815	if (rdev->rlc.clear_state_obj) {
2816		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
2817		if (unlikely(r != 0))
2818			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
2819		radeon_bo_unpin(rdev->rlc.clear_state_obj);
2820		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
2821
2822		radeon_bo_unref(&rdev->rlc.clear_state_obj);
2823		rdev->rlc.clear_state_obj = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2824	}
2825}
2826
2827int si_rlc_init(struct radeon_device *rdev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2828{
2829	int r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2830
2831	/* save restore block */
2832	if (rdev->rlc.save_restore_obj == NULL) {
2833		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
2834				     RADEON_GEM_DOMAIN_VRAM, NULL,
2835				     &rdev->rlc.save_restore_obj);
2836		if (r) {
2837			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
2838			return r;
2839		}
2840	}
2841
2842	r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
2843	if (unlikely(r != 0)) {
2844		si_rlc_fini(rdev);
2845		return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2846	}
2847	r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
2848			  &rdev->rlc.save_restore_gpu_addr);
2849	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
2850	if (r) {
2851		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
2852		si_rlc_fini(rdev);
2853		return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2854	}
 
 
 
 
 
 
2855
2856	/* clear state block */
2857	if (rdev->rlc.clear_state_obj == NULL) {
2858		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
2859				     RADEON_GEM_DOMAIN_VRAM, NULL,
2860				     &rdev->rlc.clear_state_obj);
2861		if (r) {
2862			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
2863			si_rlc_fini(rdev);
2864			return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2865		}
2866	}
2867	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
2868	if (unlikely(r != 0)) {
2869		si_rlc_fini(rdev);
2870		return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2871	}
2872	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
2873			  &rdev->rlc.clear_state_gpu_addr);
2874	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
2875	if (r) {
2876		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
2877		si_rlc_fini(rdev);
2878		return r;
2879	}
 
 
 
 
 
 
 
 
2880
2881	return 0;
 
 
 
 
 
2882}
2883
2884static void si_rlc_stop(struct radeon_device *rdev)
2885{
2886	WREG32(RLC_CNTL, 0);
 
 
 
 
2887}
2888
2889static void si_rlc_start(struct radeon_device *rdev)
2890{
2891	WREG32(RLC_CNTL, RLC_ENABLE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2892}
2893
2894static int si_rlc_resume(struct radeon_device *rdev)
2895{
2896	u32 i;
2897	const __be32 *fw_data;
2898
2899	if (!rdev->rlc_fw)
2900		return -EINVAL;
2901
2902	si_rlc_stop(rdev);
2903
 
 
 
 
 
 
2904	WREG32(RLC_RL_BASE, 0);
2905	WREG32(RLC_RL_SIZE, 0);
2906	WREG32(RLC_LB_CNTL, 0);
2907	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
2908	WREG32(RLC_LB_CNTR_INIT, 0);
2909
2910	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
2911	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
2912
2913	WREG32(RLC_MC_CNTL, 0);
2914	WREG32(RLC_UCODE_CNTL, 0);
2915
2916	fw_data = (const __be32 *)rdev->rlc_fw->data;
2917	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
2918		WREG32(RLC_UCODE_ADDR, i);
2919		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2920	}
2921	WREG32(RLC_UCODE_ADDR, 0);
2922
 
 
2923	si_rlc_start(rdev);
2924
2925	return 0;
2926}
2927
2928static void si_enable_interrupts(struct radeon_device *rdev)
2929{
2930	u32 ih_cntl = RREG32(IH_CNTL);
2931	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2932
2933	ih_cntl |= ENABLE_INTR;
2934	ih_rb_cntl |= IH_RB_ENABLE;
2935	WREG32(IH_CNTL, ih_cntl);
2936	WREG32(IH_RB_CNTL, ih_rb_cntl);
2937	rdev->ih.enabled = true;
2938}
2939
2940static void si_disable_interrupts(struct radeon_device *rdev)
2941{
2942	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2943	u32 ih_cntl = RREG32(IH_CNTL);
2944
2945	ih_rb_cntl &= ~IH_RB_ENABLE;
2946	ih_cntl &= ~ENABLE_INTR;
2947	WREG32(IH_RB_CNTL, ih_rb_cntl);
2948	WREG32(IH_CNTL, ih_cntl);
2949	/* set rptr, wptr to 0 */
2950	WREG32(IH_RB_RPTR, 0);
2951	WREG32(IH_RB_WPTR, 0);
2952	rdev->ih.enabled = false;
2953	rdev->ih.wptr = 0;
2954	rdev->ih.rptr = 0;
2955}
2956
2957static void si_disable_interrupt_state(struct radeon_device *rdev)
2958{
2959	u32 tmp;
2960
2961	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 
 
2962	WREG32(CP_INT_CNTL_RING1, 0);
2963	WREG32(CP_INT_CNTL_RING2, 0);
 
 
 
 
2964	WREG32(GRBM_INT_CNTL, 0);
2965	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2966	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 
 
 
2967	if (rdev->num_crtc >= 4) {
2968		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2969		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2970	}
2971	if (rdev->num_crtc >= 6) {
2972		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2973		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2974	}
2975
2976	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2977	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 
 
2978	if (rdev->num_crtc >= 4) {
2979		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2980		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2981	}
2982	if (rdev->num_crtc >= 6) {
2983		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2984		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2985	}
2986
2987	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2988
2989	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2990	WREG32(DC_HPD1_INT_CONTROL, tmp);
2991	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2992	WREG32(DC_HPD2_INT_CONTROL, tmp);
2993	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2994	WREG32(DC_HPD3_INT_CONTROL, tmp);
2995	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2996	WREG32(DC_HPD4_INT_CONTROL, tmp);
2997	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2998	WREG32(DC_HPD5_INT_CONTROL, tmp);
2999	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3000	WREG32(DC_HPD6_INT_CONTROL, tmp);
3001
 
 
 
 
 
 
 
 
 
 
 
 
 
3002}
3003
3004static int si_irq_init(struct radeon_device *rdev)
3005{
3006	int ret = 0;
3007	int rb_bufsz;
3008	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3009
3010	/* allocate ring */
3011	ret = r600_ih_ring_alloc(rdev);
3012	if (ret)
3013		return ret;
3014
3015	/* disable irqs */
3016	si_disable_interrupts(rdev);
3017
3018	/* init rlc */
3019	ret = si_rlc_resume(rdev);
3020	if (ret) {
3021		r600_ih_ring_fini(rdev);
3022		return ret;
3023	}
3024
3025	/* setup interrupt control */
3026	/* set dummy read address to ring address */
3027	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3028	interrupt_cntl = RREG32(INTERRUPT_CNTL);
3029	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3030	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3031	 */
3032	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3033	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3034	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3035	WREG32(INTERRUPT_CNTL, interrupt_cntl);
3036
3037	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3038	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
3039
3040	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3041		      IH_WPTR_OVERFLOW_CLEAR |
3042		      (rb_bufsz << 1));
3043
3044	if (rdev->wb.enabled)
3045		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3046
3047	/* set the writeback address whether it's enabled or not */
3048	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3049	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3050
3051	WREG32(IH_RB_CNTL, ih_rb_cntl);
3052
3053	/* set rptr, wptr to 0 */
3054	WREG32(IH_RB_RPTR, 0);
3055	WREG32(IH_RB_WPTR, 0);
3056
3057	/* Default settings for IH_CNTL (disabled at first) */
3058	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
3059	/* RPTR_REARM only works if msi's are enabled */
3060	if (rdev->msi_enabled)
3061		ih_cntl |= RPTR_REARM;
3062	WREG32(IH_CNTL, ih_cntl);
3063
3064	/* force the active interrupt state to all disabled */
3065	si_disable_interrupt_state(rdev);
3066
3067	pci_set_master(rdev->pdev);
3068
3069	/* enable irqs */
3070	si_enable_interrupts(rdev);
3071
3072	return ret;
3073}
3074
3075int si_irq_set(struct radeon_device *rdev)
3076{
3077	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3078	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
3079	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
3080	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
3081	u32 grbm_int_cntl = 0;
3082	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 
3083
3084	if (!rdev->irq.installed) {
3085		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3086		return -EINVAL;
3087	}
3088	/* don't enable anything if the ih is disabled */
3089	if (!rdev->ih.enabled) {
3090		si_disable_interrupts(rdev);
3091		/* force the active interrupt state to all disabled */
3092		si_disable_interrupt_state(rdev);
3093		return 0;
3094	}
3095
3096	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3097	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3098	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3099	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3100	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3101	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
 
 
 
 
 
 
 
 
 
 
3102
3103	/* enable CP interrupts on all rings */
3104	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
3105		DRM_DEBUG("si_irq_set: sw int gfx\n");
3106		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3107	}
3108	if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
3109		DRM_DEBUG("si_irq_set: sw int cp1\n");
3110		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
3111	}
3112	if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
3113		DRM_DEBUG("si_irq_set: sw int cp2\n");
3114		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
3115	}
 
 
 
 
 
 
 
 
 
3116	if (rdev->irq.crtc_vblank_int[0] ||
3117	    rdev->irq.pflip[0]) {
3118		DRM_DEBUG("si_irq_set: vblank 0\n");
3119		crtc1 |= VBLANK_INT_MASK;
3120	}
3121	if (rdev->irq.crtc_vblank_int[1] ||
3122	    rdev->irq.pflip[1]) {
3123		DRM_DEBUG("si_irq_set: vblank 1\n");
3124		crtc2 |= VBLANK_INT_MASK;
3125	}
3126	if (rdev->irq.crtc_vblank_int[2] ||
3127	    rdev->irq.pflip[2]) {
3128		DRM_DEBUG("si_irq_set: vblank 2\n");
3129		crtc3 |= VBLANK_INT_MASK;
3130	}
3131	if (rdev->irq.crtc_vblank_int[3] ||
3132	    rdev->irq.pflip[3]) {
3133		DRM_DEBUG("si_irq_set: vblank 3\n");
3134		crtc4 |= VBLANK_INT_MASK;
3135	}
3136	if (rdev->irq.crtc_vblank_int[4] ||
3137	    rdev->irq.pflip[4]) {
3138		DRM_DEBUG("si_irq_set: vblank 4\n");
3139		crtc5 |= VBLANK_INT_MASK;
3140	}
3141	if (rdev->irq.crtc_vblank_int[5] ||
3142	    rdev->irq.pflip[5]) {
3143		DRM_DEBUG("si_irq_set: vblank 5\n");
3144		crtc6 |= VBLANK_INT_MASK;
3145	}
3146	if (rdev->irq.hpd[0]) {
3147		DRM_DEBUG("si_irq_set: hpd 1\n");
3148		hpd1 |= DC_HPDx_INT_EN;
3149	}
3150	if (rdev->irq.hpd[1]) {
3151		DRM_DEBUG("si_irq_set: hpd 2\n");
3152		hpd2 |= DC_HPDx_INT_EN;
3153	}
3154	if (rdev->irq.hpd[2]) {
3155		DRM_DEBUG("si_irq_set: hpd 3\n");
3156		hpd3 |= DC_HPDx_INT_EN;
3157	}
3158	if (rdev->irq.hpd[3]) {
3159		DRM_DEBUG("si_irq_set: hpd 4\n");
3160		hpd4 |= DC_HPDx_INT_EN;
3161	}
3162	if (rdev->irq.hpd[4]) {
3163		DRM_DEBUG("si_irq_set: hpd 5\n");
3164		hpd5 |= DC_HPDx_INT_EN;
3165	}
3166	if (rdev->irq.hpd[5]) {
3167		DRM_DEBUG("si_irq_set: hpd 6\n");
3168		hpd6 |= DC_HPDx_INT_EN;
3169	}
3170	if (rdev->irq.gui_idle) {
3171		DRM_DEBUG("gui idle\n");
3172		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3173	}
3174
3175	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
3176	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
3177	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
3178
 
 
 
3179	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3180
3181	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
3182	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
 
 
 
 
 
 
 
3183	if (rdev->num_crtc >= 4) {
3184		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
3185		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
3186	}
3187	if (rdev->num_crtc >= 6) {
3188		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
3189		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3190	}
3191
3192	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
3193	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
 
 
 
 
3194	if (rdev->num_crtc >= 4) {
3195		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
3196		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
 
 
3197	}
3198	if (rdev->num_crtc >= 6) {
3199		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
3200		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
 
 
3201	}
3202
3203	WREG32(DC_HPD1_INT_CONTROL, hpd1);
3204	WREG32(DC_HPD2_INT_CONTROL, hpd2);
3205	WREG32(DC_HPD3_INT_CONTROL, hpd3);
3206	WREG32(DC_HPD4_INT_CONTROL, hpd4);
3207	WREG32(DC_HPD5_INT_CONTROL, hpd5);
3208	WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
 
 
 
 
 
 
3209
3210	return 0;
3211}
3212
3213static inline void si_irq_ack(struct radeon_device *rdev)
3214{
3215	u32 tmp;
3216
 
 
 
3217	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3218	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3219	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
3220	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
3221	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
3222	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
3223	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3224	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
3225	if (rdev->num_crtc >= 4) {
3226		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3227		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3228	}
3229	if (rdev->num_crtc >= 6) {
3230		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3231		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3232	}
3233
3234	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
3235		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3236	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
3237		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3238	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
3239		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
3240	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
3241		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
3242	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
3243		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
3244	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
3245		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
3246
3247	if (rdev->num_crtc >= 4) {
3248		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
3249			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3250		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
3251			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3252		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
3253			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
3254		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
3255			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
3256		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
3257			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
3258		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
3259			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
3260	}
3261
3262	if (rdev->num_crtc >= 6) {
3263		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
3264			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3265		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
3266			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3267		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
3268			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
3269		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
3270			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
3271		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
3272			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
3273		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
3274			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
3275	}
3276
3277	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3278		tmp = RREG32(DC_HPD1_INT_CONTROL);
3279		tmp |= DC_HPDx_INT_ACK;
3280		WREG32(DC_HPD1_INT_CONTROL, tmp);
3281	}
3282	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3283		tmp = RREG32(DC_HPD2_INT_CONTROL);
3284		tmp |= DC_HPDx_INT_ACK;
3285		WREG32(DC_HPD2_INT_CONTROL, tmp);
3286	}
3287	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3288		tmp = RREG32(DC_HPD3_INT_CONTROL);
3289		tmp |= DC_HPDx_INT_ACK;
3290		WREG32(DC_HPD3_INT_CONTROL, tmp);
3291	}
3292	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3293		tmp = RREG32(DC_HPD4_INT_CONTROL);
3294		tmp |= DC_HPDx_INT_ACK;
3295		WREG32(DC_HPD4_INT_CONTROL, tmp);
3296	}
3297	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3298		tmp = RREG32(DC_HPD5_INT_CONTROL);
3299		tmp |= DC_HPDx_INT_ACK;
3300		WREG32(DC_HPD5_INT_CONTROL, tmp);
3301	}
3302	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3303		tmp = RREG32(DC_HPD5_INT_CONTROL);
3304		tmp |= DC_HPDx_INT_ACK;
3305		WREG32(DC_HPD6_INT_CONTROL, tmp);
3306	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3307}
3308
/* Disable interrupts, then ack anything still pending and mask all sources. */
static void si_irq_disable(struct radeon_device *rdev)
{
	si_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	si_irq_ack(rdev);
	si_disable_interrupt_state(rdev);
}
3317
/* Quiesce interrupts and halt the RLC for suspend. */
static void si_irq_suspend(struct radeon_device *rdev)
{
	si_irq_disable(rdev);
	si_rlc_stop(rdev);
}
3323
/* Tear down the interrupt handler: suspend it and free the IH ring. */
static void si_irq_fini(struct radeon_device *rdev)
{
	si_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
3329
3330static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
3331{
3332	u32 wptr, tmp;
3333
3334	if (rdev->wb.enabled)
3335		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3336	else
3337		wptr = RREG32(IH_RB_WPTR);
3338
3339	if (wptr & RB_OVERFLOW) {
 
3340		/* When a ring buffer overflow happen start parsing interrupt
3341		 * from the last not overwritten vector (wptr + 16). Hopefully
3342		 * this should allow us to catchup.
3343		 */
3344		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3345			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3346		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3347		tmp = RREG32(IH_RB_CNTL);
3348		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3349		WREG32(IH_RB_CNTL, tmp);
3350	}
3351	return (wptr & rdev->ih.ptr_mask);
3352}
3353
3354/*        SI IV Ring
3355 * Each IV ring entry is 128 bits:
3356 * [7:0]    - interrupt source id
3357 * [31:8]   - reserved
3358 * [59:32]  - interrupt source data
3359 * [63:60]  - reserved
3360 * [71:64]  - RINGID
3361 * [79:72]  - VMID
3362 * [127:80] - reserved
3363 */
3364int si_irq_process(struct radeon_device *rdev)
3365{
3366	u32 wptr;
3367	u32 rptr;
3368	u32 src_id, src_data, ring_id;
3369	u32 ring_index;
3370	unsigned long flags;
3371	bool queue_hotplug = false;
 
 
 
3372
3373	if (!rdev->ih.enabled || rdev->shutdown)
3374		return IRQ_NONE;
3375
3376	wptr = si_get_ih_wptr(rdev);
 
 
 
 
 
 
3377	rptr = rdev->ih.rptr;
3378	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3379
3380	spin_lock_irqsave(&rdev->ih.lock, flags);
3381	if (rptr == wptr) {
3382		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3383		return IRQ_NONE;
3384	}
3385restart_ih:
3386	/* Order reading of wptr vs. reading of IH ring data */
3387	rmb();
3388
3389	/* display interrupts */
3390	si_irq_ack(rdev);
3391
3392	rdev->ih.wptr = wptr;
3393	while (rptr != wptr) {
3394		/* wptr/rptr are in bytes! */
3395		ring_index = rptr / 4;
3396		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3397		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3398		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
3399
3400		switch (src_id) {
3401		case 1: /* D1 vblank/vline */
3402			switch (src_data) {
3403			case 0: /* D1 vblank */
3404				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
3405					if (rdev->irq.crtc_vblank_int[0]) {
3406						drm_handle_vblank(rdev->ddev, 0);
3407						rdev->pm.vblank_sync = true;
3408						wake_up(&rdev->irq.vblank_queue);
3409					}
3410					if (rdev->irq.pflip[0])
3411						radeon_crtc_handle_flip(rdev, 0);
3412					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3413					DRM_DEBUG("IH: D1 vblank\n");
3414				}
 
 
 
 
 
3415				break;
3416			case 1: /* D1 vline */
3417				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
3418					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3419					DRM_DEBUG("IH: D1 vline\n");
3420				}
 
 
3421				break;
3422			default:
3423				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3424				break;
3425			}
3426			break;
3427		case 2: /* D2 vblank/vline */
3428			switch (src_data) {
3429			case 0: /* D2 vblank */
3430				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
3431					if (rdev->irq.crtc_vblank_int[1]) {
3432						drm_handle_vblank(rdev->ddev, 1);
3433						rdev->pm.vblank_sync = true;
3434						wake_up(&rdev->irq.vblank_queue);
3435					}
3436					if (rdev->irq.pflip[1])
3437						radeon_crtc_handle_flip(rdev, 1);
3438					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3439					DRM_DEBUG("IH: D2 vblank\n");
3440				}
 
 
 
 
 
3441				break;
3442			case 1: /* D2 vline */
3443				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3444					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3445					DRM_DEBUG("IH: D2 vline\n");
3446				}
 
 
3447				break;
3448			default:
3449				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3450				break;
3451			}
3452			break;
3453		case 3: /* D3 vblank/vline */
3454			switch (src_data) {
3455			case 0: /* D3 vblank */
3456				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3457					if (rdev->irq.crtc_vblank_int[2]) {
3458						drm_handle_vblank(rdev->ddev, 2);
3459						rdev->pm.vblank_sync = true;
3460						wake_up(&rdev->irq.vblank_queue);
3461					}
3462					if (rdev->irq.pflip[2])
3463						radeon_crtc_handle_flip(rdev, 2);
3464					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3465					DRM_DEBUG("IH: D3 vblank\n");
3466				}
 
 
 
 
 
3467				break;
3468			case 1: /* D3 vline */
3469				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3470					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3471					DRM_DEBUG("IH: D3 vline\n");
3472				}
 
 
3473				break;
3474			default:
3475				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3476				break;
3477			}
3478			break;
3479		case 4: /* D4 vblank/vline */
3480			switch (src_data) {
3481			case 0: /* D4 vblank */
3482				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3483					if (rdev->irq.crtc_vblank_int[3]) {
3484						drm_handle_vblank(rdev->ddev, 3);
3485						rdev->pm.vblank_sync = true;
3486						wake_up(&rdev->irq.vblank_queue);
3487					}
3488					if (rdev->irq.pflip[3])
3489						radeon_crtc_handle_flip(rdev, 3);
3490					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3491					DRM_DEBUG("IH: D4 vblank\n");
3492				}
 
 
 
 
 
3493				break;
3494			case 1: /* D4 vline */
3495				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3496					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3497					DRM_DEBUG("IH: D4 vline\n");
3498				}
 
 
3499				break;
3500			default:
3501				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3502				break;
3503			}
3504			break;
3505		case 5: /* D5 vblank/vline */
3506			switch (src_data) {
3507			case 0: /* D5 vblank */
3508				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3509					if (rdev->irq.crtc_vblank_int[4]) {
3510						drm_handle_vblank(rdev->ddev, 4);
3511						rdev->pm.vblank_sync = true;
3512						wake_up(&rdev->irq.vblank_queue);
3513					}
3514					if (rdev->irq.pflip[4])
3515						radeon_crtc_handle_flip(rdev, 4);
3516					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3517					DRM_DEBUG("IH: D5 vblank\n");
3518				}
 
 
 
 
 
3519				break;
3520			case 1: /* D5 vline */
3521				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3522					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3523					DRM_DEBUG("IH: D5 vline\n");
3524				}
 
 
3525				break;
3526			default:
3527				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3528				break;
3529			}
3530			break;
3531		case 6: /* D6 vblank/vline */
3532			switch (src_data) {
3533			case 0: /* D6 vblank */
3534				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3535					if (rdev->irq.crtc_vblank_int[5]) {
3536						drm_handle_vblank(rdev->ddev, 5);
3537						rdev->pm.vblank_sync = true;
3538						wake_up(&rdev->irq.vblank_queue);
3539					}
3540					if (rdev->irq.pflip[5])
3541						radeon_crtc_handle_flip(rdev, 5);
3542					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3543					DRM_DEBUG("IH: D6 vblank\n");
3544				}
 
 
 
 
 
3545				break;
3546			case 1: /* D6 vline */
3547				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3548					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3549					DRM_DEBUG("IH: D6 vline\n");
3550				}
 
 
3551				break;
3552			default:
3553				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3554				break;
3555			}
3556			break;
 
 
 
 
 
 
 
 
 
 
3557		case 42: /* HPD hotplug */
3558			switch (src_data) {
3559			case 0:
3560				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3561					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
3562					queue_hotplug = true;
3563					DRM_DEBUG("IH: HPD1\n");
3564				}
 
 
3565				break;
3566			case 1:
3567				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3568					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3569					queue_hotplug = true;
3570					DRM_DEBUG("IH: HPD2\n");
3571				}
 
 
3572				break;
3573			case 2:
3574				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3575					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3576					queue_hotplug = true;
3577					DRM_DEBUG("IH: HPD3\n");
3578				}
 
 
3579				break;
3580			case 3:
3581				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3582					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3583					queue_hotplug = true;
3584					DRM_DEBUG("IH: HPD4\n");
3585				}
 
 
3586				break;
3587			case 4:
3588				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3589					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3590					queue_hotplug = true;
3591					DRM_DEBUG("IH: HPD5\n");
3592				}
 
 
3593				break;
3594			case 5:
3595				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3596					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3597					queue_hotplug = true;
3598					DRM_DEBUG("IH: HPD6\n");
3599				}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3600				break;
3601			default:
3602				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3603				break;
3604			}
3605			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3606		case 176: /* RINGID0 CP_INT */
3607			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3608			break;
3609		case 177: /* RINGID1 CP_INT */
3610			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3611			break;
3612		case 178: /* RINGID2 CP_INT */
3613			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3614			break;
3615		case 181: /* CP EOP event */
3616			DRM_DEBUG("IH: CP EOP\n");
3617			switch (ring_id) {
3618			case 0:
3619				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3620				break;
3621			case 1:
3622				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3623				break;
3624			case 2:
3625				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3626				break;
3627			}
3628			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3629		case 233: /* GUI IDLE */
3630			DRM_DEBUG("IH: GUI idle\n");
3631			rdev->pm.gui_idle = true;
3632			wake_up(&rdev->irq.idle_queue);
 
 
3633			break;
3634		default:
3635			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3636			break;
3637		}
3638
3639		/* wptr/rptr are in bytes! */
3640		rptr += 16;
3641		rptr &= rdev->ih.ptr_mask;
 
3642	}
 
 
 
 
 
 
 
 
 
3643	/* make sure wptr hasn't changed while processing */
3644	wptr = si_get_ih_wptr(rdev);
3645	if (wptr != rdev->ih.wptr)
3646		goto restart_ih;
3647	if (queue_hotplug)
3648		schedule_work(&rdev->hotplug_work);
3649	rdev->ih.rptr = rptr;
3650	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3651	spin_unlock_irqrestore(&rdev->ih.lock, flags);
3652	return IRQ_HANDLED;
3653}
3654
3655/*
3656 * startup/shutdown callbacks
3657 */
/*
 * si_startup - bring the SI GPU up to a fully working state.
 *
 * Runs the hardware bring-up sequence: load CP/RLC/MC microcode (if not
 * already cached), program the memory controller, enable the GART,
 * configure the GPU, allocate RLC and writeback buffers, start fence
 * processing on all three CP rings, initialize interrupts, bring up the
 * three CP ring buffers, load/resume the CP, and finally run an IB test
 * on each ring.  Called from si_init() and si_resume().
 *
 * NOTE(review): the call order below is hardware bring-up order and must
 * not be changed.
 *
 * Returns 0 on success, negative error code on the first failing step.
 */
static int si_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* fetch the firmware images from userspace if we don't have them yet */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
	    !rdev->rlc_fw || !rdev->mc_fw) {
		r = si_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* upload the memory controller microcode to the hardware */
	r = si_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	/* program the MC before touching anything that uses VRAM mappings */
	si_mc_program(rdev);
	r = si_pcie_gart_enable(rdev);
	if (r)
		return r;
	si_gpu_init(rdev);

#if 0
	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
#endif
	/* allocate rlc buffers */
	r = si_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* start fence processing on the gfx ring and both compute CP rings */
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = si_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	si_irq_set(rdev);

	/* bring up the three CP ring buffers (gfx + two compute rings) */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     CP_RB1_RPTR, CP_RB1_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     CP_RB2_RPTR, CP_RB2_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	/* load CP microcode and kick the command processor */
	r = si_cp_load_microcode(rdev);
	if (r)
		return r;
	r = si_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	/* sanity-check each ring by submitting a test IB; a failure here
	 * means acceleration cannot be trusted, so mark it disabled */
	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d) on CP ring 0\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d) on CP ring 1\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d) on CP ring 2\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = radeon_vm_manager_start(rdev);
	if (r)
		return r;

	return 0;
}
3794
3795int si_resume(struct radeon_device *rdev)
3796{
3797	int r;
3798
3799	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
3800	 * posting will perform necessary task to bring back GPU into good
3801	 * shape.
3802	 */
3803	/* post card */
3804	atom_asic_init(rdev->mode_info.atom_context);
3805
 
 
 
 
 
 
3806	rdev->accel_working = true;
3807	r = si_startup(rdev);
3808	if (r) {
3809		DRM_ERROR("si startup failed on resume\n");
3810		rdev->accel_working = false;
3811		return r;
3812	}
3813
3814	return r;
3815
3816}
3817
/*
 * si_suspend - quiesce the GPU in preparation for a system suspend.
 *
 * Suspends the IB pool and VM manager, halts the command processor,
 * marks all three CP rings not ready so nothing new is submitted, then
 * tears down interrupts, writeback and the GART.  The order roughly
 * reverses si_startup() and must be preserved.
 *
 * Always returns 0.
 */
int si_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	radeon_vm_manager_suspend(rdev);
#if 0
	r600_blit_suspend(rdev);
#endif
	/* stop the CP before anything it depends on goes away */
	si_cp_enable(rdev, false);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	si_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	si_pcie_gart_disable(rdev);
	return 0;
}
3835
3836/* Plan is to move initialization in that function and use
3837 * helper function so that radeon_device_init pretty much
3838 * do nothing more than calling asic specific function. This
3839 * should also allow to remove a bunch of callback function
3840 * like vram_info.
3841 */
3842int si_init(struct radeon_device *rdev)
3843{
3844	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3845	int r;
3846
3847	/* Read BIOS */
3848	if (!radeon_get_bios(rdev)) {
3849		if (ASIC_IS_AVIVO(rdev))
3850			return -EINVAL;
3851	}
3852	/* Must be an ATOMBIOS */
3853	if (!rdev->is_atom_bios) {
3854		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
3855		return -EINVAL;
3856	}
3857	r = radeon_atombios_init(rdev);
3858	if (r)
3859		return r;
3860
3861	/* Post card if necessary */
3862	if (!radeon_card_posted(rdev)) {
3863		if (!rdev->bios) {
3864			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3865			return -EINVAL;
3866		}
3867		DRM_INFO("GPU not posted. posting now...\n");
3868		atom_asic_init(rdev->mode_info.atom_context);
3869	}
 
 
3870	/* Initialize scratch registers */
3871	si_scratch_init(rdev);
3872	/* Initialize surface registers */
3873	radeon_surface_init(rdev);
3874	/* Initialize clocks */
3875	radeon_get_clock_info(rdev->ddev);
3876
3877	/* Fence driver */
3878	r = radeon_fence_driver_init(rdev);
3879	if (r)
3880		return r;
3881
3882	/* initialize memory controller */
3883	r = si_mc_init(rdev);
3884	if (r)
3885		return r;
3886	/* Memory manager */
3887	r = radeon_bo_init(rdev);
3888	if (r)
3889		return r;
3890
3891	r = radeon_irq_kms_init(rdev);
3892	if (r)
3893		return r;
 
 
 
 
 
 
 
 
3894
3895	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3896	ring->ring_obj = NULL;
3897	r600_ring_init(rdev, ring, 1024 * 1024);
3898
3899	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3900	ring->ring_obj = NULL;
3901	r600_ring_init(rdev, ring, 1024 * 1024);
3902
3903	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3904	ring->ring_obj = NULL;
3905	r600_ring_init(rdev, ring, 1024 * 1024);
3906
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3907	rdev->ih.ring_obj = NULL;
3908	r600_ih_ring_init(rdev, 64 * 1024);
3909
3910	r = r600_pcie_gart_init(rdev);
3911	if (r)
3912		return r;
3913
3914	r = radeon_ib_pool_init(rdev);
3915	rdev->accel_working = true;
3916	if (r) {
3917		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3918		rdev->accel_working = false;
3919	}
3920	r = radeon_vm_manager_init(rdev);
3921	if (r) {
3922		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
3923	}
3924
3925	r = si_startup(rdev);
3926	if (r) {
3927		dev_err(rdev->dev, "disabling GPU acceleration\n");
3928		si_cp_fini(rdev);
 
3929		si_irq_fini(rdev);
3930		si_rlc_fini(rdev);
3931		radeon_wb_fini(rdev);
3932		r100_ib_fini(rdev);
3933		radeon_vm_manager_fini(rdev);
3934		radeon_irq_kms_fini(rdev);
3935		si_pcie_gart_fini(rdev);
3936		rdev->accel_working = false;
3937	}
3938
3939	/* Don't start up if the MC ucode is missing.
3940	 * The default clocks and voltages before the MC ucode
3941	 * is loaded are not suffient for advanced operations.
3942	 */
3943	if (!rdev->mc_fw) {
3944		DRM_ERROR("radeon: MC ucode required for NI+.\n");
3945		return -EINVAL;
3946	}
3947
3948	return 0;
3949}
3950
/*
 * si_fini - tear down everything si_init()/si_startup() set up.
 *
 * Reverse of initialization: stops the CP, interrupts and RLC, releases
 * writeback, the VM manager, the IB pool and KMS IRQ state, disables the
 * GART, then frees scratch VRAM, GEM objects, fences, the buffer
 * manager, atombios state and finally the cached BIOS image.  Teardown
 * order must be preserved.
 */
void si_fini(struct radeon_device *rdev)
{
#if 0
	r600_blit_fini(rdev);
#endif
	si_cp_fini(rdev);
	si_irq_fini(rdev);
	si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	/* free the BIOS copy and NULL the pointer to guard against
	 * use-after-free */
	kfree(rdev->bios);
	rdev->bios = NULL;
}
3972