   1/*
   2 * Copyright 2013 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/module.h>
  25#include <linux/pci.h>
  26
  27#include "amdgpu.h"
  28#include "amdgpu_pm.h"
  29#include "amdgpu_dpm.h"
  30#include "amdgpu_atombios.h"
  31#include "amd_pcie.h"
  32#include "sid.h"
  33#include "r600_dpm.h"
  34#include "si_dpm.h"
  35#include "atom.h"
  36#include "../include/pptable.h"
  37#include <linux/math64.h>
  38#include <linux/seq_file.h>
  39#include <linux/firmware.h>
  40
  41#define MC_CG_ARB_FREQ_F0           0x0a
  42#define MC_CG_ARB_FREQ_F1           0x0b
  43#define MC_CG_ARB_FREQ_F2           0x0c
  44#define MC_CG_ARB_FREQ_F3           0x0d
  45
  46#define SMC_RAM_END                 0x20000
  47
  48#define SCLK_MIN_DEEPSLEEP_FREQ     1350
  49
  50
  51/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
  52#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
  53#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
  54#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
  55#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
  56#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
  57#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
  58
  59#define BIOS_SCRATCH_4                                    0x5cd
  60
  61MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
  62MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
  63MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
  64MODULE_FIRMWARE("amdgpu/verde_smc.bin");
  65MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
  66MODULE_FIRMWARE("amdgpu/oland_smc.bin");
  67MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
  68MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
  69MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
  70MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
  71
  72static const struct amd_pm_funcs si_dpm_funcs;
  73
  74union power_info {
  75	struct _ATOM_POWERPLAY_INFO info;
  76	struct _ATOM_POWERPLAY_INFO_V2 info_2;
  77	struct _ATOM_POWERPLAY_INFO_V3 info_3;
  78	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
  79	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
  80	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
  81	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
  82	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
  83};
  84
  85union fan_info {
  86	struct _ATOM_PPLIB_FANTABLE fan;
  87	struct _ATOM_PPLIB_FANTABLE2 fan2;
  88	struct _ATOM_PPLIB_FANTABLE3 fan3;
  89};
  90
  91union pplib_clock_info {
  92	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
  93	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
  94	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
  95	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
  96	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
  97};
  98
  99static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
 100{
 101	R600_UTC_DFLT_00,
 102	R600_UTC_DFLT_01,
 103	R600_UTC_DFLT_02,
 104	R600_UTC_DFLT_03,
 105	R600_UTC_DFLT_04,
 106	R600_UTC_DFLT_05,
 107	R600_UTC_DFLT_06,
 108	R600_UTC_DFLT_07,
 109	R600_UTC_DFLT_08,
 110	R600_UTC_DFLT_09,
 111	R600_UTC_DFLT_10,
 112	R600_UTC_DFLT_11,
 113	R600_UTC_DFLT_12,
 114	R600_UTC_DFLT_13,
 115	R600_UTC_DFLT_14,
 116};
 117
 118static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
 119{
 120	R600_DTC_DFLT_00,
 121	R600_DTC_DFLT_01,
 122	R600_DTC_DFLT_02,
 123	R600_DTC_DFLT_03,
 124	R600_DTC_DFLT_04,
 125	R600_DTC_DFLT_05,
 126	R600_DTC_DFLT_06,
 127	R600_DTC_DFLT_07,
 128	R600_DTC_DFLT_08,
 129	R600_DTC_DFLT_09,
 130	R600_DTC_DFLT_10,
 131	R600_DTC_DFLT_11,
 132	R600_DTC_DFLT_12,
 133	R600_DTC_DFLT_13,
 134	R600_DTC_DFLT_14,
 135};
 136
 137static const struct si_cac_config_reg cac_weights_tahiti[] =
 138{
 139	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
 140	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 141	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
 142	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
 143	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 144	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 145	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 146	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 147	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 148	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
 149	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 150	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
 151	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
 152	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
 153	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
 154	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 155	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 156	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
 157	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 158	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
 159	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
 160	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
 161	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 162	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 163	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 164	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 165	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 166	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 167	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 168	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 169	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
 170	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 171	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 172	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 173	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
 174	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 175	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 176	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
 177	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 178	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
 179	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 180	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 181	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 182	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 183	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 184	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 185	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 186	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 187	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 188	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 189	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 190	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 191	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 192	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 193	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 194	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 195	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 196	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 197	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 198	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
 199	{ 0xFFFFFFFF }
 200};
 201
 202static const struct si_cac_config_reg lcac_tahiti[] =
 203{
 204	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
 205	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 206	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
 207	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 208	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
 209	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 210	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
 211	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 212	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 213	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 214	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 215	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 216	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 217	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 218	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 219	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 220	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 221	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 222	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 223	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 224	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 225	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 226	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 227	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 228	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 229	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 230	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 231	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 232	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 233	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 234	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 235	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 236	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 237	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 238	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 239	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 240	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 241	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 242	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 243	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 244	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 245	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 246	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 247	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 248	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 249	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 250	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
 251	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 252	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 253	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 254	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 255	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 256	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 257	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 258	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 259	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 260	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 261	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 262	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 263	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 264	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 265	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 266	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 267	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 268	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 269	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 270	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 271	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 272	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 273	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 274	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 275	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 276	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
 277	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 278	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 279	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 280	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 281	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 282	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 283	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 284	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 285	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 286	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 287	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 288	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 289	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 290	{ 0xFFFFFFFF }
 291
 292};
 293
 294static const struct si_cac_config_reg cac_override_tahiti[] =
 295{
 296	{ 0xFFFFFFFF }
 297};
 298
 299static const struct si_powertune_data powertune_data_tahiti =
 300{
 301	((1 << 16) | 27027),
 302	6,
 303	0,
 304	4,
 305	95,
 306	{
 307		0UL,
 308		0UL,
 309		4521550UL,
 310		309631529UL,
 311		-1270850L,
 312		4513710L,
 313		40
 314	},
 315	595000000UL,
 316	12,
 317	{
 318		0,
 319		0,
 320		0,
 321		0,
 322		0,
 323		0,
 324		0,
 325		0
 326	},
 327	true
 328};
 329
 330static const struct si_dte_data dte_data_tahiti =
 331{
 332	{ 1159409, 0, 0, 0, 0 },
 333	{ 777, 0, 0, 0, 0 },
 334	2,
 335	54000,
 336	127000,
 337	25,
 338	2,
 339	10,
 340	13,
 341	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
 342	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
 343	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
 344	85,
 345	false
 346};
 347
 348#if 0
 349static const struct si_dte_data dte_data_tahiti_le =
 350{
 351	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
 352	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
 353	0x5,
 354	0xAFC8,
 355	0x64,
 356	0x32,
 357	1,
 358	0,
 359	0x10,
 360	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
 361	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
 362	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
 363	85,
 364	true
 365};
 366#endif
 367
 368static const struct si_dte_data dte_data_tahiti_pro =
 369{
 370	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
 371	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
 372	5,
 373	45000,
 374	100,
 375	0xA,
 376	1,
 377	0,
 378	0x10,
 379	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 380	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 381	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
 382	90,
 383	true
 384};
 385
 386static const struct si_dte_data dte_data_new_zealand =
 387{
 388	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
 389	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
 390	0x5,
 391	0xAFC8,
 392	0x69,
 393	0x32,
 394	1,
 395	0,
 396	0x10,
 397	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
 398	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 399	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
 400	85,
 401	true
 402};
 403
 404static const struct si_dte_data dte_data_aruba_pro =
 405{
 406	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
 407	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
 408	5,
 409	45000,
 410	100,
 411	0xA,
 412	1,
 413	0,
 414	0x10,
 415	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 416	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 417	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
 418	90,
 419	true
 420};
 421
 422static const struct si_dte_data dte_data_malta =
 423{
 424	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
 425	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
 426	5,
 427	45000,
 428	100,
 429	0xA,
 430	1,
 431	0,
 432	0x10,
 433	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 434	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 435	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
 436	90,
 437	true
 438};
 439
 440static const struct si_cac_config_reg cac_weights_pitcairn[] =
 441{
 442	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
 443	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 444	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 445	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
 446	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
 447	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
 448	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 449	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
 450	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 451	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
 452	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
 453	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
 454	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
 455	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
 456	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 457	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 458	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 459	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
 460	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
 461	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
 462	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
 463	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
 464	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
 465	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 466	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 467	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
 468	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
 469	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 470	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 471	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 472	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
 473	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 474	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
 475	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 476	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
 477	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
 478	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
 479	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 480	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
 481	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 482	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 483	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 484	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 485	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 486	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 487	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 488	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 489	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 490	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 491	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 492	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 493	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 494	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 495	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 496	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 497	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 498	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 499	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 500	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 501	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
 502	{ 0xFFFFFFFF }
 503};
 504
 505static const struct si_cac_config_reg lcac_pitcairn[] =
 506{
 507	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 508	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 509	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 510	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 511	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 512	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 513	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 514	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 515	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 516	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 517	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 518	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 519	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 520	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 521	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 522	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 523	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 524	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 525	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 526	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 527	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 528	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 529	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 530	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 531	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 532	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 533	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 534	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 535	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 536	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 537	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 538	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 539	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 540	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 541	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 542	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 543	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 544	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 545	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 546	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 547	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 548	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 549	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 550	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 551	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
 552	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 553	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 554	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 555	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 556	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 557	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 558	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 559	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 560	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 561	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 562	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 563	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 564	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 565	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 566	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 567	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 568	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 569	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 570	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 571	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 572	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 573	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 574	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 575	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 576	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 577	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 578	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 579	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
 580	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 581	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 582	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 583	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 584	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 585	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 586	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 587	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 588	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 589	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 590	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 591	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
 592	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 593	{ 0xFFFFFFFF }
 594};
 595
 596static const struct si_cac_config_reg cac_override_pitcairn[] =
 597{
  598	{ 0xFFFFFFFF }
 599};
 600
 601static const struct si_powertune_data powertune_data_pitcairn =
 602{
 603	((1 << 16) | 27027),
 604	5,
 605	0,
 606	6,
 607	100,
 608	{
 609		51600000UL,
 610		1800000UL,
 611		7194395UL,
 612		309631529UL,
 613		-1270850L,
 614		4513710L,
 615		100
 616	},
 617	117830498UL,
 618	12,
 619	{
 620		0,
 621		0,
 622		0,
 623		0,
 624		0,
 625		0,
 626		0,
 627		0
 628	},
 629	true
 630};
 631
 632static const struct si_dte_data dte_data_pitcairn =
 633{
 634	{ 0, 0, 0, 0, 0 },
 635	{ 0, 0, 0, 0, 0 },
 636	0,
 637	0,
 638	0,
 639	0,
 640	0,
 641	0,
 642	0,
 643	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
 644	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
 645	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
 646	0,
 647	false
 648};
 649
 650static const struct si_dte_data dte_data_curacao_xt =
 651{
 652	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
 653	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
 654	5,
 655	45000,
 656	100,
 657	0xA,
 658	1,
 659	0,
 660	0x10,
 661	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 662	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 663	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
 664	90,
 665	true
 666};
 667
 668static const struct si_dte_data dte_data_curacao_pro =
 669{
 670	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
 671	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
 672	5,
 673	45000,
 674	100,
 675	0xA,
 676	1,
 677	0,
 678	0x10,
 679	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 680	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 681	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
 682	90,
 683	true
 684};
 685
 686static const struct si_dte_data dte_data_neptune_xt =
 687{
 688	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
 689	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
 690	5,
 691	45000,
 692	100,
 693	0xA,
 694	1,
 695	0,
 696	0x10,
 697	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 698	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
 699	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
 700	90,
 701	true
 702};
 703
 704static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
 705{
 706	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
 707	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 708	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
 709	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
 710	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 711	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 712	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 713	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 714	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
 715	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
 716	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
 717	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
 718	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
 719	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 720	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
 721	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
 722	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
 723	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
 724	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
 725	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
 726	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
 727	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
 728	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
 729	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
 730	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
 731	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 732	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
 733	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 734	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 735	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
 736	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 737	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
 738	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
 739	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
 740	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
 741	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
 742	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 743	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
 744	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 745	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 746	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
 747	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 748	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 749	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 750	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 751	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 752	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 753	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 754	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 755	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 756	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 757	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 758	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 759	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 760	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 761	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 762	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 763	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 764	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 765	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
 766	{ 0xFFFFFFFF }
 767};
 768
 769static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
 770{
 771	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
 772	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 773	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
 774	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
 775	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 776	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 777	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 778	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 779	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
 780	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
 781	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
 782	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
 783	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
 784	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 785	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
 786	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
 787	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
 788	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
 789	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
 790	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
 791	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
 792	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
 793	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
 794	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
 795	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
 796	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 797	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
 798	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 799	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 800	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
 801	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 802	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
 803	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
 804	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
 805	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
 806	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
 807	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 808	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
 809	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 810	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 811	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
 812	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 813	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 814	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 815	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 816	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 817	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 818	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 819	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 820	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 821	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 822	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 823	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 824	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 825	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 826	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 827	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 828	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 829	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 830	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
 831	{ 0xFFFFFFFF }
 832};
 833
 834static const struct si_cac_config_reg cac_weights_heathrow[] =
 835{
 836	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
 837	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 838	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
 839	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
 840	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 841	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 842	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 843	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 844	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
 845	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
 846	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
 847	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
 848	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
 849	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 850	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
 851	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
 852	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
 853	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
 854	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
 855	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
 856	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
 857	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
 858	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
 859	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
 860	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
 861	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 862	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
 863	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 864	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 865	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
 866	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 867	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
 868	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
 869	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
 870	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
 871	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
 872	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 873	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
 874	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 875	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 876	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
 877	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 878	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 879	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 880	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 881	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 882	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 883	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 884	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 885	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 886	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 887	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 888	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 889	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 890	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 891	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 892	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 893	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 894	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 895	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
 896	{ 0xFFFFFFFF }
 897};
 898
 899static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
 900{
 901	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
 902	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 903	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
 904	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
 905	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 906	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 907	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 908	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 909	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
 910	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
 911	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
 912	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
 913	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
 914	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 915	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
 916	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
 917	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
 918	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
 919	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
 920	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
 921	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
 922	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
 923	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
 924	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
 925	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
 926	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 927	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
 928	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 929	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 930	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
 931	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 932	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
 933	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
 934	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
 935	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
 936	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
 937	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 938	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
 939	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 940	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 941	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
 942	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 943	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 944	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 945	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 946	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 947	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 948	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 949	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 950	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 951	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 952	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 953	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 954	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 955	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 956	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 957	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 958	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
 959	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
 960	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
 961	{ 0xFFFFFFFF }
 962};
 963
 964static const struct si_cac_config_reg cac_weights_cape_verde[] =
 965{
 966	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
 967	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 968	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
 969	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
 970	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
 971	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 972	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
 973	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
 974	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
 975	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
 976	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
 977	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
 978	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
 979	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
 980	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
 981	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
 982	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
 983	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
 984	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
 985	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
 986	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
 987	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
 988	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
 989	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
 990	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
 991	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 992	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
 993	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
 994	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
 995	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
 996	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
 997	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
 998	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
 999	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1000	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1001	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1002	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1003	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1004	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1005	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1006	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1007	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1008	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1009	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1010	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1011	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1012	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1013	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1014	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1015	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1016	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1017	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1018	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1019	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1020	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1021	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1022	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1023	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1024	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1025	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1026	{ 0xFFFFFFFF }
1027};
1028
1029static const struct si_cac_config_reg lcac_cape_verde[] =
1030{
1031	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1032	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1033	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1034	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1035	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1036	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1037	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1038	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1039	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1040	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1041	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1042	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1043	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1044	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1045	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1046	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1047	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1048	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1049	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1050	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1051	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1052	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1053	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1054	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1055	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1056	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1057	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1058	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1059	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1060	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1061	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1062	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1063	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1064	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1065	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1066	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1067	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1068	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1069	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1070	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1071	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1072	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1073	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1074	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1075	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1076	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1077	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1078	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1079	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1080	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1081	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1082	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1083	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1084	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1085	{ 0xFFFFFFFF }
1086};
1087
1088static const struct si_cac_config_reg cac_override_cape_verde[] =
1089{
 1090	{ 0xFFFFFFFF }
1091};
1092
1093static const struct si_powertune_data powertune_data_cape_verde =
1094{
1095	((1 << 16) | 0x6993),
1096	5,
1097	0,
1098	7,
1099	105,
1100	{
1101		0UL,
1102		0UL,
1103		7194395UL,
1104		309631529UL,
1105		-1270850L,
1106		4513710L,
1107		100
1108	},
1109	117830498UL,
1110	12,
1111	{
1112		0,
1113		0,
1114		0,
1115		0,
1116		0,
1117		0,
1118		0,
1119		0
1120	},
1121	true
1122};
1123
1124static const struct si_dte_data dte_data_cape_verde =
1125{
1126	{ 0, 0, 0, 0, 0 },
1127	{ 0, 0, 0, 0, 0 },
1128	0,
1129	0,
1130	0,
1131	0,
1132	0,
1133	0,
1134	0,
1135	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1136	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1137	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1138	0,
1139	false
1140};
1141
1142static const struct si_dte_data dte_data_venus_xtx =
1143{
1144	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1145	{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1146	5,
1147	55000,
1148	0x69,
1149	0xA,
1150	1,
1151	0,
1152	0x3,
1153	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1154	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1155	{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1156	90,
1157	true
1158};
1159
1160static const struct si_dte_data dte_data_venus_xt =
1161{
1162	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1163	{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1164	5,
1165	55000,
1166	0x69,
1167	0xA,
1168	1,
1169	0,
1170	0x3,
1171	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1172	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1173	{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1174	90,
1175	true
1176};
1177
1178static const struct si_dte_data dte_data_venus_pro =
1179{
1180	{  0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1181	{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1182	5,
1183	55000,
1184	0x69,
1185	0xA,
1186	1,
1187	0,
1188	0x3,
1189	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1190	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1191	{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1192	90,
1193	true
1194};
1195
1196static const struct si_cac_config_reg cac_weights_oland[] =
1197{
1198	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
1199	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1200	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
1201	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
1202	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1203	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1204	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1205	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1206	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
1207	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
1208	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
1209	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
1210	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
1211	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1212	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
1213	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
1214	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
1215	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
1216	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
1217	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
1218	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
1219	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
1220	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
1221	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
1222	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
1223	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1224	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1225	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1226	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1227	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1228	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1229	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1230	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1231	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1232	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1233	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1234	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1235	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1236	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1237	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1238	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1239	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1240	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1241	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1242	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1243	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1244	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1245	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1246	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1247	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1248	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1249	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1250	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1251	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1252	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1253	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1254	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1255	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1256	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1257	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1258	{ 0xFFFFFFFF }
1259};
1260
1261static const struct si_cac_config_reg cac_weights_mars_pro[] =
1262{
1263	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1264	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1265	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1266	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1267	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1268	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1269	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1270	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1271	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1272	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1273	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1274	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1275	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1276	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1277	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1278	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1279	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1280	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1281	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1282	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1283	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1284	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1285	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1286	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1287	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1288	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1289	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1290	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1291	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1292	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1293	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1294	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1295	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1296	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1297	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1298	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
1299	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1300	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1301	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1302	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1303	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1304	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1305	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1306	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1307	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1308	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1309	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1310	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1311	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1312	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1313	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1314	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1315	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1316	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1317	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1318	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1319	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1320	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1321	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1322	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1323	{ 0xFFFFFFFF }
1324};
1325
1326static const struct si_cac_config_reg cac_weights_mars_xt[] =
1327{
1328	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1329	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1330	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1331	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1332	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1333	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1334	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1335	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1336	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1337	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1338	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1339	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1340	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1341	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1342	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1343	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1344	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1345	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1346	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1347	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1348	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1349	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1350	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1351	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1352	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1353	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1354	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1355	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1356	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1357	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1358	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1359	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1360	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1361	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1362	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1363	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
1364	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1365	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1366	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1367	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1368	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1369	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1370	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1371	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1372	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1373	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1374	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1375	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1376	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1377	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1378	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1379	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1380	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1381	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1382	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1383	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1384	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1385	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1386	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1387	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1388	{ 0xFFFFFFFF }
1389};
1390
1391static const struct si_cac_config_reg cac_weights_oland_pro[] =
1392{
1393	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1394	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1395	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1396	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1397	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1398	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1399	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1400	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1401	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1402	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1403	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1404	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1405	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1406	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1407	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1408	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1409	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1410	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1411	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1412	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1413	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1414	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1415	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1416	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1417	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1418	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1419	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1420	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1421	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1422	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1423	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1424	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1425	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1426	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1427	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1428	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
1429	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1430	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1431	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1432	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1433	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1434	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1435	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1436	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1437	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1438	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1439	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1440	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1441	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1442	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1443	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1444	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1445	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1446	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1447	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1448	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1449	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1450	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1451	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1452	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1453	{ 0xFFFFFFFF }
1454};
1455
1456static const struct si_cac_config_reg cac_weights_oland_xt[] =
1457{
1458	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1459	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1460	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1461	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1462	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1463	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1464	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1465	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1466	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1467	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1468	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1469	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1470	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1471	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1472	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1473	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1474	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1475	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1476	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1477	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1478	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1479	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1480	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1481	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1482	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1483	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1484	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1485	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1486	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1487	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1488	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1489	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1490	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1491	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1492	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1493	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
1494	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1495	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1496	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1497	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1498	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1499	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1500	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1501	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1502	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1503	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1504	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1505	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1506	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1507	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1508	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1509	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1510	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1511	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1512	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1513	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1514	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1515	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1516	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1517	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1518	{ 0xFFFFFFFF }
1519};
1520
1521static const struct si_cac_config_reg lcac_oland[] =
1522{
1523	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1524	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1525	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1526	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1527	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1528	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1529	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1530	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1531	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1532	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1533	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
1534	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1535	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1536	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1537	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1538	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1539	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1540	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1541	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1542	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1543	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1544	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1545	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1546	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1547	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1548	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1549	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1550	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1551	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1552	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1553	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1554	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1555	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1556	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1557	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1558	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1559	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1560	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1561	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1562	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1563	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1564	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1565	{ 0xFFFFFFFF }
1566};
1567
1568static const struct si_cac_config_reg lcac_mars_pro[] =
1569{
1570	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1571	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1572	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1573	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1574	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1575	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1576	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1577	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1578	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1579	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1580	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1581	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1582	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1583	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1584	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1585	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1586	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1587	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1588	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1589	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1590	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1591	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1592	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1593	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1594	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1595	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1596	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1597	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1598	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1599	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1600	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1601	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1602	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1603	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1604	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1605	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1606	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1607	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1608	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1609	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1610	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1611	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1612	{ 0xFFFFFFFF }
1613};
1614
1615static const struct si_cac_config_reg cac_override_oland[] =
1616{
1617	{ 0xFFFFFFFF }
1618};
1619
1620static const struct si_powertune_data powertune_data_oland =
1621{
1622	((1 << 16) | 0x6993),
1623	5,
1624	0,
1625	7,
1626	105,
1627	{
1628		0UL,
1629		0UL,
1630		7194395UL,
1631		309631529UL,
1632		-1270850L,
1633		4513710L,
1634		100
1635	},
1636	117830498UL,
1637	12,
1638	{
1639		0,
1640		0,
1641		0,
1642		0,
1643		0,
1644		0,
1645		0,
1646		0
1647	},
1648	true
1649};
1650
1651static const struct si_powertune_data powertune_data_mars_pro =
1652{
1653	((1 << 16) | 0x6993),
1654	5,
1655	0,
1656	7,
1657	105,
1658	{
1659		0UL,
1660		0UL,
1661		7194395UL,
1662		309631529UL,
1663		-1270850L,
1664		4513710L,
1665		100
1666	},
1667	117830498UL,
1668	12,
1669	{
1670		0,
1671		0,
1672		0,
1673		0,
1674		0,
1675		0,
1676		0,
1677		0
1678	},
1679	true
1680};
1681
1682static const struct si_dte_data dte_data_oland =
1683{
1684	{ 0, 0, 0, 0, 0 },
1685	{ 0, 0, 0, 0, 0 },
1686	0,
1687	0,
1688	0,
1689	0,
1690	0,
1691	0,
1692	0,
1693	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1694	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1695	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1696	0,
1697	false
1698};
1699
1700static const struct si_dte_data dte_data_mars_pro =
1701{
1702	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1703	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
1704	5,
1705	55000,
1706	105,
1707	0xA,
1708	1,
1709	0,
1710	0x10,
1711	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1712	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1713	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1714	90,
1715	true
1716};
1717
1718static const struct si_dte_data dte_data_sun_xt =
1719{
1720	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1721	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
1722	5,
1723	55000,
1724	105,
1725	0xA,
1726	1,
1727	0,
1728	0x10,
1729	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1730	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1731	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1732	90,
1733	true
1734};
1735
1736
1737static const struct si_cac_config_reg cac_weights_hainan[] =
1738{
1739	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
1740	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
1741	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
1742	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
1743	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1744	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
1745	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1746	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1747	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1748	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
1749	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
1750	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
1751	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
1752	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1753	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
1754	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1755	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1756	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
1757	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
1758	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
1759	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
1760	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
1761	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
1762	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
1763	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1764	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
1765	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
1766	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1767	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1768	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1769	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
1770	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1771	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1772	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1773	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
1774	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
1775	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
1776	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1777	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1778	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
1779	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1780	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
1781	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1782	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1783	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1784	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1785	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1786	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1787	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1788	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1789	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1790	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1791	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1792	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1793	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1794	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1795	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1796	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1797	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1798	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
1799	{ 0xFFFFFFFF }
1800};
1801
1802static const struct si_powertune_data powertune_data_hainan =
1803{
1804	((1 << 16) | 0x6993),
1805	5,
1806	0,
1807	9,
1808	105,
1809	{
1810		0UL,
1811		0UL,
1812		7194395UL,
1813		309631529UL,
1814		-1270850L,
1815		4513710L,
1816		100
1817	},
1818	117830498UL,
1819	12,
1820	{
1821		0,
1822		0,
1823		0,
1824		0,
1825		0,
1826		0,
1827		0,
1828		0
1829	},
1830	true
1831};
1832
1833static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
1834static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
1835static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
1836static struct  si_ps *si_get_ps(struct amdgpu_ps *rps);
1837
1838static int si_populate_voltage_value(struct amdgpu_device *adev,
1839				     const struct atom_voltage_table *table,
1840				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
1841static int si_get_std_voltage_value(struct amdgpu_device *adev,
1842				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
1843				    u16 *std_voltage);
1844static int si_write_smc_soft_register(struct amdgpu_device *adev,
1845				      u16 reg_offset, u32 value);
1846static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
1847					 struct rv7xx_pl *pl,
1848					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
1849static int si_calculate_sclk_params(struct amdgpu_device *adev,
1850				    u32 engine_clock,
1851				    SISLANDS_SMC_SCLK_VALUE *sclk);
1852
1853static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
1854static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
1855static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
1856
1857static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
1858{
1859	struct si_power_info *pi = adev->pm.dpm.priv;
1860	return pi;
1861}
1862
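/*
 * Evaluate the SI leakage model with the DRM fixed-point helpers:
 * kt scales leakage with temperature relative to t_ref using the
 * t_slope/t_intercept coefficients, kv scales it with voltage as
 * av * e^(bv * V), and the result is I_leak * kt * kv * V, returned
 * scaled by 1000.
 */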
1863static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
1864						     u16 v, s32 t, u32 ileakage, u32 *leakage)
1865{
1866	s64 kt, kv, leakage_w, i_leakage, vddc;
1867	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
1868	s64 tmp;
1869
1870	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1871	vddc = div64_s64(drm_int2fixp(v), 1000);
1872	temperature = div64_s64(drm_int2fixp(t), 1000);
1873
1874	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
1875	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
1876	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
1877	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
1878	t_ref = drm_int2fixp(coeff->t_ref);
1879
1880	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
1881	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
1882	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
1883	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
1884
1885	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1886
1887	*leakage = drm_fixp2int(leakage_w * 1000);
1888}
1889
1890static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
1891					     const struct ni_leakage_coeffients *coeff,
1892					     u16 v,
1893					     s32 t,
1894					     u32 i_leakage,
1895					     u32 *leakage)
1896{
1897	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
1898}
1899
1900static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
1901					       const u32 fixed_kt, u16 v,
1902					       u32 ileakage, u32 *leakage)
1903{
1904	s64 kt, kv, leakage_w, i_leakage, vddc;
1905
1906	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1907	vddc = div64_s64(drm_int2fixp(v), 1000);
1908
1909	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
1910	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
1911			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
1912
1913	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1914
1915	*leakage = drm_fixp2int(leakage_w * 1000);
1916}
1917
1918static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
1919				       const struct ni_leakage_coeffients *coeff,
1920				       const u32 fixed_kt,
1921				       u16 v,
1922				       u32 i_leakage,
1923				       u32 *leakage)
1924{
1925	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
1926}
1927
1928
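/*
 * Rescale the DTE filter coefficients from the board power limits:
 * when a valid near-TDP (PL2) value is present, recompute the r[]
 * entries and the temperature-dependent tdep_r[] entries from it so
 * the DTE model tracks the configured power budget.
 */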
1929static void si_update_dte_from_pl2(struct amdgpu_device *adev,
1930				   struct si_dte_data *dte_data)
1931{
1932	u32 p_limit1 = adev->pm.dpm.tdp_limit;
1933	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
1934	u32 k = dte_data->k;
1935	u32 t_max = dte_data->max_t;
1936	u32 t_split[5] = { 10, 15, 20, 25, 30 };
1937	u32 t_0 = dte_data->t0;
1938	u32 i;
1939
1940	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
1941		dte_data->tdep_count = 3;
1942
1943		for (i = 0; i < k; i++) {
1944			dte_data->r[i] =
1945				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
1946				(p_limit2  * (u32)100);
1947		}
1948
1949		dte_data->tdep_r[1] = dte_data->r[4] * 2;
1950
1951		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
1952			dte_data->tdep_r[i] = dte_data->r[4];
1953		}
1954	} else {
1955		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
1956	}
1957}
1958
1959static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
1960{
1961	struct rv7xx_power_info *pi = adev->pm.dpm.priv;
1962
1963	return pi;
1964}
1965
1966static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
1967{
1968	struct ni_power_info *pi = adev->pm.dpm.priv;
1969
1970	return pi;
1971}
1972
1973static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
1974{
1975	struct  si_ps *ps = aps->ps_priv;
1976
1977	return ps;
1978}
1979
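/*
 * Select the per-ASIC PowerTune defaults (CAC weights, LCAC config,
 * CAC override, powertune data and DTE data) based on the chip family
 * and, where needed, the PCI device ID, then derive the power
 * containment, CAC and SQ ramping enables from those defaults.
 */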
1980static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
1981{
1982	struct ni_power_info *ni_pi = ni_get_pi(adev);
1983	struct si_power_info *si_pi = si_get_pi(adev);
1984	bool update_dte_from_pl2 = false;
1985
1986	if (adev->asic_type == CHIP_TAHITI) {
1987		si_pi->cac_weights = cac_weights_tahiti;
1988		si_pi->lcac_config = lcac_tahiti;
1989		si_pi->cac_override = cac_override_tahiti;
1990		si_pi->powertune_data = &powertune_data_tahiti;
1991		si_pi->dte_data = dte_data_tahiti;
1992
1993		switch (adev->pdev->device) {
1994		case 0x6798:
1995			si_pi->dte_data.enable_dte_by_default = true;
1996			break;
1997		case 0x6799:
1998			si_pi->dte_data = dte_data_new_zealand;
1999			break;
2000		case 0x6790:
2001		case 0x6791:
2002		case 0x6792:
2003		case 0x679E:
2004			si_pi->dte_data = dte_data_aruba_pro;
2005			update_dte_from_pl2 = true;
2006			break;
2007		case 0x679B:
2008			si_pi->dte_data = dte_data_malta;
2009			update_dte_from_pl2 = true;
2010			break;
2011		case 0x679A:
2012			si_pi->dte_data = dte_data_tahiti_pro;
2013			update_dte_from_pl2 = true;
2014			break;
2015		default:
2016			if (si_pi->dte_data.enable_dte_by_default == true)
2017				DRM_ERROR("DTE is not enabled!\n");
2018			break;
2019		}
2020	} else if (adev->asic_type == CHIP_PITCAIRN) {
2021		si_pi->cac_weights = cac_weights_pitcairn;
2022		si_pi->lcac_config = lcac_pitcairn;
2023		si_pi->cac_override = cac_override_pitcairn;
2024		si_pi->powertune_data = &powertune_data_pitcairn;
2025
2026		switch (adev->pdev->device) {
2027		case 0x6810:
2028		case 0x6818:
2029			si_pi->dte_data = dte_data_curacao_xt;
2030			update_dte_from_pl2 = true;
2031			break;
2032		case 0x6819:
2033		case 0x6811:
2034			si_pi->dte_data = dte_data_curacao_pro;
2035			update_dte_from_pl2 = true;
2036			break;
2037		case 0x6800:
2038		case 0x6806:
2039			si_pi->dte_data = dte_data_neptune_xt;
2040			update_dte_from_pl2 = true;
2041			break;
2042		default:
2043			si_pi->dte_data = dte_data_pitcairn;
2044			break;
2045		}
2046	} else if (adev->asic_type == CHIP_VERDE) {
2047		si_pi->lcac_config = lcac_cape_verde;
2048		si_pi->cac_override = cac_override_cape_verde;
2049		si_pi->powertune_data = &powertune_data_cape_verde;
2050
2051		switch (adev->pdev->device) {
2052		case 0x683B:
2053		case 0x683F:
2054		case 0x6829:
2055		case 0x6835:
2056			si_pi->cac_weights = cac_weights_cape_verde_pro;
2057			si_pi->dte_data = dte_data_cape_verde;
2058			break;
2059		case 0x682C:
2060			si_pi->cac_weights = cac_weights_cape_verde_pro;
2061			si_pi->dte_data = dte_data_sun_xt;
2062			update_dte_from_pl2 = true;
2063			break;
2064		case 0x6825:
2065		case 0x6827:
2066			si_pi->cac_weights = cac_weights_heathrow;
2067			si_pi->dte_data = dte_data_cape_verde;
2068			break;
2069		case 0x6824:
2070		case 0x682D:
2071			si_pi->cac_weights = cac_weights_chelsea_xt;
2072			si_pi->dte_data = dte_data_cape_verde;
2073			break;
2074		case 0x682F:
2075			si_pi->cac_weights = cac_weights_chelsea_pro;
2076			si_pi->dte_data = dte_data_cape_verde;
2077			break;
2078		case 0x6820:
2079			si_pi->cac_weights = cac_weights_heathrow;
2080			si_pi->dte_data = dte_data_venus_xtx;
2081			break;
2082		case 0x6821:
2083			si_pi->cac_weights = cac_weights_heathrow;
2084			si_pi->dte_data = dte_data_venus_xt;
2085			break;
2086		case 0x6823:
2087		case 0x682B:
2088		case 0x6822:
2089		case 0x682A:
2090			si_pi->cac_weights = cac_weights_chelsea_pro;
2091			si_pi->dte_data = dte_data_venus_pro;
2092			break;
2093		default:
2094			si_pi->cac_weights = cac_weights_cape_verde;
2095			si_pi->dte_data = dte_data_cape_verde;
2096			break;
2097		}
2098	} else if (adev->asic_type == CHIP_OLAND) {
2099		si_pi->lcac_config = lcac_mars_pro;
2100		si_pi->cac_override = cac_override_oland;
2101		si_pi->powertune_data = &powertune_data_mars_pro;
2102		si_pi->dte_data = dte_data_mars_pro;
2103
2104		switch (adev->pdev->device) {
2105		case 0x6601:
2106		case 0x6621:
2107		case 0x6603:
2108		case 0x6605:
2109			si_pi->cac_weights = cac_weights_mars_pro;
2110			update_dte_from_pl2 = true;
2111			break;
2112		case 0x6600:
2113		case 0x6606:
2114		case 0x6620:
2115		case 0x6604:
2116			si_pi->cac_weights = cac_weights_mars_xt;
2117			update_dte_from_pl2 = true;
2118			break;
2119		case 0x6611:
2120		case 0x6613:
2121		case 0x6608:
2122			si_pi->cac_weights = cac_weights_oland_pro;
2123			update_dte_from_pl2 = true;
2124			break;
2125		case 0x6610:
2126			si_pi->cac_weights = cac_weights_oland_xt;
2127			update_dte_from_pl2 = true;
2128			break;
2129		default:
2130			si_pi->cac_weights = cac_weights_oland;
2131			si_pi->lcac_config = lcac_oland;
2132			si_pi->cac_override = cac_override_oland;
2133			si_pi->powertune_data = &powertune_data_oland;
2134			si_pi->dte_data = dte_data_oland;
2135			break;
2136		}
2137	} else if (adev->asic_type == CHIP_HAINAN) {
2138		si_pi->cac_weights = cac_weights_hainan;
2139		si_pi->lcac_config = lcac_oland;
2140		si_pi->cac_override = cac_override_oland;
2141		si_pi->powertune_data = &powertune_data_hainan;
2142		si_pi->dte_data = dte_data_sun_xt;
2143		update_dte_from_pl2 = true;
2144	} else {
2145		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
2146		return;
2147	}
2148
2149	ni_pi->enable_power_containment = false;
2150	ni_pi->enable_cac = false;
2151	ni_pi->enable_sq_ramping = false;
2152	si_pi->enable_dte = false;
2153
2154	if (si_pi->powertune_data->enable_powertune_by_default) {
2155		ni_pi->enable_power_containment = true;
2156		ni_pi->enable_cac = true;
2157		if (si_pi->dte_data.enable_dte_by_default) {
2158			si_pi->enable_dte = true;
2159			if (update_dte_from_pl2)
2160				si_update_dte_from_pl2(adev, &si_pi->dte_data);
2161
2162		}
2163		ni_pi->enable_sq_ramping = true;
2164	}
2165
2166	ni_pi->driver_calculate_cac_leakage = true;
2167	ni_pi->cac_configuration_required = true;
2168
2169	if (ni_pi->cac_configuration_required) {
2170		ni_pi->support_cac_long_term_average = true;
2171		si_pi->dyn_powertune_data.l2_lta_window_size =
2172			si_pi->powertune_data->l2_lta_window_size_default;
2173		si_pi->dyn_powertune_data.lts_truncate =
2174			si_pi->powertune_data->lts_truncate_default;
2175	} else {
2176		ni_pi->support_cac_long_term_average = false;
2177		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
2178		si_pi->dyn_powertune_data.lts_truncate = 0;
2179	}
2180
2181	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
2182}
2183
2184static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
2185{
2186	return 1;
2187}
2188
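/*
 * Derive the CAC window time from the window programmed in CG_CAC_CTRL
 * and the reference clock (xclk); returns 0 if xclk is unavailable.
 */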
2189static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
2190{
2191	u32 xclk;
2192	u32 wintime;
2193	u32 cac_window;
2194	u32 cac_window_size;
2195
2196	xclk = amdgpu_asic_get_xclk(adev);
2197
2198	if (xclk == 0)
2199		return 0;
2200
2201	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
2202	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
2203
2204	wintime = (cac_window_size * 100) / xclk;
2205
2206	return wintime;
2207}
2208
2209static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
2210{
2211	return power_in_watts;
2212}
2213
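/*
 * Apply the percentage TDP adjustment to the board TDP and near-TDP
 * limits, clamping the result so the near-TDP limit never exceeds the
 * TDP limit and neither limit leaves its valid range.
 */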
2214static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
2215					    bool adjust_polarity,
2216					    u32 tdp_adjustment,
2217					    u32 *tdp_limit,
2218					    u32 *near_tdp_limit)
2219{
2220	u32 adjustment_delta, max_tdp_limit;
2221
2222	if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
2223		return -EINVAL;
2224
2225	max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
2226
2227	if (adjust_polarity) {
2228		*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
2229		*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
2230	} else {
2231		*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
2232		adjustment_delta  = adev->pm.dpm.tdp_limit - *tdp_limit;
2233		if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
2234			*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
2235		else
2236			*near_tdp_limit = 0;
2237	}
2238
2239	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
2240		return -EINVAL;
2241	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
2242		return -EINVAL;
2243
2244	return 0;
2245}
2246
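/*
 * Write the adjusted TDP, near-TDP and safe power limits (scaled for
 * the SMC) into the DPM2 parameter table in SMC SRAM, and program the
 * PAPM parameters when PPM is enabled; a no-op unless power
 * containment is enabled.
 */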
2247static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
2248				      struct amdgpu_ps *amdgpu_state)
2249{
2250	struct ni_power_info *ni_pi = ni_get_pi(adev);
2251	struct si_power_info *si_pi = si_get_pi(adev);
2252
2253	if (ni_pi->enable_power_containment) {
2254		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2255		PP_SIslands_PAPMParameters *papm_parm;
2256		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
2257		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
2258		u32 tdp_limit;
2259		u32 near_tdp_limit;
2260		int ret;
2261
2262		if (scaling_factor == 0)
2263			return -EINVAL;
2264
2265		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2266
2267		ret = si_calculate_adjusted_tdp_limits(adev,
2268						       false, /* ??? */
2269						       adev->pm.dpm.tdp_adjustment,
2270						       &tdp_limit,
2271						       &near_tdp_limit);
2272		if (ret)
2273			return ret;
2274
2275		smc_table->dpm2Params.TDPLimit =
2276			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
2277		smc_table->dpm2Params.NearTDPLimit =
2278			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
2279		smc_table->dpm2Params.SafePowerLimit =
2280			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2281
2282		ret = amdgpu_si_copy_bytes_to_smc(adev,
2283						  (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2284						   offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
2285						  (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
2286						  sizeof(u32) * 3,
2287						  si_pi->sram_end);
2288		if (ret)
2289			return ret;
2290
2291		if (si_pi->enable_ppm) {
2292			papm_parm = &si_pi->papm_parm;
2293			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
2294			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
2295			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
2296			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
2297			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
2298			papm_parm->PlatformPowerLimit = 0xffffffff;
2299			papm_parm->NearTDPLimitPAPM = 0xffffffff;
2300
2301			ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
2302							  (u8 *)papm_parm,
2303							  sizeof(PP_SIslands_PAPMParameters),
2304							  si_pi->sram_end);
2305			if (ret)
2306				return ret;
2307		}
2308	}
2309	return 0;
2310}
2311
2312static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
2313					struct amdgpu_ps *amdgpu_state)
2314{
2315	struct ni_power_info *ni_pi = ni_get_pi(adev);
2316	struct si_power_info *si_pi = si_get_pi(adev);
2317
2318	if (ni_pi->enable_power_containment) {
2319		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2320		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
2321		int ret;
2322
2323		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2324
2325		smc_table->dpm2Params.NearTDPLimit =
2326			cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
2327		smc_table->dpm2Params.SafePowerLimit =
2328			cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2329
2330		ret = amdgpu_si_copy_bytes_to_smc(adev,
2331						  (si_pi->state_table_start +
2332						   offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2333						   offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
2334						  (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
2335						  sizeof(u32) * 2,
2336						  si_pi->sram_end);
2337		if (ret)
2338			return ret;
2339	}
2340
2341	return 0;
2342}
2343
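/*
 * Power efficiency ratio between two performance levels, in 1/1024
 * units: 1024 * Vcurr^2 * (1000 + margin) / (1000 * Vprev^2), or 0 if
 * either voltage is zero or the result does not fit in 16 bits.
 */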
2344static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
2345					       const u16 prev_std_vddc,
2346					       const u16 curr_std_vddc)
2347{
2348	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
2349	u64 prev_vddc = (u64)prev_std_vddc;
2350	u64 curr_vddc = (u64)curr_std_vddc;
2351	u64 pwr_efficiency_ratio, n, d;
2352
2353	if ((prev_vddc == 0) || (curr_vddc == 0))
2354		return 0;
2355
2356	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
2357	d = prev_vddc * prev_vddc;
2358	pwr_efficiency_ratio = div64_u64(n, d);
2359
2360	if (pwr_efficiency_ratio > (u64)0xFFFF)
2361		return 0;
2362
2363	return (u16)pwr_efficiency_ratio;
2364}
2365
2366static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
2367					    struct amdgpu_ps *amdgpu_state)
2368{
2369	struct si_power_info *si_pi = si_get_pi(adev);
2370
2371	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
2372	    amdgpu_state->vclk && amdgpu_state->dclk)
2373		return true;
2374
2375	return false;
2376}
2377
2378static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
2379{
2380	struct evergreen_power_info *pi = adev->pm.dpm.priv;
2381
2382	return pi;
2383}
2384
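/*
 * Fill the DPM2 power containment fields for each performance level:
 * MaxPS is derived from the gap between the level's sclk and the
 * minimum allowed sclk, and PwrEfficiencyRatio from the previous and
 * current standard VDDC values; level 0 gets all-zero DPM2 parameters.
 */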
2385static int si_populate_power_containment_values(struct amdgpu_device *adev,
2386						struct amdgpu_ps *amdgpu_state,
2387						SISLANDS_SMC_SWSTATE *smc_state)
2388{
2389	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
2390	struct ni_power_info *ni_pi = ni_get_pi(adev);
2391	struct  si_ps *state = si_get_ps(amdgpu_state);
2392	SISLANDS_SMC_VOLTAGE_VALUE vddc;
2393	u32 prev_sclk;
2394	u32 max_sclk;
2395	u32 min_sclk;
2396	u16 prev_std_vddc;
2397	u16 curr_std_vddc;
2398	int i;
2399	u16 pwr_efficiency_ratio;
2400	u8 max_ps_percent;
2401	bool disable_uvd_power_tune;
2402	int ret;
2403
2404	if (ni_pi->enable_power_containment == false)
2405		return 0;
2406
2407	if (state->performance_level_count == 0)
2408		return -EINVAL;
2409
2410	if (smc_state->levelCount != state->performance_level_count)
2411		return -EINVAL;
2412
2413	disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
2414
2415	smc_state->levels[0].dpm2.MaxPS = 0;
2416	smc_state->levels[0].dpm2.NearTDPDec = 0;
2417	smc_state->levels[0].dpm2.AboveSafeInc = 0;
2418	smc_state->levels[0].dpm2.BelowSafeInc = 0;
2419	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
2420
2421	for (i = 1; i < state->performance_level_count; i++) {
2422		prev_sclk = state->performance_levels[i-1].sclk;
2423		max_sclk  = state->performance_levels[i].sclk;
2424		if (i == 1)
2425			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
2426		else
2427			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
2428
2429		if (prev_sclk > max_sclk)
2430			return -EINVAL;
2431
2432		if ((max_ps_percent == 0) ||
2433		    (prev_sclk == max_sclk) ||
2434		    disable_uvd_power_tune)
2435			min_sclk = max_sclk;
2436		else if (i == 1)
2437			min_sclk = prev_sclk;
2438		else
2439			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
2440
2441		if (min_sclk < state->performance_levels[0].sclk)
2442			min_sclk = state->performance_levels[0].sclk;
2443
2444		if (min_sclk == 0)
2445			return -EINVAL;
2446
2447		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
2448						state->performance_levels[i-1].vddc, &vddc);
2449		if (ret)
2450			return ret;
2451
2452		ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
2453		if (ret)
2454			return ret;
2455
2456		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
2457						state->performance_levels[i].vddc, &vddc);
2458		if (ret)
2459			return ret;
2460
2461		ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
2462		if (ret)
2463			return ret;
2464
2465		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
2466									   prev_std_vddc, curr_std_vddc);
2467
2468		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
2469		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
2470		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
2471		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
2472		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
2473	}
2474
2475	return 0;
2476}
2477
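/*
 * Program the per-level SQ power throttle thresholds used for SQ
 * ramping; ramping is left disabled for a level if its sclk is below
 * the ramping threshold or if any of the compile-time parameters do
 * not fit in the corresponding register fields.
 */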
2478static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
2479					 struct amdgpu_ps *amdgpu_state,
2480					 SISLANDS_SMC_SWSTATE *smc_state)
2481{
2482	struct ni_power_info *ni_pi = ni_get_pi(adev);
2483	struct  si_ps *state = si_get_ps(amdgpu_state);
2484	u32 sq_power_throttle, sq_power_throttle2;
2485	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
2486	int i;
2487
2488	if (state->performance_level_count == 0)
2489		return -EINVAL;
2490
2491	if (smc_state->levelCount != state->performance_level_count)
2492		return -EINVAL;
2493
2494	if (adev->pm.dpm.sq_ramping_threshold == 0)
2495		return -EINVAL;
2496
2497	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
2498		enable_sq_ramping = false;
2499
2500	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
2501		enable_sq_ramping = false;
2502
2503	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
2504		enable_sq_ramping = false;
2505
2506	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2507		enable_sq_ramping = false;
2508
2509	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2510		enable_sq_ramping = false;
2511
2512	for (i = 0; i < state->performance_level_count; i++) {
2513		sq_power_throttle = 0;
2514		sq_power_throttle2 = 0;
2515
2516		if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
2517		    enable_sq_ramping) {
2518			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
2519			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
2520			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
2521			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
2522			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
2523		} else {
2524			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
2525			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
2526		}
2527
2528		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
2529		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
2530	}
2531
2532	return 0;
2533}
2534
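/*
 * Toggle SMC TDP clamping (power containment) via PPSMC messages and
 * track the result in ni_pi->pc_enabled.
 */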
2535static int si_enable_power_containment(struct amdgpu_device *adev,
2536				       struct amdgpu_ps *amdgpu_new_state,
2537				       bool enable)
2538{
2539	struct ni_power_info *ni_pi = ni_get_pi(adev);
2540	PPSMC_Result smc_result;
2541	int ret = 0;
2542
2543	if (ni_pi->enable_power_containment) {
2544		if (enable) {
2545			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
2546				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
2547				if (smc_result != PPSMC_Result_OK) {
2548					ret = -EINVAL;
2549					ni_pi->pc_enabled = false;
2550				} else {
2551					ni_pi->pc_enabled = true;
2552				}
2553			}
2554		} else {
2555			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
2556			if (smc_result != PPSMC_Result_OK)
2557				ret = -EINVAL;
2558			ni_pi->pc_enabled = false;
2559		}
2560	}
2561
2562	return ret;
2563}
2564
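/*
 * Build the Smc_SIslands_DTE_Configuration from the selected DTE data
 * (filter taps plus temperature-dependent entries) and copy it into
 * SMC SRAM; returns 0 without doing anything when DTE is disabled.
 */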
2565static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
2566{
2567	struct si_power_info *si_pi = si_get_pi(adev);
2568	int ret = 0;
2569	struct si_dte_data *dte_data = &si_pi->dte_data;
2570	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
2571	u32 table_size;
2572	u8 tdep_count;
2573	u32 i;
2574
2575	if (dte_data == NULL)
2576		si_pi->enable_dte = false;
2577
2578	if (si_pi->enable_dte == false)
2579		return 0;
2580
2581	if (dte_data->k <= 0)
2582		return -EINVAL;
2583
2584	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
2585	if (dte_tables == NULL) {
2586		si_pi->enable_dte = false;
2587		return -ENOMEM;
2588	}
2589
2590	table_size = dte_data->k;
2591
2592	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
2593		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
2594
2595	tdep_count = dte_data->tdep_count;
2596	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
2597		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
2598
2599	dte_tables->K = cpu_to_be32(table_size);
2600	dte_tables->T0 = cpu_to_be32(dte_data->t0);
2601	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
2602	dte_tables->WindowSize = dte_data->window_size;
2603	dte_tables->temp_select = dte_data->temp_select;
2604	dte_tables->DTE_mode = dte_data->dte_mode;
2605	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
2606
2607	if (tdep_count > 0)
2608		table_size--;
2609
2610	for (i = 0; i < table_size; i++) {
2611		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
2612		dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
2613	}
2614
2615	dte_tables->Tdep_count = tdep_count;
2616
2617	for (i = 0; i < (u32)tdep_count; i++) {
2618		dte_tables->T_limits[i] = dte_data->t_limits[i];
2619		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
2620		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
2621	}
2622
2623	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
2624					  (u8 *)dte_tables,
2625					  sizeof(Smc_SIslands_DTE_Configuration),
2626					  si_pi->sram_end);
2627	kfree(dte_tables);
2628
2629	return ret;
2630}
2631
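/*
 * Find the minimum and maximum VDDC in the CAC leakage table and lower
 * the minimum by the load-line percentage to get the voltage range
 * used for the leakage lookup table.
 */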
2632static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
2633					  u16 *max, u16 *min)
2634{
2635	struct si_power_info *si_pi = si_get_pi(adev);
2636	struct amdgpu_cac_leakage_table *table =
2637		&adev->pm.dpm.dyn_state.cac_leakage_table;
2638	u32 i;
2639	u32 v0_loadline;
2640
2641	if (table == NULL)
2642		return -EINVAL;
2643
2644	*max = 0;
2645	*min = 0xFFFF;
2646
2647	for (i = 0; i < table->count; i++) {
2648		if (table->entries[i].vddc > *max)
2649			*max = table->entries[i].vddc;
2650		if (table->entries[i].vddc < *min)
2651			*min = table->entries[i].vddc;
2652	}
2653
2654	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
2655		return -EINVAL;
2656
2657	v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
2658
2659	if (v0_loadline > 0xFFFFUL)
2660		return -EINVAL;
2661
2662	*min = (u16)v0_loadline;
2663
2664	if ((*min > *max) || (*max == 0) || (*min == 0))
2665		return -EINVAL;
2666
2667	return 0;
2668}
2669
2670static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
2671{
2672	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
2673		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
2674}
2675
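/*
 * Fill the temperature/voltage leakage lookup table used when DTE or
 * driver-calculated CAC leakage is enabled: one leakage value per
 * (temperature, voltage) pair, scaled to SMC units and stored big endian
 * with voltages in ascending order.
 */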
2676static int si_init_dte_leakage_table(struct amdgpu_device *adev,
2677				     PP_SIslands_CacConfig *cac_tables,
2678				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
2679				     u16 t0, u16 t_step)
2680{
2681	struct si_power_info *si_pi = si_get_pi(adev);
2682	u32 leakage;
2683	unsigned int i, j;
2684	s32 t;
2685	u32 smc_leakage;
2686	u32 scaling_factor;
2687	u16 voltage;
2688
2689	scaling_factor = si_get_smc_power_scaling_factor(adev);
2690
2691	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
2692		t = (1000 * (i * t_step + t0));
2693
2694		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2695			voltage = vddc_max - (vddc_step * j);
2696
2697			si_calculate_leakage_for_v_and_t(adev,
2698							 &si_pi->powertune_data->leakage_coefficients,
2699							 voltage,
2700							 t,
2701							 si_pi->dyn_powertune_data.cac_leakage,
2702							 &leakage);
2703
2704			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2705
2706			if (smc_leakage > 0xFFFF)
2707				smc_leakage = 0xFFFF;
2708
2709			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2710				cpu_to_be16((u16)smc_leakage);
2711		}
2712	}
2713	return 0;
2714}
2715
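/*
 * Simplified variant of the leakage LUT: leakage depends only on voltage
 * (fixed temperature coefficient), so every temperature row gets the same
 * value for a given voltage column.
 */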
2716static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
2717					    PP_SIslands_CacConfig *cac_tables,
2718					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
2719{
2720	struct si_power_info *si_pi = si_get_pi(adev);
2721	u32 leakage;
2722	unsigned int i, j;
2723	u32 smc_leakage;
2724	u32 scaling_factor;
2725	u16 voltage;
2726
2727	scaling_factor = si_get_smc_power_scaling_factor(adev);
2728
2729	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2730		voltage = vddc_max - (vddc_step * j);
2731
2732		si_calculate_leakage_for_v(adev,
2733					   &si_pi->powertune_data->leakage_coefficients,
2734					   si_pi->powertune_data->fixed_kt,
2735					   voltage,
2736					   si_pi->dyn_powertune_data.cac_leakage,
2737					   &leakage);
2738
2739		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2740
2741		if (smc_leakage > 0xFFFF)
2742			smc_leakage = 0xFFFF;
2743
2744		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
2745			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2746				cpu_to_be16((u16)smc_leakage);
2747	}
2748	return 0;
2749}
2750
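/*
 * Program the CAC configuration: set the CAC sampling window, build the
 * leakage LUT, fill in load line slope, window time and temperature
 * parameters, and upload the whole table to SMC SRAM.  On any failure CAC
 * and power containment are disabled.
 */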
2751static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
2752{
2753	struct ni_power_info *ni_pi = ni_get_pi(adev);
2754	struct si_power_info *si_pi = si_get_pi(adev);
2755	PP_SIslands_CacConfig *cac_tables = NULL;
2756	u16 vddc_max, vddc_min, vddc_step;
2757	u16 t0, t_step;
2758	u32 load_line_slope, reg;
2759	int ret = 0;
2760	u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
2761
2762	if (!ni_pi->enable_cac)
2763		return 0;
2764
2765	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
2766	if (!cac_tables)
2767		return -ENOMEM;
2768
2769	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
2770	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
2771	WREG32(CG_CAC_CTRL, reg);
2772
2773	si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
2774	si_pi->dyn_powertune_data.dc_pwr_value =
2775		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
2776	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
2777	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
2778
2779	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
2780
2781	ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
2782	if (ret)
2783		goto done_free;
2784
2785	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
2786	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
2787	t_step = 4;
2788	t0 = 60;
2789
2790	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
2791		ret = si_init_dte_leakage_table(adev, cac_tables,
2792						vddc_max, vddc_min, vddc_step,
2793						t0, t_step);
2794	else
2795		ret = si_init_simplified_leakage_table(adev, cac_tables,
2796						       vddc_max, vddc_min, vddc_step);
2797	if (ret)
2798		goto done_free;
2799
2800	load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
2801
2802	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
2803	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
2804	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
2805	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
2806	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
2807	cac_tables->R_LL = cpu_to_be32(load_line_slope);
2808	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
2809	cac_tables->calculation_repeats = cpu_to_be32(2);
2810	cac_tables->dc_cac = cpu_to_be32(0);
2811	cac_tables->log2_PG_LKG_SCALE = 12;
2812	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
2813	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
2814	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
2815
2816	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
2817					  (u8 *)cac_tables,
2818					  sizeof(PP_SIslands_CacConfig),
2819					  si_pi->sram_end);
2820
2821	if (ret)
2822		goto done_free;
2823
2824	ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
2825
2826done_free:
2827	if (ret) {
2828		ni_pi->enable_cac = false;
2829		ni_pi->enable_power_containment = false;
2830	}
2831
2832	kfree(cac_tables);
2833
2834	return ret;
2835}
2836
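/*
 * Walk a 0xFFFFFFFF-terminated list of read-modify-write register updates,
 * using the SMC indirect register space for SISLANDS_CACCONFIG_CGIND entries.
 */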
2837static int si_program_cac_config_registers(struct amdgpu_device *adev,
2838					   const struct si_cac_config_reg *cac_config_regs)
2839{
2840	const struct si_cac_config_reg *config_regs = cac_config_regs;
2841	u32 data = 0, offset;
2842
2843	if (!config_regs)
2844		return -EINVAL;
2845
2846	while (config_regs->offset != 0xFFFFFFFF) {
2847		switch (config_regs->type) {
2848		case SISLANDS_CACCONFIG_CGIND:
2849			offset = SMC_CG_IND_START + config_regs->offset;
2850			if (offset < SMC_CG_IND_END)
2851				data = RREG32_SMC(offset);
2852			break;
2853		default:
2854			data = RREG32(config_regs->offset);
2855			break;
2856		}
2857
2858		data &= ~config_regs->mask;
2859		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
2860
2861		switch (config_regs->type) {
2862		case SISLANDS_CACCONFIG_CGIND:
2863			offset = SMC_CG_IND_START + config_regs->offset;
2864			if (offset < SMC_CG_IND_END)
2865				WREG32_SMC(offset, data);
2866			break;
2867		default:
2868			WREG32(config_regs->offset, data);
2869			break;
2870		}
2871		config_regs++;
2872	}
2873	return 0;
2874}
2875
2876static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
2877{
2878	struct ni_power_info *ni_pi = ni_get_pi(adev);
2879	struct si_power_info *si_pi = si_get_pi(adev);
2880	int ret;
2881
2882	if (!ni_pi->enable_cac ||
2883	    !ni_pi->cac_configuration_required)
2884		return 0;
2885
2886	ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
2887	if (ret)
2888		return ret;
2889	ret = si_program_cac_config_registers(adev, si_pi->cac_override);
2890	if (ret)
2891		return ret;
2892	ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
2893	if (ret)
2894		return ret;
2895
2896	return 0;
2897}
2898
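/*
 * Tell the SMC to start or stop CAC tracking, together with long term
 * averaging and DTE where supported.  Enabling is skipped when powertune
 * has to stay off for UVD in the new state.
 */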
2899static int si_enable_smc_cac(struct amdgpu_device *adev,
2900			     struct amdgpu_ps *amdgpu_new_state,
2901			     bool enable)
2902{
2903	struct ni_power_info *ni_pi = ni_get_pi(adev);
2904	struct si_power_info *si_pi = si_get_pi(adev);
2905	PPSMC_Result smc_result;
2906	int ret = 0;
2907
2908	if (ni_pi->enable_cac) {
2909		if (enable) {
2910			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
2911				if (ni_pi->support_cac_long_term_average) {
2912					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
2913					if (smc_result != PPSMC_Result_OK)
2914						ni_pi->support_cac_long_term_average = false;
2915				}
2916
2917				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
2918				if (smc_result != PPSMC_Result_OK) {
2919					ret = -EINVAL;
2920					ni_pi->cac_enabled = false;
2921				} else {
2922					ni_pi->cac_enabled = true;
2923				}
2924
2925				if (si_pi->enable_dte) {
2926					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
2927					if (smc_result != PPSMC_Result_OK)
2928						ret = -EINVAL;
2929				}
2930			}
2931		} else if (ni_pi->cac_enabled) {
2932			if (si_pi->enable_dte)
2933				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
2934
2935			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
2936
2937			ni_pi->cac_enabled = false;
2938
2939			if (ni_pi->support_cac_long_term_average)
2940				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
2941		}
2942	}
2943	return ret;
2944}
2945
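/*
 * Precompute SPLL feedback/post divider and spread spectrum settings for
 * 256 evenly spaced engine clock levels and upload the resulting table to
 * SMC SRAM.
 */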
2946static int si_init_smc_spll_table(struct amdgpu_device *adev)
2947{
2948	struct ni_power_info *ni_pi = ni_get_pi(adev);
2949	struct si_power_info *si_pi = si_get_pi(adev);
2950	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
2951	SISLANDS_SMC_SCLK_VALUE sclk_params;
2952	u32 fb_div, p_div;
2953	u32 clk_s, clk_v;
2954	u32 sclk = 0;
2955	int ret = 0;
2956	u32 tmp;
2957	int i;
2958
2959	if (si_pi->spll_table_start == 0)
2960		return -EINVAL;
2961
2962	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
2963	if (spll_table == NULL)
2964		return -ENOMEM;
2965
2966	for (i = 0; i < 256; i++) {
2967		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
2968		if (ret)
2969			break;
2970		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
2971		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
2972		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
2973		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
2974
2975		fb_div &= ~0x00001FFF;
2976		fb_div >>= 1;
2977		clk_v >>= 6;
2978
2979		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
2980			ret = -EINVAL;
2981		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
2982			ret = -EINVAL;
2983		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2984			ret = -EINVAL;
2985		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2986			ret = -EINVAL;
2987
2988		if (ret)
2989			break;
2990
2991		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
2992			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
2993		spll_table->freq[i] = cpu_to_be32(tmp);
2994
2995		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
2996			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
2997		spll_table->ss[i] = cpu_to_be32(tmp);
2998
2999		sclk += 512;
3000	}
3001
3003	if (!ret)
3004		ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
3005						  (u8 *)spll_table,
3006						  sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
3007						  si_pi->sram_end);
3008
3009	if (ret)
3010		ni_pi->enable_power_containment = false;
3011
3012	kfree(spll_table);
3013
3014	return ret;
3015}
3016
3017static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
3018						   u16 vce_voltage)
3019{
3020	u16 highest_leakage = 0;
3021	struct si_power_info *si_pi = si_get_pi(adev);
3022	int i;
3023
3024	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
3025		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
3026			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
3027	}
3028
3029	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
3030		return highest_leakage;
3031
3032	return vce_voltage;
3033}
3034
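/*
 * Look up the minimum voltage that supports the requested VCE clocks,
 * falling back to the table's highest entry when no entry is high enough,
 * and cap the result by the highest known leakage voltage.
 */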
3035static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
3036				    u32 evclk, u32 ecclk, u16 *voltage)
3037{
3038	u32 i;
3039	int ret = -EINVAL;
3040	struct amdgpu_vce_clock_voltage_dependency_table *table =
3041		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3042
3043	if (((evclk == 0) && (ecclk == 0)) ||
3044	    (table && (table->count == 0))) {
3045		*voltage = 0;
3046		return 0;
3047	}
3048
3049	for (i = 0; i < table->count; i++) {
3050		if ((evclk <= table->entries[i].evclk) &&
3051		    (ecclk <= table->entries[i].ecclk)) {
3052			*voltage = table->entries[i].v;
3053			ret = 0;
3054			break;
3055		}
3056	}
3057
3058	/* if no match return the highest voltage */
3059	if (ret)
3060		*voltage = table->entries[table->count - 1].v;
3061
3062	*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
3063
3064	return ret;
3065}
3066
3067static bool si_dpm_vblank_too_short(void *handle)
3068{
3069	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3070	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
3071	/* we never hit the non-gddr5 limit so disable it */
3072	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
3073
3074	return vblank_time < switch_limit;
3079}
3080
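/*
 * Copy the MC arbiter DRAM timing/burst registers from one arbitration set
 * to another and tell the memory controller to switch to the destination set.
 */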
3081static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
3082				u32 arb_freq_src, u32 arb_freq_dest)
3083{
3084	u32 mc_arb_dram_timing;
3085	u32 mc_arb_dram_timing2;
3086	u32 burst_time;
3087	u32 mc_cg_config;
3088
3089	switch (arb_freq_src) {
3090	case MC_CG_ARB_FREQ_F0:
3091		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
3092		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
3093		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
3094		break;
3095	case MC_CG_ARB_FREQ_F1:
3096		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
3097		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
3098		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
3099		break;
3100	case MC_CG_ARB_FREQ_F2:
3101		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
3102		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
3103		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
3104		break;
3105	case MC_CG_ARB_FREQ_F3:
3106		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
3107		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
3108		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
3109		break;
3110	default:
3111		return -EINVAL;
3112	}
3113
3114	switch (arb_freq_dest) {
3115	case MC_CG_ARB_FREQ_F0:
3116		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
3117		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
3118		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
3119		break;
3120	case MC_CG_ARB_FREQ_F1:
3121		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
3122		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
3123		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
3124		break;
3125	case MC_CG_ARB_FREQ_F2:
3126		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
3127		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
3128		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
3129		break;
3130	case MC_CG_ARB_FREQ_F3:
3131		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
3132		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
3133		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
3134		break;
3135	default:
3136		return -EINVAL;
3137	}
3138
3139	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
3140	WREG32(MC_CG_CONFIG, mc_cg_config);
3141	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
3142
3143	return 0;
3144}
3145
3146static void ni_update_current_ps(struct amdgpu_device *adev,
3147			  struct amdgpu_ps *rps)
3148{
3149	struct si_ps *new_ps = si_get_ps(rps);
3150	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3151	struct ni_power_info *ni_pi = ni_get_pi(adev);
3152
3153	eg_pi->current_rps = *rps;
3154	ni_pi->current_ps = *new_ps;
3155	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
3156	adev->pm.dpm.current_ps = &eg_pi->current_rps;
3157}
3158
3159static void ni_update_requested_ps(struct amdgpu_device *adev,
3160			    struct amdgpu_ps *rps)
3161{
3162	struct si_ps *new_ps = si_get_ps(rps);
3163	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3164	struct ni_power_info *ni_pi = ni_get_pi(adev);
3165
3166	eg_pi->requested_rps = *rps;
3167	ni_pi->requested_ps = *new_ps;
3168	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
3169	adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
3170}
3171
3172static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
3173					   struct amdgpu_ps *new_ps,
3174					   struct amdgpu_ps *old_ps)
3175{
3176	struct si_ps *new_state = si_get_ps(new_ps);
3177	struct si_ps *current_state = si_get_ps(old_ps);
3178
3179	if ((new_ps->vclk == old_ps->vclk) &&
3180	    (new_ps->dclk == old_ps->dclk))
3181		return;
3182
3183	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
3184	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3185		return;
3186
3187	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
3188}
3189
3190static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
3191					  struct amdgpu_ps *new_ps,
3192					  struct amdgpu_ps *old_ps)
3193{
3194	struct si_ps *new_state = si_get_ps(new_ps);
3195	struct si_ps *current_state = si_get_ps(old_ps);
3196
3197	if ((new_ps->vclk == old_ps->vclk) &&
3198	    (new_ps->dclk == old_ps->dclk))
3199		return;
3200
3201	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
3202	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3203		return;
3204
3205	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
3206}
3207
3208static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
3209{
3210	unsigned int i;
3211
3212	for (i = 0; i < table->count; i++)
3213		if (voltage <= table->entries[i].value)
3214			return table->entries[i].value;
3215
3216	return table->entries[table->count - 1].value;
3217}
3218
3219static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
3220		                u32 max_clock, u32 requested_clock)
3221{
3222	unsigned int i;
3223
3224	if ((clocks == NULL) || (clocks->count == 0))
3225		return (requested_clock < max_clock) ? requested_clock : max_clock;
3226
3227	for (i = 0; i < clocks->count; i++) {
3228		if (clocks->values[i] >= requested_clock)
3229			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
3230	}
3231
3232	return (clocks->values[clocks->count - 1] < max_clock) ?
3233		clocks->values[clocks->count - 1] : max_clock;
3234}
3235
3236static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
3237			      u32 max_mclk, u32 requested_mclk)
3238{
3239	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
3240				    max_mclk, requested_mclk);
3241}
3242
3243static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
3244		              u32 max_sclk, u32 requested_sclk)
3245{
3246	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
3247				    max_sclk, requested_sclk);
3248}
3249
3250static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
3251							    u32 *max_clock)
3252{
3253	u32 i, clock = 0;
3254
3255	if ((table == NULL) || (table->count == 0)) {
3256		*max_clock = clock;
3257		return;
3258	}
3259
3260	for (i = 0; i < table->count; i++) {
3261		if (clock < table->entries[i].clk)
3262			clock = table->entries[i].clk;
3263	}
3264	*max_clock = clock;
3265}
3266
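/*
 * Raise *voltage to what the dependency table requires for the given clock
 * (capped at max_voltage); above the table range it is raised to at least
 * max_voltage.
 */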
3267static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
3268					       u32 clock, u16 max_voltage, u16 *voltage)
3269{
3270	u32 i;
3271
3272	if ((table == NULL) || (table->count == 0))
3273		return;
3274
3275	for (i = 0; i < table->count; i++) {
3276		if (clock <= table->entries[i].clk) {
3277			if (*voltage < table->entries[i].v)
3278				*voltage = (u16)((table->entries[i].v < max_voltage) ?
3279					   table->entries[i].v : max_voltage);
3280			return;
3281		}
3282	}
3283
3284	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
3285}
3286
3287static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
3288					  const struct amdgpu_clock_and_voltage_limits *max_limits,
3289					  struct rv7xx_pl *pl)
3290{
3291
3292	if ((pl->mclk == 0) || (pl->sclk == 0))
3293		return;
3294
3295	if (pl->mclk == pl->sclk)
3296		return;
3297
3298	if (pl->mclk > pl->sclk) {
3299		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
3300			pl->sclk = btc_get_valid_sclk(adev,
3301						      max_limits->sclk,
3302						      (pl->mclk +
3303						      (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
3304						      adev->pm.dpm.dyn_state.mclk_sclk_ratio);
3305	} else {
3306		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
3307			pl->mclk = btc_get_valid_mclk(adev,
3308						      max_limits->mclk,
3309						      pl->sclk -
3310						      adev->pm.dpm.dyn_state.sclk_mclk_delta);
3311	}
3312}
3313
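/*
 * Keep vddc and vddci within the allowed delta of each other by pulling the
 * lower rail up to the nearest supported voltage, capped at its maximum.
 */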
3314static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
3315					  u16 max_vddc, u16 max_vddci,
3316					  u16 *vddc, u16 *vddci)
3317{
3318	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3319	u16 new_voltage;
3320
3321	if ((0 == *vddc) || (0 == *vddci))
3322		return;
3323
3324	if (*vddc > *vddci) {
3325		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
3326			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
3327						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
3328			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
3329		}
3330	} else {
3331		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
3332			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
3333						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
3334			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
3335		}
3336	}
3337}
3338
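/*
 * Reduce i * r_c / 100 to the (p, u) pair used by the power state machine:
 * u is derived from the number of bits above p_b, and p is the value
 * shifted down by 2 * u bits.
 */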
3339static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
3340			    u32 *p, u32 *u)
3341{
3342	u32 b_c = 0;
3343	u32 i_c;
3344	u32 tmp;
3345
3346	i_c = (i * r_c) / 100;
3347	tmp = i_c >> p_b;
3348
3349	while (tmp) {
3350		b_c++;
3351		tmp >>= 1;
3352	}
3353
3354	*u = (b_c + 1) / 2;
3355	*p = i_c / (1 << (2 * (*u)));
3356}
3357
3358static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
3359{
3360	u32 k, a, ah, al;
3361	u32 t1;
3362
3363	if ((fl == 0) || (fh == 0) || (fl > fh))
3364		return -EINVAL;
3365
3366	k = (100 * fh) / fl;
3367	t1 = (t * (k - 100));
3368	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
3369	a = (a + 5) / 10;
3370	ah = ((a * t) + 5000) / 10000;
3371	al = a - ah;
3372
3373	*th = t - ah;
3374	*tl = t + al;
3375
3376	return 0;
3377}
3378
3379static bool r600_is_uvd_state(u32 class, u32 class2)
3380{
3381	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
3382		return true;
3383	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
3384		return true;
3385	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
3386		return true;
3387	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
3388		return true;
3389	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
3390		return true;
3391	return false;
3392}
3393
3394static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
3395{
3396	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
3397}
3398
3399static void rv770_get_max_vddc(struct amdgpu_device *adev)
3400{
3401	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3402	u16 vddc;
3403
3404	if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
3405		pi->max_vddc = 0;
3406	else
3407		pi->max_vddc = vddc;
3408}
3409
3410static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
3411{
3412	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3413	struct amdgpu_atom_ss ss;
3414
3415	pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
3416						       ASIC_INTERNAL_ENGINE_SS, 0);
3417	pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
3418						       ASIC_INTERNAL_MEMORY_SS, 0);
3419
3420	if (pi->sclk_ss || pi->mclk_ss)
3421		pi->dynamic_ss = true;
3422	else
3423		pi->dynamic_ss = false;
3424}
3425
3426
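/*
 * Clamp a requested power state to what the board can actually do: apply
 * per-SKU sclk/mclk limits, AC/DC and voltage dependency limits, raise
 * clocks for VCE, and flatten sclk/mclk across levels when clock switching
 * has to be disabled (multiple displays, short vblank, UVD decode).
 */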
3427static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3428					struct amdgpu_ps *rps)
3429{
3430	struct  si_ps *ps = si_get_ps(rps);
3431	struct amdgpu_clock_and_voltage_limits *max_limits;
3432	bool disable_mclk_switching = false;
3433	bool disable_sclk_switching = false;
3434	u32 mclk, sclk;
3435	u16 vddc, vddci, min_vce_voltage = 0;
3436	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
3437	u32 max_sclk = 0, max_mclk = 0;
3438	int i;
3439
3440	if (adev->asic_type == CHIP_HAINAN) {
3441		if ((adev->pdev->revision == 0x81) ||
3442		    (adev->pdev->revision == 0x83) ||
3443		    (adev->pdev->revision == 0xC3) ||
3444		    (adev->pdev->device == 0x6664) ||
3445		    (adev->pdev->device == 0x6665) ||
3446		    (adev->pdev->device == 0x6667)) {
3447			max_sclk = 75000;
3448		}
3449		if ((adev->pdev->revision == 0xC3) ||
3450		    (adev->pdev->device == 0x6665)) {
3451			max_sclk = 60000;
3452			max_mclk = 80000;
3453		}
3454	} else if (adev->asic_type == CHIP_OLAND) {
3455		if ((adev->pdev->revision == 0xC7) ||
3456		    (adev->pdev->revision == 0x80) ||
3457		    (adev->pdev->revision == 0x81) ||
3458		    (adev->pdev->revision == 0x83) ||
3459		    (adev->pdev->revision == 0x87) ||
3460		    (adev->pdev->device == 0x6604) ||
3461		    (adev->pdev->device == 0x6605)) {
3462			max_sclk = 75000;
3463		}
3464	}
3465
3466	if (rps->vce_active) {
3467		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
3468		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
3469		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
3470					 &min_vce_voltage);
3471	} else {
3472		rps->evclk = 0;
3473		rps->ecclk = 0;
3474	}
3475
3476	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
3477	    si_dpm_vblank_too_short(adev))
3478		disable_mclk_switching = true;
3479
3480	if (rps->vclk || rps->dclk) {
3481		disable_mclk_switching = true;
3482		disable_sclk_switching = true;
3483	}
3484
3485	if (adev->pm.ac_power)
3486		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3487	else
3488		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3489
3490	for (i = ps->performance_level_count - 2; i >= 0; i--) {
3491		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
3492			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
3493	}
3494	if (!adev->pm.ac_power) {
3495		for (i = 0; i < ps->performance_level_count; i++) {
3496			if (ps->performance_levels[i].mclk > max_limits->mclk)
3497				ps->performance_levels[i].mclk = max_limits->mclk;
3498			if (ps->performance_levels[i].sclk > max_limits->sclk)
3499				ps->performance_levels[i].sclk = max_limits->sclk;
3500			if (ps->performance_levels[i].vddc > max_limits->vddc)
3501				ps->performance_levels[i].vddc = max_limits->vddc;
3502			if (ps->performance_levels[i].vddci > max_limits->vddci)
3503				ps->performance_levels[i].vddci = max_limits->vddci;
3504		}
3505	}
3506
3507	/* limit clocks to max supported clocks based on voltage dependency tables */
3508	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3509							&max_sclk_vddc);
3510	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3511							&max_mclk_vddci);
3512	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3513							&max_mclk_vddc);
3514
3515	for (i = 0; i < ps->performance_level_count; i++) {
3516		if (max_sclk_vddc) {
3517			if (ps->performance_levels[i].sclk > max_sclk_vddc)
3518				ps->performance_levels[i].sclk = max_sclk_vddc;
3519		}
3520		if (max_mclk_vddci) {
3521			if (ps->performance_levels[i].mclk > max_mclk_vddci)
3522				ps->performance_levels[i].mclk = max_mclk_vddci;
3523		}
3524		if (max_mclk_vddc) {
3525			if (ps->performance_levels[i].mclk > max_mclk_vddc)
3526				ps->performance_levels[i].mclk = max_mclk_vddc;
3527		}
3528		if (max_mclk) {
3529			if (ps->performance_levels[i].mclk > max_mclk)
3530				ps->performance_levels[i].mclk = max_mclk;
3531		}
3532		if (max_sclk) {
3533			if (ps->performance_levels[i].sclk > max_sclk)
3534				ps->performance_levels[i].sclk = max_sclk;
3535		}
3536	}
3537
3538	/* XXX validate the min clocks required for display */
3539
3540	if (disable_mclk_switching) {
3541		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
3542		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
3543	} else {
3544		mclk = ps->performance_levels[0].mclk;
3545		vddci = ps->performance_levels[0].vddci;
3546	}
3547
3548	if (disable_sclk_switching) {
3549		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
3550		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
3551	} else {
3552		sclk = ps->performance_levels[0].sclk;
3553		vddc = ps->performance_levels[0].vddc;
3554	}
3555
3556	if (rps->vce_active) {
3557		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
3558			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
3559		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
3560			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
3561	}
3562
3563	/* adjusted low state */
3564	ps->performance_levels[0].sclk = sclk;
3565	ps->performance_levels[0].mclk = mclk;
3566	ps->performance_levels[0].vddc = vddc;
3567	ps->performance_levels[0].vddci = vddci;
3568
3569	if (disable_sclk_switching) {
3570		sclk = ps->performance_levels[0].sclk;
3571		for (i = 1; i < ps->performance_level_count; i++) {
3572			if (sclk < ps->performance_levels[i].sclk)
3573				sclk = ps->performance_levels[i].sclk;
3574		}
3575		for (i = 0; i < ps->performance_level_count; i++) {
3576			ps->performance_levels[i].sclk = sclk;
3577			ps->performance_levels[i].vddc = vddc;
3578		}
3579	} else {
3580		for (i = 1; i < ps->performance_level_count; i++) {
3581			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
3582				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
3583			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
3584				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
3585		}
3586	}
3587
3588	if (disable_mclk_switching) {
3589		mclk = ps->performance_levels[0].mclk;
3590		for (i = 1; i < ps->performance_level_count; i++) {
3591			if (mclk < ps->performance_levels[i].mclk)
3592				mclk = ps->performance_levels[i].mclk;
3593		}
3594		for (i = 0; i < ps->performance_level_count; i++) {
3595			ps->performance_levels[i].mclk = mclk;
3596			ps->performance_levels[i].vddci = vddci;
3597		}
3598	} else {
3599		for (i = 1; i < ps->performance_level_count; i++) {
3600			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
3601				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
3602			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
3603				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
3604		}
3605	}
3606
3607	for (i = 0; i < ps->performance_level_count; i++)
3608		btc_adjust_clock_combinations(adev, max_limits,
3609					      &ps->performance_levels[i]);
3610
3611	for (i = 0; i < ps->performance_level_count; i++) {
3612		if (ps->performance_levels[i].vddc < min_vce_voltage)
3613			ps->performance_levels[i].vddc = min_vce_voltage;
3614		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3615						   ps->performance_levels[i].sclk,
3616						   max_limits->vddc,  &ps->performance_levels[i].vddc);
3617		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3618						   ps->performance_levels[i].mclk,
3619						   max_limits->vddci, &ps->performance_levels[i].vddci);
3620		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3621						   ps->performance_levels[i].mclk,
3622						   max_limits->vddc,  &ps->performance_levels[i].vddc);
3623		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
3624						   adev->clock.current_dispclk,
3625						   max_limits->vddc,  &ps->performance_levels[i].vddc);
3626	}
3627
3628	for (i = 0; i < ps->performance_level_count; i++) {
3629		btc_apply_voltage_delta_rules(adev,
3630					      max_limits->vddc, max_limits->vddci,
3631					      &ps->performance_levels[i].vddc,
3632					      &ps->performance_levels[i].vddci);
3633	}
3634
3635	ps->dc_compatible = true;
3636	for (i = 0; i < ps->performance_level_count; i++) {
3637		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
3638			ps->dc_compatible = false;
3639	}
3640}
3641
3642#if 0
3643static int si_read_smc_soft_register(struct amdgpu_device *adev,
3644				     u16 reg_offset, u32 *value)
3645{
3646	struct si_power_info *si_pi = si_get_pi(adev);
3647
3648	return amdgpu_si_read_smc_sram_dword(adev,
3649					     si_pi->soft_regs_start + reg_offset, value,
3650					     si_pi->sram_end);
3651}
3652#endif
3653
3654static int si_write_smc_soft_register(struct amdgpu_device *adev,
3655				      u16 reg_offset, u32 value)
3656{
3657	struct si_power_info *si_pi = si_get_pi(adev);
3658
3659	return amdgpu_si_write_smc_sram_dword(adev,
3660					      si_pi->soft_regs_start + reg_offset,
3661					      value, si_pi->sram_end);
3662}
3663
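/*
 * Detect the one "special" 1 GB board (device 0x6819 with a specific memory
 * vendor/revision and GDDR5 geometry) from the MC configuration registers.
 */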
3664static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
3665{
3666	bool ret = false;
3667	u32 tmp, width, row, column, bank, density;
3668	bool is_memory_gddr5, is_special;
3669
3670	tmp = RREG32(MC_SEQ_MISC0);
3671	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
3672	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
3673		&& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
3674
3675	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
3676	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
3677
3678	tmp = RREG32(MC_ARB_RAMCFG);
3679	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
3680	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
3681	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
3682
3683	density = (1 << (row + column - 20 + bank)) * width;
3684
3685	if ((adev->pdev->device == 0x6819) &&
3686	    is_memory_gddr5 && is_special && (density == 0x400))
3687		ret = true;
3688
3689	return ret;
3690}
3691
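/*
 * Ask the VBIOS for the real voltages behind the virtual leakage voltage
 * indices (SISLANDS_LEAKAGE_INDEX0 and up) and cache the valid entries.
 */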
3692static void si_get_leakage_vddc(struct amdgpu_device *adev)
3693{
3694	struct si_power_info *si_pi = si_get_pi(adev);
3695	u16 vddc, count = 0;
3696	int i, ret;
3697
3698	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
3699		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
3700
3701		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
3702			si_pi->leakage_voltage.entries[count].voltage = vddc;
3703			si_pi->leakage_voltage.entries[count].leakage_index =
3704				SISLANDS_LEAKAGE_INDEX0 + i;
3705			count++;
3706		}
3707	}
3708	si_pi->leakage_voltage.count = count;
3709}
3710
3711static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
3712						     u32 index, u16 *leakage_voltage)
3713{
3714	struct si_power_info *si_pi = si_get_pi(adev);
3715	int i;
3716
3717	if (leakage_voltage == NULL)
3718		return -EINVAL;
3719
3720	if ((index & 0xff00) != 0xff00)
3721		return -EINVAL;
3722
3723	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
3724		return -EINVAL;
3725
3726	if (index < SISLANDS_LEAKAGE_INDEX0)
3727		return -EINVAL;
3728
3729	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
3730		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
3731			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
3732			return 0;
3733		}
3734	}
3735	return -EAGAIN;
3736}
3737
3738static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
3739{
3740	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3741	bool want_thermal_protection;
3742	enum amdgpu_dpm_event_src dpm_event_src;
3743
3744	switch (sources) {
3745	case 0:
3746	default:
3747		want_thermal_protection = false;
3748		break;
3749	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
3750		want_thermal_protection = true;
3751		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
3752		break;
3753	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
3754		want_thermal_protection = true;
3755		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
3756		break;
3757	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
3758	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
3759		want_thermal_protection = true;
3760		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
3761		break;
3762	}
3763
3764	if (want_thermal_protection) {
3765		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
3766		if (pi->thermal_protection)
3767			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
3768	} else {
3769		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
3770	}
3771}
3772
3773static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
3774					   enum amdgpu_dpm_auto_throttle_src source,
3775					   bool enable)
3776{
3777	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3778
3779	if (enable) {
3780		if (!(pi->active_auto_throttle_sources & (1 << source))) {
3781			pi->active_auto_throttle_sources |= 1 << source;
3782			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
3783		}
3784	} else {
3785		if (pi->active_auto_throttle_sources & (1 << source)) {
3786			pi->active_auto_throttle_sources &= ~(1 << source);
3787			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
3788		}
3789	}
3790}
3791
3792static void si_start_dpm(struct amdgpu_device *adev)
3793{
3794	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
3795}
3796
3797static void si_stop_dpm(struct amdgpu_device *adev)
3798{
3799	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
3800}
3801
3802static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
3803{
3804	if (enable)
3805		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
3806	else
3807		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
3808
3809}
3810
3811#if 0
3812static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
3813					       u32 thermal_level)
3814{
3815	PPSMC_Result ret;
3816
3817	if (thermal_level == 0) {
3818		ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
3819		if (ret == PPSMC_Result_OK)
3820			return 0;
3821		else
3822			return -EINVAL;
3823	}
3824	return 0;
3825}
3826
3827static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
3828{
3829	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
3830}
3831#endif
3832
3833#if 0
3834static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
3835{
3836	if (ac_power)
3837		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
3838			0 : -EINVAL;
3839
3840	return 0;
3841}
3842#endif
3843
3844static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
3845						      PPSMC_Msg msg, u32 parameter)
3846{
3847	WREG32(SMC_SCRATCH0, parameter);
3848	return amdgpu_si_send_msg_to_smc(adev, msg);
3849}
3850
3851static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
3852{
3853	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
3854		return -EINVAL;
3855
3856	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
3857		0 : -EINVAL;
3858}
3859
3860static int si_dpm_force_performance_level(void *handle,
3861				   enum amd_dpm_forced_level level)
3862{
3863	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3864	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
3865	struct  si_ps *ps = si_get_ps(rps);
3866	u32 levels = ps->performance_level_count;
3867
3868	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
3869		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3870			return -EINVAL;
3871
3872		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
3873			return -EINVAL;
3874	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
3875		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3876			return -EINVAL;
3877
3878		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
3879			return -EINVAL;
3880	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
3881		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3882			return -EINVAL;
3883
3884		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3885			return -EINVAL;
3886	}
3887
3888	adev->pm.dpm.forced_level = level;
3889
3890	return 0;
3891}
3892
3893#if 0
3894static int si_set_boot_state(struct amdgpu_device *adev)
3895{
3896	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
3897		0 : -EINVAL;
3898}
3899#endif
3900
3901static int si_set_sw_state(struct amdgpu_device *adev)
3902{
3903	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
3904		0 : -EINVAL;
3905}
3906
3907static int si_halt_smc(struct amdgpu_device *adev)
3908{
3909	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
3910		return -EINVAL;
3911
3912	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
3913		0 : -EINVAL;
3914}
3915
3916static int si_resume_smc(struct amdgpu_device *adev)
3917{
3918	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
3919		return -EINVAL;
3920
3921	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
3922		0 : -EINVAL;
3923}
3924
3925static void si_dpm_start_smc(struct amdgpu_device *adev)
3926{
3927	amdgpu_si_program_jump_on_start(adev);
3928	amdgpu_si_start_smc(adev);
3929	amdgpu_si_smc_clock(adev, true);
3930}
3931
3932static void si_dpm_stop_smc(struct amdgpu_device *adev)
3933{
3934	amdgpu_si_reset_smc(adev);
3935	amdgpu_si_smc_clock(adev, false);
3936}
3937
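/*
 * Read the table offsets (state, soft registers, MC registers, fan, ARB,
 * CAC, DTE, SPLL, PAPM) out of the SMC firmware header so later uploads
 * know where to write in SMC SRAM.
 */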
3938static int si_process_firmware_header(struct amdgpu_device *adev)
3939{
3940	struct si_power_info *si_pi = si_get_pi(adev);
3941	u32 tmp;
3942	int ret;
3943
3944	ret = amdgpu_si_read_smc_sram_dword(adev,
3945					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3946					    SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
3947					    &tmp, si_pi->sram_end);
3948	if (ret)
3949		return ret;
3950
3951	si_pi->state_table_start = tmp;
3952
3953	ret = amdgpu_si_read_smc_sram_dword(adev,
3954					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3955					    SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
3956					    &tmp, si_pi->sram_end);
3957	if (ret)
3958		return ret;
3959
3960	si_pi->soft_regs_start = tmp;
3961
3962	ret = amdgpu_si_read_smc_sram_dword(adev,
3963					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3964					    SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
3965					    &tmp, si_pi->sram_end);
3966	if (ret)
3967		return ret;
3968
3969	si_pi->mc_reg_table_start = tmp;
3970
3971	ret = amdgpu_si_read_smc_sram_dword(adev,
3972					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3973					    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
3974					    &tmp, si_pi->sram_end);
3975	if (ret)
3976		return ret;
3977
3978	si_pi->fan_table_start = tmp;
3979
3980	ret = amdgpu_si_read_smc_sram_dword(adev,
3981					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3982					    SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
3983					    &tmp, si_pi->sram_end);
3984	if (ret)
3985		return ret;
3986
3987	si_pi->arb_table_start = tmp;
3988
3989	ret = amdgpu_si_read_smc_sram_dword(adev,
3990					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3991					    SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
3992					    &tmp, si_pi->sram_end);
3993	if (ret)
3994		return ret;
3995
3996	si_pi->cac_table_start = tmp;
3997
3998	ret = amdgpu_si_read_smc_sram_dword(adev,
3999					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4000					    SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
4001					    &tmp, si_pi->sram_end);
4002	if (ret)
4003		return ret;
4004
4005	si_pi->dte_table_start = tmp;
4006
4007	ret = amdgpu_si_read_smc_sram_dword(adev,
4008					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4009					    SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
4010					    &tmp, si_pi->sram_end);
4011	if (ret)
4012		return ret;
4013
4014	si_pi->spll_table_start = tmp;
4015
4016	ret = amdgpu_si_read_smc_sram_dword(adev,
4017					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4018					    SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
4019					    &tmp, si_pi->sram_end);
4020	if (ret)
4021		return ret;
4022
4023	si_pi->papm_cfg_table_start = tmp;
4024
4025	return ret;
4026}
4027
4028static void si_read_clock_registers(struct amdgpu_device *adev)
4029{
4030	struct si_power_info *si_pi = si_get_pi(adev);
4031
4032	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
4033	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
4034	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
4035	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
4036	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
4037	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
4038	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
4039	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
4040	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
4041	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
4042	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
4043	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
4044	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
4045	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
4046	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
4047}
4048
4049static void si_enable_thermal_protection(struct amdgpu_device *adev,
4050					  bool enable)
4051{
4052	if (enable)
4053		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
4054	else
4055		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
4056}
4057
4058static void si_enable_acpi_power_management(struct amdgpu_device *adev)
4059{
4060	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
4061}
4062
4063#if 0
4064static int si_enter_ulp_state(struct amdgpu_device *adev)
4065{
4066	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
4067
4068	udelay(25000);
4069
4070	return 0;
4071}
4072
4073static int si_exit_ulp_state(struct amdgpu_device *adev)
4074{
4075	int i;
4076
4077	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
4078
4079	udelay(7000);
4080
4081	for (i = 0; i < adev->usec_timeout; i++) {
4082		if (RREG32(SMC_RESP_0) == 1)
4083			break;
4084		udelay(1000);
4085	}
4086
4087	return 0;
4088}
4089#endif
4090
4091static int si_notify_smc_display_change(struct amdgpu_device *adev,
4092				     bool has_display)
4093{
4094	PPSMC_Msg msg = has_display ?
4095		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
4096
4097	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
4098		0 : -EINVAL;
4099}
4100
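/*
 * Convert the voltage regulator, ACPI and vblank timeout delays into
 * reference clock ticks and pass them to the SMC through soft registers.
 */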
4101static void si_program_response_times(struct amdgpu_device *adev)
4102{
4103	u32 voltage_response_time, acpi_delay_time, vbi_time_out;
4104	u32 vddc_dly, acpi_dly, vbi_dly;
4105	u32 reference_clock;
4106
4107	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
4108
4109	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
4110
4111	if (voltage_response_time == 0)
4112		voltage_response_time = 1000;
4113
4114	acpi_delay_time = 15000;
4115	vbi_time_out = 100000;
4116
4117	reference_clock = amdgpu_asic_get_xclk(adev);
4118
4119	vddc_dly = (voltage_response_time  * reference_clock) / 100;
4120	acpi_dly = (acpi_delay_time * reference_clock) / 100;
4121	vbi_dly  = (vbi_time_out * reference_clock) / 100;
4122
4123	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
4124	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
4125	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
4126	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
4127}
4128
4129static void si_program_ds_registers(struct amdgpu_device *adev)
4130{
4131	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4132	u32 tmp;
4133
4134	/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
4135	if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
4136		tmp = 0x10;
4137	else
4138		tmp = 0x1;
4139
4140	if (eg_pi->sclk_deep_sleep) {
4141		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
4142		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
4143			 ~AUTOSCALE_ON_SS_CLEAR);
4144	}
4145}
4146
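/*
 * Program the display gap logic for the current number of active CRTCs,
 * steer DISP1 slow clock selection to an active CRTC, and notify the SMC
 * whether any display is enabled.
 */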
4147static void si_program_display_gap(struct amdgpu_device *adev)
4148{
4149	u32 tmp, pipe;
4150	int i;
4151
4152	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
4153	if (adev->pm.dpm.new_active_crtc_count > 0)
4154		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
4155	else
4156		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
4157
4158	if (adev->pm.dpm.new_active_crtc_count > 1)
4159		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
4160	else
4161		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
4162
4163	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
4164
4165	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
4166	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
4167
4168	if ((adev->pm.dpm.new_active_crtc_count > 0) &&
4169	    (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
4170		/* find the first active crtc */
4171		for (i = 0; i < adev->mode_info.num_crtc; i++) {
4172			if (adev->pm.dpm.new_active_crtcs & (1 << i))
4173				break;
4174		}
4175		if (i == adev->mode_info.num_crtc)
4176			pipe = 0;
4177		else
4178			pipe = i;
4179
4180		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
4181		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
4182		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
4183	}
4184
4185	/* Setting this to false forces the performance state to low if the crtcs are disabled.
4186	 * This can be a problem on PowerXpress systems or if you want to use the card
4187	 * for offscreen rendering or compute if there are no crtcs enabled.
4188	 */
4189	si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
4190}
4191
4192static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
4193{
4194	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4195
4196	if (enable) {
4197		if (pi->sclk_ss)
4198			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
4199	} else {
4200		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
4201		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
4202	}
4203}
4204
4205static void si_setup_bsp(struct amdgpu_device *adev)
4206{
4207	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4208	u32 xclk = amdgpu_asic_get_xclk(adev);
4209
4210	r600_calculate_u_and_p(pi->asi,
4211			       xclk,
4212			       16,
4213			       &pi->bsp,
4214			       &pi->bsu);
4215
4216	r600_calculate_u_and_p(pi->pasi,
4217			       xclk,
4218			       16,
4219			       &pi->pbsp,
4220			       &pi->pbsu);
4221
4223	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
4224	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
4225
4226	WREG32(CG_BSP, pi->dsp);
4227}
4228
4229static void si_program_git(struct amdgpu_device *adev)
4230{
4231	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
4232}
4233
4234static void si_program_tp(struct amdgpu_device *adev)
4235{
4236	int i;
4237	enum r600_td td = R600_TD_DFLT;
4238
4239	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
4240		WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
4241
4242	if (td == R600_TD_AUTO)
4243		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
4244	else
4245		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
4246
4247	if (td == R600_TD_UP)
4248		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
4249
4250	if (td == R600_TD_DOWN)
4251		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
4252}
4253
4254static void si_program_tpp(struct amdgpu_device *adev)
4255{
4256	WREG32(CG_TPC, R600_TPC_DFLT);
4257}
4258
4259static void si_program_sstp(struct amdgpu_device *adev)
4260{
4261	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
4262}
4263
4264static void si_enable_display_gap(struct amdgpu_device *adev)
4265{
4266	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
4267
4268	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
4269	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
4270		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
4271
4272	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
4273	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
4274		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
4275	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
4276}
4277
4278static void si_program_vc(struct amdgpu_device *adev)
4279{
4280	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4281
4282	WREG32(CG_FTV, pi->vrc);
4283}
4284
4285static void si_clear_vc(struct amdgpu_device *adev)
4286{
4287	WREG32(CG_FTV, 0);
4288}
4289
4290static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
4291{
4292	u8 mc_para_index;
4293
4294	if (memory_clock < 10000)
4295		mc_para_index = 0;
4296	else if (memory_clock >= 80000)
4297		mc_para_index = 0x0f;
4298	else
4299		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
4300	return mc_para_index;
4301}
4302
4303static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
4304{
4305	u8 mc_para_index;
4306
4307	if (strobe_mode) {
4308		if (memory_clock < 12500)
4309			mc_para_index = 0x00;
4310		else if (memory_clock > 47500)
4311			mc_para_index = 0x0f;
4312		else
4313			mc_para_index = (u8)((memory_clock - 10000) / 2500);
4314	} else {
4315		if (memory_clock < 65000)
4316			mc_para_index = 0x00;
4317		else if (memory_clock > 135000)
4318			mc_para_index = 0x0f;
4319		else
4320			mc_para_index = (u8)((memory_clock - 60000) / 5000);
4321	}
4322	return mc_para_index;
4323}
4324
4325static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
4326{
4327	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4328	bool strobe_mode = false;
4329	u8 result = 0;
4330
4331	if (mclk <= pi->mclk_strobe_mode_threshold)
4332		strobe_mode = true;
4333
4334	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
4335		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
4336	else
4337		result = si_get_ddr3_mclk_frequency_ratio(mclk);
4338
4339	if (strobe_mode)
4340		result |= SISLANDS_SMC_STROBE_ENABLE;
4341
4342	return result;
4343}
4344
4345static int si_upload_firmware(struct amdgpu_device *adev)
4346{
4347	struct si_power_info *si_pi = si_get_pi(adev);
4348
4349	amdgpu_si_reset_smc(adev);
4350	amdgpu_si_smc_clock(adev, false);
4351
4352	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
4353}
4354
4355static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
4356					      const struct atom_voltage_table *table,
4357					      const struct amdgpu_phase_shedding_limits_table *limits)
4358{
4359	u32 data, num_bits, num_levels;
4360
4361	if ((table == NULL) || (limits == NULL))
4362		return false;
4363
4364	data = table->mask_low;
4365
4366	num_bits = hweight32(data);
4367
4368	if (num_bits == 0)
4369		return false;
4370
4371	num_levels = (1 << num_bits);
4372
4373	if (table->count != num_levels)
4374		return false;
4375
4376	if (limits->count != (num_levels - 1))
4377		return false;
4378
4379	return true;
4380}
4381
4382static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
4383					      u32 max_voltage_steps,
4384					      struct atom_voltage_table *voltage_table)
4385{
4386	unsigned int i, diff;
4387
4388	if (voltage_table->count <= max_voltage_steps)
4389		return;
4390
4391	diff = voltage_table->count - max_voltage_steps;
4392
4393	for (i = 0; i < max_voltage_steps; i++)
4394		voltage_table->entries[i] = voltage_table->entries[i + diff];
4395
4396	voltage_table->count = max_voltage_steps;
4397}
4398
4399static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
4400				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
4401				     struct atom_voltage_table *voltage_table)
4402{
4403	u32 i;
4404
4405	if (voltage_dependency_table == NULL)
4406		return -EINVAL;
4407
4408	voltage_table->mask_low = 0;
4409	voltage_table->phase_delay = 0;
4410
4411	voltage_table->count = voltage_dependency_table->count;
4412	for (i = 0; i < voltage_table->count; i++) {
4413		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
4414		voltage_table->entries[i].smio_low = 0;
4415	}
4416
4417	return 0;
4418}
4419
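/*
 * Build the VDDC, VDDCI, MVDD and (optionally) phase shedding voltage tables
 * from the GPIO LUTs or SVI2 dependency tables, trimming each one to at most
 * SISLANDS_MAX_NO_VREG_STEPS entries.
 */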
4420static int si_construct_voltage_tables(struct amdgpu_device *adev)
4421{
4422	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4423	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4424	struct si_power_info *si_pi = si_get_pi(adev);
4425	int ret;
4426
4427	if (pi->voltage_control) {
4428		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
4429						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
4430		if (ret)
4431			return ret;
4432
4433		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4434			si_trim_voltage_table_to_fit_state_table(adev,
4435								 SISLANDS_MAX_NO_VREG_STEPS,
4436								 &eg_pi->vddc_voltage_table);
4437	} else if (si_pi->voltage_control_svi2) {
4438		ret = si_get_svi2_voltage_table(adev,
4439						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
4440						&eg_pi->vddc_voltage_table);
4441		if (ret)
4442			return ret;
4443	} else {
4444		return -EINVAL;
4445	}
4446
4447	if (eg_pi->vddci_control) {
4448		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
4449						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
4450		if (ret)
4451			return ret;
4452
4453		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4454			si_trim_voltage_table_to_fit_state_table(adev,
4455								 SISLANDS_MAX_NO_VREG_STEPS,
4456								 &eg_pi->vddci_voltage_table);
4457	}
4458	if (si_pi->vddci_control_svi2) {
4459		ret = si_get_svi2_voltage_table(adev,
4460						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
4461						&eg_pi->vddci_voltage_table);
4462		if (ret)
4463			return ret;
4464	}
4465
4466	if (pi->mvdd_control) {
4467		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
4468						    VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
4469
4470		if (ret) {
4471			pi->mvdd_control = false;
4472			return ret;
4473		}
4474
4475		if (si_pi->mvdd_voltage_table.count == 0) {
4476			pi->mvdd_control = false;
4477			return -EINVAL;
4478		}
4479
4480		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4481			si_trim_voltage_table_to_fit_state_table(adev,
4482								 SISLANDS_MAX_NO_VREG_STEPS,
4483								 &si_pi->mvdd_voltage_table);
4484	}
4485
4486	if (si_pi->vddc_phase_shed_control) {
4487		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
4488						    VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
4489		if (ret)
4490			si_pi->vddc_phase_shed_control = false;
4491
4492		if ((si_pi->vddc_phase_shed_table.count == 0) ||
4493		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
4494			si_pi->vddc_phase_shed_control = false;
4495	}
4496
4497	return 0;
4498}
4499
4500static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
4501					  const struct atom_voltage_table *voltage_table,
4502					  SISLANDS_SMC_STATETABLE *table)
4503{
4504	unsigned int i;
4505
4506	for (i = 0; i < voltage_table->count; i++)
4507		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
4508}
4509
4510static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
4511					  SISLANDS_SMC_STATETABLE *table)
4512{
4513	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4514	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4515	struct si_power_info *si_pi = si_get_pi(adev);
4516	u8 i;
4517
4518	if (si_pi->voltage_control_svi2) {
4519		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
4520			si_pi->svc_gpio_id);
4521		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
4522			si_pi->svd_gpio_id);
4523		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
4524					   2);
4525	} else {
4526		if (eg_pi->vddc_voltage_table.count) {
4527			si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
4528			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
4529				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
4530
4531			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
4532				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
4533					table->maxVDDCIndexInPPTable = i;
4534					break;
4535				}
4536			}
4537		}
4538
4539		if (eg_pi->vddci_voltage_table.count) {
4540			si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
4541
4542			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
4543				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
4544		}
4545
4547		if (si_pi->mvdd_voltage_table.count) {
4548			si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
4549
4550			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
4551				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
4552		}
4553
4554		if (si_pi->vddc_phase_shed_control) {
4555			if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
4556							      &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
4557				si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
4558
4559				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
4560					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
4561
4562				si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
4563							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
4564			} else {
4565				si_pi->vddc_phase_shed_control = false;
4566			}
4567		}
4568	}
4569
4570	return 0;
4571}
4572
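/*
 * Select the first table entry whose voltage is at least the requested value;
 * fails if the request is higher than every entry in the table.
 */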
4573static int si_populate_voltage_value(struct amdgpu_device *adev,
4574				     const struct atom_voltage_table *table,
4575				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4576{
4577	unsigned int i;
4578
4579	for (i = 0; i < table->count; i++) {
4580		if (value <= table->entries[i].value) {
4581			voltage->index = (u8)i;
4582			voltage->value = cpu_to_be16(table->entries[i].value);
4583			break;
4584		}
4585	}
4586
4587	if (i >= table->count)
4588		return -EINVAL;
4589
4590	return 0;
4591}
4592
4593static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
4594				  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4595{
4596	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4597	struct si_power_info *si_pi = si_get_pi(adev);
4598
4599	if (pi->mvdd_control) {
4600		if (mclk <= pi->mvdd_split_frequency)
4601			voltage->index = 0;
4602		else
4603			voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
4604
4605		voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
4606	}
4607	return 0;
4608}
4609
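/*
 * Translate a VDDC value into the "standard" voltage used for leakage
 * calculations via the CAC leakage table, matching the vddc_dependency_on_sclk
 * entry either exactly or by the first entry that is not lower, depending on
 * the NEW_CAC_VOLTAGE platform cap.
 */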
4610static int si_get_std_voltage_value(struct amdgpu_device *adev,
4611				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
4612				    u16 *std_voltage)
4613{
4614	u16 v_index;
4615	bool voltage_found = false;
4616	*std_voltage = be16_to_cpu(voltage->value);
4617
4618	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
4619		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
4620			if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
4621				return -EINVAL;
4622
4623			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
4624				if (be16_to_cpu(voltage->value) ==
4625				    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
4626					voltage_found = true;
4627					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
4628						*std_voltage =
4629							adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
4630					else
4631						*std_voltage =
4632							adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
4633					break;
4634				}
4635			}
4636
4637			if (!voltage_found) {
4638				for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
4639					if (be16_to_cpu(voltage->value) <=
4640					    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
4641						voltage_found = true;
4642						if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
4643							*std_voltage =
4644								adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
4645						else
4646							*std_voltage =
4647								adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
4648						break;
4649					}
4650				}
4651			}
4652		} else {
4653			if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
4654				*std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
4655		}
4656	}
4657
4658	return 0;
4659}
4660
4661static int si_populate_std_voltage_value(struct amdgpu_device *adev,
4662					 u16 value, u8 index,
4663					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4664{
4665	voltage->index = index;
4666	voltage->value = cpu_to_be16(value);
4667
4668	return 0;
4669}
4670
4671static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
4672					    const struct amdgpu_phase_shedding_limits_table *limits,
4673					    u16 voltage, u32 sclk, u32 mclk,
4674					    SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
4675{
4676	unsigned int i;
4677
4678	for (i = 0; i < limits->count; i++) {
4679		if ((voltage <= limits->entries[i].voltage) &&
4680		    (sclk <= limits->entries[i].sclk) &&
4681		    (mclk <= limits->entries[i].mclk))
4682			break;
4683	}
4684
4685	smc_voltage->phase_settings = (u8)i;
4686
4687	return 0;
4688}
4689
4690static int si_init_arb_table_index(struct amdgpu_device *adev)
4691{
4692	struct si_power_info *si_pi = si_get_pi(adev);
4693	u32 tmp;
4694	int ret;
4695
4696	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
4697					    &tmp, si_pi->sram_end);
4698	if (ret)
4699		return ret;
4700
4701	tmp &= 0x00FFFFFF;
4702	tmp |= MC_CG_ARB_FREQ_F1 << 24;
4703
4704	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
4705					      tmp, si_pi->sram_end);
4706}
4707
4708static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
4709{
4710	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
4711}
4712
4713static int si_reset_to_default(struct amdgpu_device *adev)
4714{
4715	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
4716		0 : -EINVAL;
4717}
4718
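/*
 * Read which MC arbiter register set the SMC last selected and switch back
 * to the F0 set if anything else is active.
 */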
4719static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
4720{
4721	struct si_power_info *si_pi = si_get_pi(adev);
4722	u32 tmp;
4723	int ret;
4724
4725	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
4726					    &tmp, si_pi->sram_end);
4727	if (ret)
4728		return ret;
4729
4730	tmp = (tmp >> 24) & 0xff;
4731
4732	if (tmp == MC_CG_ARB_FREQ_F0)
4733		return 0;
4734
4735	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
4736}
4737
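/*
 * Derive the MC arbiter refresh rate from the engine clock, the DRAM row
 * count encoded in MC_ARB_RAMCFG and the refresh interval field in
 * MC_SEQ_MISC0.
 */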
4738static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
4739					    u32 engine_clock)
4740{
4741	u32 dram_rows;
4742	u32 dram_refresh_rate;
4743	u32 mc_arb_rfsh_rate;
4744	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
4745
4746	if (tmp >= 4)
4747		dram_rows = 16384;
4748	else
4749		dram_rows = 1 << (tmp + 10);
4750
4751	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
4752	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
4753
4754	return mc_arb_rfsh_rate;
4755}
4756
4757static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
4758						struct rv7xx_pl *pl,
4759						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
4760{
4761	u32 dram_timing;
4762	u32 dram_timing2;
4763	u32 burst_time;
4764
4765	arb_regs->mc_arb_rfsh_rate =
4766		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
4767
4768	amdgpu_atombios_set_engine_dram_timings(adev,
4769					    pl->sclk,
4770					    pl->mclk);
4771
4772	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
4773	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
4774	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
4775
4776	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
4777	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
4778	arb_regs->mc_arb_burst_time = (u8)burst_time;
4779
4780	return 0;
4781}
4782
4783static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
4784						  struct amdgpu_ps *amdgpu_state,
4785						  unsigned int first_arb_set)
4786{
4787	struct si_power_info *si_pi = si_get_pi(adev);
4788	struct  si_ps *state = si_get_ps(amdgpu_state);
4789	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
4790	int i, ret = 0;
4791
4792	for (i = 0; i < state->performance_level_count; i++) {
4793		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
4794		if (ret)
4795			break;
4796		ret = amdgpu_si_copy_bytes_to_smc(adev,
4797						  si_pi->arb_table_start +
4798						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
4799						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
4800						  (u8 *)&arb_regs,
4801						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
4802						  si_pi->sram_end);
4803		if (ret)
4804			break;
4805	}
4806
4807	return ret;
4808}
4809
4810static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
4811					       struct amdgpu_ps *amdgpu_new_state)
4812{
4813	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
4814						      SISLANDS_DRIVER_STATE_ARB_INDEX);
4815}
4816
4817static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
4818					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4819{
4820	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4821	struct si_power_info *si_pi = si_get_pi(adev);
4822
4823	if (pi->mvdd_control)
4824		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
4825						 si_pi->mvdd_bootup_value, voltage);
4826
4827	return 0;
4828}
4829
4830static int si_populate_smc_initial_state(struct amdgpu_device *adev,
4831					 struct amdgpu_ps *amdgpu_initial_state,
4832					 SISLANDS_SMC_STATETABLE *table)
4833{
4834	struct  si_ps *initial_state = si_get_ps(amdgpu_initial_state);
4835	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4836	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4837	struct si_power_info *si_pi = si_get_pi(adev);
4838	u32 reg;
4839	int ret;
4840
4841	table->initialState.levels[0].mclk.vDLL_CNTL =
4842		cpu_to_be32(si_pi->clock_registers.dll_cntl);
4843	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
4844		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
4845	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
4846		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
4847	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
4848		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
4849	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
4850		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
4851	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
4852		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
4853	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
4854		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
4855	table->initialState.levels[0].mclk.vMPLL_SS =
4856		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
4857	table->initialState.levels[0].mclk.vMPLL_SS2 =
4858		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
4859
4860	table->initialState.levels[0].mclk.mclk_value =
4861		cpu_to_be32(initial_state->performance_levels[0].mclk);
4862
4863	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
4864		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
4865	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
4866		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
4867	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
4868		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
4869	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
4870		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
4871	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
4872		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
4873	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
4874		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
4875
4876	table->initialState.levels[0].sclk.sclk_value =
4877		cpu_to_be32(initial_state->performance_levels[0].sclk);
4878
4879	table->initialState.levels[0].arbRefreshState =
4880		SISLANDS_INITIAL_STATE_ARB_INDEX;
4881
4882	table->initialState.levels[0].ACIndex = 0;
4883
4884	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
4885					initial_state->performance_levels[0].vddc,
4886					&table->initialState.levels[0].vddc);
4887
4888	if (!ret) {
4889		u16 std_vddc;
4890
4891		ret = si_get_std_voltage_value(adev,
4892					       &table->initialState.levels[0].vddc,
4893					       &std_vddc);
4894		if (!ret)
4895			si_populate_std_voltage_value(adev, std_vddc,
4896						      table->initialState.levels[0].vddc.index,
4897						      &table->initialState.levels[0].std_vddc);
4898	}
4899
4900	if (eg_pi->vddci_control)
4901		si_populate_voltage_value(adev,
4902					  &eg_pi->vddci_voltage_table,
4903					  initial_state->performance_levels[0].vddci,
4904					  &table->initialState.levels[0].vddci);
4905
4906	if (si_pi->vddc_phase_shed_control)
4907		si_populate_phase_shedding_value(adev,
4908						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
4909						 initial_state->performance_levels[0].vddc,
4910						 initial_state->performance_levels[0].sclk,
4911						 initial_state->performance_levels[0].mclk,
4912						 &table->initialState.levels[0].vddc);
4913
4914	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
4915
4916	reg = CG_R(0xffff) | CG_L(0);
4917	table->initialState.levels[0].aT = cpu_to_be32(reg);
4918	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
4919	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
4920
4921	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
4922		table->initialState.levels[0].strobeMode =
4923			si_get_strobe_mode_settings(adev,
4924						    initial_state->performance_levels[0].mclk);
4925
4926		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
4927			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
4928		else
4929			table->initialState.levels[0].mcFlags = 0;
4930	}
4931
4932	table->initialState.levelCount = 1;
4933
4934	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
4935
4936	table->initialState.levels[0].dpm2.MaxPS = 0;
4937	table->initialState.levels[0].dpm2.NearTDPDec = 0;
4938	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
4939	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
4940	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
4941
4942	reg = MIN_POWER_MASK | MAX_POWER_MASK;
4943	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
4944
4945	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
4946	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
4947
4948	return 0;
4949}
4950
4951static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
4952				      SISLANDS_SMC_STATETABLE *table)
4953{
4954	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4955	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4956	struct si_power_info *si_pi = si_get_pi(adev);
4957	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
4958	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
4959	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
4960	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
4961	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
4962	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
4963	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
4964	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
4965	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
4966	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
4967	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
4968	u32 reg;
4969	int ret;
4970
4971	table->ACPIState = table->initialState;
4972
4973	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
4974
4975	if (pi->acpi_vddc) {
4976		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
4977						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
4978		if (!ret) {
4979			u16 std_vddc;
4980
4981			ret = si_get_std_voltage_value(adev,
4982						       &table->ACPIState.levels[0].vddc, &std_vddc);
4983			if (!ret)
4984				si_populate_std_voltage_value(adev, std_vddc,
4985							      table->ACPIState.levels[0].vddc.index,
4986							      &table->ACPIState.levels[0].std_vddc);
4987		}
4988		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
4989
4990		if (si_pi->vddc_phase_shed_control) {
4991			si_populate_phase_shedding_value(adev,
4992							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
4993							 pi->acpi_vddc,
4994							 0,
4995							 0,
4996							 &table->ACPIState.levels[0].vddc);
4997		}
4998	} else {
4999		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
5000						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
5001		if (!ret) {
5002			u16 std_vddc;
5003
5004			ret = si_get_std_voltage_value(adev,
5005						       &table->ACPIState.levels[0].vddc, &std_vddc);
5006
5007			if (!ret)
5008				si_populate_std_voltage_value(adev, std_vddc,
5009							      table->ACPIState.levels[0].vddc.index,
5010							      &table->ACPIState.levels[0].std_vddc);
5011		}
5012		table->ACPIState.levels[0].gen2PCIE =
5013			(u8)amdgpu_get_pcie_gen_support(adev,
5014							si_pi->sys_pcie_mask,
5015							si_pi->boot_pcie_gen,
5016							AMDGPU_PCIE_GEN1);
5017
5018		if (si_pi->vddc_phase_shed_control)
5019			si_populate_phase_shedding_value(adev,
5020							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
5021							 pi->min_vddc_in_table,
5022							 0,
5023							 0,
5024							 &table->ACPIState.levels[0].vddc);
5025	}
5026
5027	if (pi->acpi_vddc) {
5028		if (eg_pi->acpi_vddci)
5029			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
5030						  eg_pi->acpi_vddci,
5031						  &table->ACPIState.levels[0].vddci);
5032	}
5033
5034	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
5035	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
5036
5037	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
5038
5039	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
5040	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
5041
5042	table->ACPIState.levels[0].mclk.vDLL_CNTL =
5043		cpu_to_be32(dll_cntl);
5044	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
5045		cpu_to_be32(mclk_pwrmgt_cntl);
5046	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
5047		cpu_to_be32(mpll_ad_func_cntl);
5048	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
5049		cpu_to_be32(mpll_dq_func_cntl);
5050	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
5051		cpu_to_be32(mpll_func_cntl);
5052	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
5053		cpu_to_be32(mpll_func_cntl_1);
5054	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
5055		cpu_to_be32(mpll_func_cntl_2);
5056	table->ACPIState.levels[0].mclk.vMPLL_SS =
5057		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
5058	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
5059		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
5060
5061	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
5062		cpu_to_be32(spll_func_cntl);
5063	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
5064		cpu_to_be32(spll_func_cntl_2);
5065	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
5066		cpu_to_be32(spll_func_cntl_3);
5067	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
5068		cpu_to_be32(spll_func_cntl_4);
5069
5070	table->ACPIState.levels[0].mclk.mclk_value = 0;
5071	table->ACPIState.levels[0].sclk.sclk_value = 0;
5072
5073	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
5074
5075	if (eg_pi->dynamic_ac_timing)
5076		table->ACPIState.levels[0].ACIndex = 0;
5077
5078	table->ACPIState.levels[0].dpm2.MaxPS = 0;
5079	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
5080	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
5081	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
5082	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
5083
5084	reg = MIN_POWER_MASK | MAX_POWER_MASK;
5085	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
5086
5087	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
5088	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
5089
5090	return 0;
5091}
5092
5093static int si_populate_ulv_state(struct amdgpu_device *adev,
5094				 SISLANDS_SMC_SWSTATE *state)
5095{
5096	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
5097	struct si_power_info *si_pi = si_get_pi(adev);
5098	struct si_ulv_param *ulv = &si_pi->ulv;
5099	u32 sclk_in_sr = 1350; /* ??? */
5100	int ret;
5101
5102	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
5103					    &state->levels[0]);
5104	if (!ret) {
5105		if (eg_pi->sclk_deep_sleep) {
5106			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
5107				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
5108			else
5109				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
5110		}
5111		if (ulv->one_pcie_lane_in_ulv)
5112			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
5113		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
5114		state->levels[0].ACIndex = 1;
5115		state->levels[0].std_vddc = state->levels[0].vddc;
5116		state->levelCount = 1;
5117
5118		state->flags |= PPSMC_SWSTATE_FLAG_DC;
5119	}
5120
5121	return ret;
5122}
5123
5124static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
5125{
5126	struct si_power_info *si_pi = si_get_pi(adev);
5127	struct si_ulv_param *ulv = &si_pi->ulv;
5128	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
5129	int ret;
5130
5131	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
5132						   &arb_regs);
5133	if (ret)
5134		return ret;
5135
5136	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
5137				   ulv->volt_change_delay);
5138
5139	ret = amdgpu_si_copy_bytes_to_smc(adev,
5140					  si_pi->arb_table_start +
5141					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
5142					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
5143					  (u8 *)&arb_regs,
5144					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
5145					  si_pi->sram_end);
5146
5147	return ret;
5148}
5149
5150static void si_get_mvdd_configuration(struct amdgpu_device *adev)
5151{
5152	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5153
5154	pi->mvdd_split_frequency = 30000;
5155}
5156
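/*
 * Build the complete SMC state table (initial, ACPI and, when supported, ULV
 * states plus the platform capability flags) and copy it into SMC SRAM.
 */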
5157static int si_init_smc_table(struct amdgpu_device *adev)
5158{
5159	struct si_power_info *si_pi = si_get_pi(adev);
5160	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
5161	const struct si_ulv_param *ulv = &si_pi->ulv;
5162	SISLANDS_SMC_STATETABLE  *table = &si_pi->smc_statetable;
5163	int ret;
5164	u32 lane_width;
5165	u32 vr_hot_gpio;
5166
5167	si_populate_smc_voltage_tables(adev, table);
5168
5169	switch (adev->pm.int_thermal_type) {
5170	case THERMAL_TYPE_SI:
5171	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
5172		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
5173		break;
5174	case THERMAL_TYPE_NONE:
5175		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
5176		break;
5177	default:
5178		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
5179		break;
5180	}
5181
5182	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
5183		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
5184
5185	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
5186		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
5187			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
5188	}
5189
5190	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
5191		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
5192
5193	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
5194		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
5195
5196	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
5197		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
5198
5199	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
5200		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
5201		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
5202		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
5203					   vr_hot_gpio);
5204	}
5205
5206	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
5207	if (ret)
5208		return ret;
5209
5210	ret = si_populate_smc_acpi_state(adev, table);
5211	if (ret)
5212		return ret;
5213
5214	table->driverState = table->initialState;
5215
5216	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
5217						     SISLANDS_INITIAL_STATE_ARB_INDEX);
5218	if (ret)
5219		return ret;
5220
5221	if (ulv->supported && ulv->pl.vddc) {
5222		ret = si_populate_ulv_state(adev, &table->ULVState);
5223		if (ret)
5224			return ret;
5225
5226		ret = si_program_ulv_memory_timing_parameters(adev);
5227		if (ret)
5228			return ret;
5229
5230		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
5231		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
5232
5233		lane_width = amdgpu_get_pcie_lanes(adev);
5234		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
5235	} else {
5236		table->ULVState = table->initialState;
5237	}
5238
5239	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
5240					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
5241					   si_pi->sram_end);
5242}
5243
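/*
 * Work out the SPLL reference, post and feedback divider settings (plus
 * spread spectrum, when enabled) for the requested engine clock.  The
 * feedback divider value is scaled by 16384, which looks like a fixed-point
 * encoding with fractional bits in SPLL_FB_DIV.
 */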
5244static int si_calculate_sclk_params(struct amdgpu_device *adev,
5245				    u32 engine_clock,
5246				    SISLANDS_SMC_SCLK_VALUE *sclk)
5247{
5248	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5249	struct si_power_info *si_pi = si_get_pi(adev);
5250	struct atom_clock_dividers dividers;
5251	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
5252	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
5253	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
5254	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
5255	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
5256	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
5257	u64 tmp;
5258	u32 reference_clock = adev->clock.spll.reference_freq;
5259	u32 reference_divider;
5260	u32 fbdiv;
5261	int ret;
5262
5263	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
5264					     engine_clock, false, &dividers);
5265	if (ret)
5266		return ret;
5267
5268	reference_divider = 1 + dividers.ref_div;
5269
5270	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
5271	do_div(tmp, reference_clock);
5272	fbdiv = (u32) tmp;
5273
5274	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
5275	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
5276	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
5277
5278	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
5279	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
5280
5281	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
5282	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
5283	spll_func_cntl_3 |= SPLL_DITHEN;
5284
5285	if (pi->sclk_ss) {
5286		struct amdgpu_atom_ss ss;
5287		u32 vco_freq = engine_clock * dividers.post_div;
5288
5289		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
5290						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
5291			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
5292			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
5293
5294			cg_spll_spread_spectrum &= ~CLK_S_MASK;
5295			cg_spll_spread_spectrum |= CLK_S(clk_s);
5296			cg_spll_spread_spectrum |= SSEN;
5297
5298			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
5299			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
5300		}
5301	}
5302
5303	sclk->sclk_value = engine_clock;
5304	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
5305	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
5306	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
5307	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
5308	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
5309	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
5310
5311	return 0;
5312}
5313
5314static int si_populate_sclk_value(struct amdgpu_device *adev,
5315				  u32 engine_clock,
5316				  SISLANDS_SMC_SCLK_VALUE *sclk)
5317{
5318	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
5319	int ret;
5320
5321	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
5322	if (!ret) {
5323		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
5324		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
5325		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
5326		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
5327		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
5328		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
5329		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
5330	}
5331
5332	return ret;
5333}
5334
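/*
 * Compute the MPLL divider and spread spectrum settings for a memory clock
 * and fill in the big-endian MCLK register set; dll_state_on controls the
 * MRDCK0/1 power-down bits in MCLK_PWRMGT_CNTL.
 */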
5335static int si_populate_mclk_value(struct amdgpu_device *adev,
5336				  u32 engine_clock,
5337				  u32 memory_clock,
5338				  SISLANDS_SMC_MCLK_VALUE *mclk,
5339				  bool strobe_mode,
5340				  bool dll_state_on)
5341{
5342	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5343	struct si_power_info *si_pi = si_get_pi(adev);
5344	u32  dll_cntl = si_pi->clock_registers.dll_cntl;
5345	u32  mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
5346	u32  mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
5347	u32  mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
5348	u32  mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
5349	u32  mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
5350	u32  mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
5351	u32  mpll_ss1 = si_pi->clock_registers.mpll_ss1;
5352	u32  mpll_ss2 = si_pi->clock_registers.mpll_ss2;
5353	struct atom_mpll_param mpll_param;
5354	int ret;
5355
5356	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
5357	if (ret)
5358		return ret;
5359
5360	mpll_func_cntl &= ~BWCTRL_MASK;
5361	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
5362
5363	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
5364	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
5365		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
5366
5367	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
5368	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
5369
5370	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
5371		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
5372		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
5373			YCLK_POST_DIV(mpll_param.post_div);
5374	}
5375
5376	if (pi->mclk_ss) {
5377		struct amdgpu_atom_ss ss;
5378		u32 freq_nom;
5379		u32 tmp;
5380		u32 reference_clock = adev->clock.mpll.reference_freq;
5381
5382		if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
5383			freq_nom = memory_clock * 4;
5384		else
5385			freq_nom = memory_clock * 2;
5386
5387		tmp = freq_nom / reference_clock;
5388		tmp = tmp * tmp;
5389		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
5390		                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
5391			u32 clks = reference_clock * 5 / ss.rate;
5392			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
5393
5394			mpll_ss1 &= ~CLKV_MASK;
5395			mpll_ss1 |= CLKV(clkv);
5396
5397			mpll_ss2 &= ~CLKS_MASK;
5398			mpll_ss2 |= CLKS(clks);
5399		}
5400	}
5401
5402	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
5403	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
5404
5405	if (dll_state_on)
5406		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
5407	else
5408		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
5409
5410	mclk->mclk_value = cpu_to_be32(memory_clock);
5411	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
5412	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
5413	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
5414	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
5415	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
5416	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
5417	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
5418	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
5419	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
5420
5421	return 0;
5422}
5423
5424static void si_populate_smc_sp(struct amdgpu_device *adev,
5425			       struct amdgpu_ps *amdgpu_state,
5426			       SISLANDS_SMC_SWSTATE *smc_state)
5427{
5428	struct  si_ps *ps = si_get_ps(amdgpu_state);
5429	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5430	int i;
5431
5432	for (i = 0; i < ps->performance_level_count - 1; i++)
5433		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
5434
5435	smc_state->levels[ps->performance_level_count - 1].bSP =
5436		cpu_to_be32(pi->psp);
5437}
5438
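/*
 * Convert one driver performance level (sclk, mclk, voltages, PCIe gen) into
 * the SMC hardware performance level layout, including the memory stutter,
 * EDC and strobe mode flags.
 */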
5439static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
5440					 struct rv7xx_pl *pl,
5441					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
5442{
5443	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5444	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
5445	struct si_power_info *si_pi = si_get_pi(adev);
5446	int ret;
5447	bool dll_state_on;
5448	u16 std_vddc;
5449	bool gmc_pg = false;
5450
5451	if (eg_pi->pcie_performance_request &&
5452	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
5453		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
5454	else
5455		level->gen2PCIE = (u8)pl->pcie_gen;
5456
5457	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
5458	if (ret)
5459		return ret;
5460
5461	level->mcFlags = 0;
5462
5463	if (pi->mclk_stutter_mode_threshold &&
5464	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
5465	    !eg_pi->uvd_enabled &&
5466	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
5467	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
5468		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
5469
5470		if (gmc_pg)
5471			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
5472	}
5473
5474	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
5475		if (pl->mclk > pi->mclk_edc_enable_threshold)
5476			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
5477
5478		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
5479			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
5480
5481		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
5482
5483		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
5484			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
5485			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
5486				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
5487			else
5488				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
5489		} else {
5490			dll_state_on = false;
5491		}
5492	} else {
5493		level->strobeMode = si_get_strobe_mode_settings(adev,
5494								pl->mclk);
5495
5496		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
5497	}
5498
5499	ret = si_populate_mclk_value(adev,
5500				     pl->sclk,
5501				     pl->mclk,
5502				     &level->mclk,
5503				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
5504	if (ret)
5505		return ret;
5506
5507	ret = si_populate_voltage_value(adev,
5508					&eg_pi->vddc_voltage_table,
5509					pl->vddc, &level->vddc);
5510	if (ret)
5511		return ret;
5512
5514	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
5515	if (ret)
5516		return ret;
5517
5518	ret = si_populate_std_voltage_value(adev, std_vddc,
5519					    level->vddc.index, &level->std_vddc);
5520	if (ret)
5521		return ret;
5522
5523	if (eg_pi->vddci_control) {
5524		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
5525						pl->vddci, &level->vddci);
5526		if (ret)
5527			return ret;
5528	}
5529
5530	if (si_pi->vddc_phase_shed_control) {
5531		ret = si_populate_phase_shedding_value(adev,
5532						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
5533						       pl->vddc,
5534						       pl->sclk,
5535						       pl->mclk,
5536						       &level->vddc);
5537		if (ret)
5538			return ret;
5539	}
5540
5541	level->MaxPoweredUpCU = si_pi->max_cu;
5542
5543	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
5544
5545	return ret;
5546}
5547
5548static int si_populate_smc_t(struct amdgpu_device *adev,
5549			     struct amdgpu_ps *amdgpu_state,
5550			     SISLANDS_SMC_SWSTATE *smc_state)
5551{
5552	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5553	struct  si_ps *state = si_get_ps(amdgpu_state);
5554	u32 a_t;
5555	u32 t_l, t_h;
5556	u32 high_bsp;
5557	int i, ret;
5558
5559	if (state->performance_level_count >= 9)
5560		return -EINVAL;
5561
5562	if (state->performance_level_count < 2) {
5563		a_t = CG_R(0xffff) | CG_L(0);
5564		smc_state->levels[0].aT = cpu_to_be32(a_t);
5565		return 0;
5566	}
5567
5568	smc_state->levels[0].aT = cpu_to_be32(0);
5569
5570	for (i = 0; i <= state->performance_level_count - 2; i++) {
5571		ret = r600_calculate_at(
5572			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
5573			100 * R600_AH_DFLT,
5574			state->performance_levels[i + 1].sclk,
5575			state->performance_levels[i].sclk,
5576			&t_l,
5577			&t_h);
5578
5579		if (ret) {
5580			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
5581			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
5582		}
5583
5584		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
5585		a_t |= CG_R(t_l * pi->bsp / 20000);
5586		smc_state->levels[i].aT = cpu_to_be32(a_t);
5587
5588		high_bsp = (i == state->performance_level_count - 2) ?
5589			pi->pbsp : pi->bsp;
5590		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
5591		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
5592	}
5593
5594	return 0;
5595}
5596
5597static int si_disable_ulv(struct amdgpu_device *adev)
5598{
5599	struct si_power_info *si_pi = si_get_pi(adev);
5600	struct si_ulv_param *ulv = &si_pi->ulv;
5601
5602	if (ulv->supported)
5603		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
5604			0 : -EINVAL;
5605
5606	return 0;
5607}
5608
5609static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
5610				       struct amdgpu_ps *amdgpu_state)
5611{
5612	const struct si_power_info *si_pi = si_get_pi(adev);
5613	const struct si_ulv_param *ulv = &si_pi->ulv;
5614	const struct  si_ps *state = si_get_ps(amdgpu_state);
5615	int i;
5616
5617	if (state->performance_levels[0].mclk != ulv->pl.mclk)
5618		return false;
5619
5620	/* XXX validate against display requirements! */
5621
5622	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
5623		if (adev->clock.current_dispclk <=
5624		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
5625			if (ulv->pl.vddc <
5626			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
5627				return false;
5628		}
5629	}
5630
5631	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
5632		return false;
5633
5634	return true;
5635}
5636
5637static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
5638						       struct amdgpu_ps *amdgpu_new_state)
5639{
5640	const struct si_power_info *si_pi = si_get_pi(adev);
5641	const struct si_ulv_param *ulv = &si_pi->ulv;
5642
5643	if (ulv->supported) {
5644		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
5645			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
5646				0 : -EINVAL;
5647	}
5648	return 0;
5649}
5650
5651static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
5652					 struct amdgpu_ps *amdgpu_state,
5653					 SISLANDS_SMC_SWSTATE *smc_state)
5654{
5655	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
5656	struct ni_power_info *ni_pi = ni_get_pi(adev);
5657	struct si_power_info *si_pi = si_get_pi(adev);
5658	struct  si_ps *state = si_get_ps(amdgpu_state);
5659	int i, ret;
5660	u32 threshold;
5661	u32 sclk_in_sr = 1350; /* ??? */
5662
5663	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
5664		return -EINVAL;
5665
5666	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
5667
5668	if (amdgpu_state->vclk && amdgpu_state->dclk) {
5669		eg_pi->uvd_enabled = true;
5670		if (eg_pi->smu_uvd_hs)
5671			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
5672	} else {
5673		eg_pi->uvd_enabled = false;
5674	}
5675
5676	if (state->dc_compatible)
5677		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
5678
5679	smc_state->levelCount = 0;
5680	for (i = 0; i < state->performance_level_count; i++) {
5681		if (eg_pi->sclk_deep_sleep) {
5682			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
5683				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
5684					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
5685				else
5686					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
5687			}
5688		}
5689
5690		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
5691						    &smc_state->levels[i]);
5692		smc_state->levels[i].arbRefreshState =
5693			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
5694
5695		if (ret)
5696			return ret;
5697
5698		if (ni_pi->enable_power_containment)
5699			smc_state->levels[i].displayWatermark =
5700				(state->performance_levels[i].sclk < threshold) ?
5701				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
5702		else
5703			smc_state->levels[i].displayWatermark = (i < 2) ?
5704				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
5705
5706		if (eg_pi->dynamic_ac_timing)
5707			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
5708		else
5709			smc_state->levels[i].ACIndex = 0;
5710
5711		smc_state->levelCount++;
5712	}
5713
5714	si_write_smc_soft_register(adev,
5715				   SI_SMC_SOFT_REGISTER_watermark_threshold,
5716				   threshold / 512);
5717
5718	si_populate_smc_sp(adev, amdgpu_state, smc_state);
5719
5720	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
5721	if (ret)
5722		ni_pi->enable_power_containment = false;
5723
5724	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
5725	if (ret)
5726		ni_pi->enable_sq_ramping = false;
5727
5728	return si_populate_smc_t(adev, amdgpu_state, smc_state);
5729}
5730
5731static int si_upload_sw_state(struct amdgpu_device *adev,
5732			      struct amdgpu_ps *amdgpu_new_state)
5733{
5734	struct si_power_info *si_pi = si_get_pi(adev);
5735	struct  si_ps *new_state = si_get_ps(amdgpu_new_state);
5736	int ret;
5737	u32 address = si_pi->state_table_start +
5738		offsetof(SISLANDS_SMC_STATETABLE, driverState);
5739	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
5740		((new_state->performance_level_count - 1) *
5741		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
5742	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
5743
5744	memset(smc_state, 0, state_size);
5745
5746	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
5747	if (ret)
5748		return ret;
5749
5750	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
5751					   state_size, si_pi->sram_end);
5752}
5753
5754static int si_upload_ulv_state(struct amdgpu_device *adev)
5755{
5756	struct si_power_info *si_pi = si_get_pi(adev);
5757	struct si_ulv_param *ulv = &si_pi->ulv;
5758	int ret = 0;
5759
5760	if (ulv->supported && ulv->pl.vddc) {
5761		u32 address = si_pi->state_table_start +
5762			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
5763		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
5764		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
5765
5766		memset(smc_state, 0, state_size);
5767
5768		ret = si_populate_ulv_state(adev, smc_state);
5769		if (!ret)
5770			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
5771							  state_size, si_pi->sram_end);
5772	}
5773
5774	return ret;
5775}
5776
5777static int si_upload_smc_data(struct amdgpu_device *adev)
5778{
5779	struct amdgpu_crtc *amdgpu_crtc = NULL;
5780	int i;
5781
5782	if (adev->pm.dpm.new_active_crtc_count == 0)
5783		return 0;
5784
5785	for (i = 0; i < adev->mode_info.num_crtc; i++) {
5786		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
5787			amdgpu_crtc = adev->mode_info.crtcs[i];
5788			break;
5789		}
5790	}
5791
5792	if (amdgpu_crtc == NULL)
5793		return 0;
5794
5795	if (amdgpu_crtc->line_time <= 0)
5796		return 0;
5797
5798	if (si_write_smc_soft_register(adev,
5799				       SI_SMC_SOFT_REGISTER_crtc_index,
5800				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
5801		return 0;
5802
5803	if (si_write_smc_soft_register(adev,
5804				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
5805				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
5806		return 0;
5807
5808	if (si_write_smc_soft_register(adev,
5809				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
5810				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
5811		return 0;
5812
5813	return 0;
5814}
5815
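/*
 * Append derived entries for the EMRS/MRS/MRS1 command registers: whenever
 * the VBIOS table programs MC_SEQ_MISC1 or MC_SEQ_RESERVE_M, the matching
 * MC_PMG_CMD_* values are synthesized from the current register contents and
 * the per-entry timing data.
 */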
5816static int si_set_mc_special_registers(struct amdgpu_device *adev,
5817				       struct si_mc_reg_table *table)
5818{
5819	u8 i, j, k;
5820	u32 temp_reg;
5821
5822	for (i = 0, j = table->last; i < table->last; i++) {
5823		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5824			return -EINVAL;
5825		switch (table->mc_reg_address[i].s1) {
5826		case MC_SEQ_MISC1:
5827			temp_reg = RREG32(MC_PMG_CMD_EMRS);
5828			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
5829			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
5830			for (k = 0; k < table->num_entries; k++)
5831				table->mc_reg_table_entry[k].mc_data[j] =
5832					((temp_reg & 0xffff0000)) |
5833					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
5834			j++;
5835
5836			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5837				return -EINVAL;
5838			temp_reg = RREG32(MC_PMG_CMD_MRS);
5839			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
5840			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
5841			for (k = 0; k < table->num_entries; k++) {
5842				table->mc_reg_table_entry[k].mc_data[j] =
5843					(temp_reg & 0xffff0000) |
5844					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5845				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
5846					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5847			}
5848			j++;
5849
5850			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
5851				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5852					return -EINVAL;
5853				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
5854				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
5855				for (k = 0; k < table->num_entries; k++)
5856					table->mc_reg_table_entry[k].mc_data[j] =
5857						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5858				j++;
5859			}
5860			break;
5861		case MC_SEQ_RESERVE_M:
5862			temp_reg = RREG32(MC_PMG_CMD_MRS1);
5863			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
5864			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
5865			for (k = 0; k < table->num_entries; k++)
5866				table->mc_reg_table_entry[k].mc_data[j] =
5867					(temp_reg & 0xffff0000) |
5868					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5869			j++;
5870			break;
5871		default:
5872			break;
5873		}
5874	}
5875
5876	table->last = j;
5877
5878	return 0;
5879}
5880
5881static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
5882{
5883	bool result = true;
5884	switch (in_reg) {
5885	case  MC_SEQ_RAS_TIMING:
5886		*out_reg = MC_SEQ_RAS_TIMING_LP;
5887		break;
5888	case MC_SEQ_CAS_TIMING:
5889		*out_reg = MC_SEQ_CAS_TIMING_LP;
5890		break;
5891	case MC_SEQ_MISC_TIMING:
5892		*out_reg = MC_SEQ_MISC_TIMING_LP;
5893		break;
5894	case MC_SEQ_MISC_TIMING2:
5895		*out_reg = MC_SEQ_MISC_TIMING2_LP;
5896		break;
5897	case MC_SEQ_RD_CTL_D0:
5898		*out_reg = MC_SEQ_RD_CTL_D0_LP;
5899		break;
5900	case MC_SEQ_RD_CTL_D1:
5901		*out_reg = MC_SEQ_RD_CTL_D1_LP;
5902		break;
5903	case MC_SEQ_WR_CTL_D0:
5904		*out_reg = MC_SEQ_WR_CTL_D0_LP;
5905		break;
5906	case MC_SEQ_WR_CTL_D1:
5907		*out_reg = MC_SEQ_WR_CTL_D1_LP;
5908		break;
5909	case MC_PMG_CMD_EMRS:
5910		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
5911		break;
5912	case MC_PMG_CMD_MRS:
5913		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
5914		break;
5915	case MC_PMG_CMD_MRS1:
5916		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
5917		break;
5918	case MC_SEQ_PMG_TIMING:
5919		*out_reg = MC_SEQ_PMG_TIMING_LP;
5920		break;
5921	case MC_PMG_CMD_MRS2:
5922		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
5923		break;
5924	case MC_SEQ_WR_CTL_2:
5925		*out_reg = MC_SEQ_WR_CTL_2_LP;
5926		break;
5927	default:
5928		result = false;
5929		break;
5930	}
5931
5932	return result;
5933}
5934
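/*
 * Mark only the registers whose value actually changes between memory clock
 * entries; everything else is skipped when uploading to the SMC.
 */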
5935static void si_set_valid_flag(struct si_mc_reg_table *table)
5936{
5937	u8 i, j;
5938
5939	for (i = 0; i < table->last; i++) {
5940		for (j = 1; j < table->num_entries; j++) {
5941			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
5942				table->valid_flag |= 1 << i;
5943				break;
5944			}
5945		}
5946	}
5947}
5948
5949static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
5950{
5951	u32 i;
5952	u16 address;
5953
5954	for (i = 0; i < table->last; i++)
5955		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
5956			address : table->mc_reg_address[i].s1;
5957
5958}
5959
5960static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
5961				      struct si_mc_reg_table *si_table)
5962{
5963	u8 i, j;
5964
5965	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5966		return -EINVAL;
5967	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
5968		return -EINVAL;
5969
5970	for (i = 0; i < table->last; i++)
5971		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
5972	si_table->last = table->last;
5973
5974	for (i = 0; i < table->num_entries; i++) {
5975		si_table->mc_reg_table_entry[i].mclk_max =
5976			table->mc_reg_table_entry[i].mclk_max;
5977		for (j = 0; j < table->last; j++) {
5978			si_table->mc_reg_table_entry[i].mc_data[j] =
5979				table->mc_reg_table_entry[i].mc_data[j];
5980		}
5981	}
5982	si_table->num_entries = table->num_entries;
5983
5984	return 0;
5985}
5986
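/*
 * Seed the *_LP shadow registers from their live counterparts, pull the MC
 * register table out of the VBIOS and turn it into the trimmed form the SMC
 * expects.
 */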
5987static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
5988{
5989	struct si_power_info *si_pi = si_get_pi(adev);
5990	struct atom_mc_reg_table *table;
5991	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
5992	u8 module_index = rv770_get_memory_module_index(adev);
5993	int ret;
5994
5995	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
5996	if (!table)
5997		return -ENOMEM;
5998
5999	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
6000	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
6001	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
6002	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
6003	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
6004	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
6005	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
6006	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
6007	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
6008	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
6009	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
6010	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
6011	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
6012	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
6013
6014	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
6015	if (ret)
6016		goto init_mc_done;
6017
6018	ret = si_copy_vbios_mc_reg_table(table, si_table);
6019	if (ret)
6020		goto init_mc_done;
6021
6022	si_set_s0_mc_reg_index(si_table);
6023
6024	ret = si_set_mc_special_registers(adev, si_table);
6025	if (ret)
6026		goto init_mc_done;
6027
6028	si_set_valid_flag(si_table);
6029
6030init_mc_done:
6031	kfree(table);
6032
6033	return ret;
6034
6035}
6036
6037static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
6038					 SMC_SIslands_MCRegisters *mc_reg_table)
6039{
6040	struct si_power_info *si_pi = si_get_pi(adev);
6041	u32 i, j;
6042
6043	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
6044		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
6045			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
6046				break;
6047			mc_reg_table->address[i].s0 =
6048				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
6049			mc_reg_table->address[i].s1 =
6050				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
6051			i++;
6052		}
6053	}
6054	mc_reg_table->last = (u8)i;
6055}
6056
6057static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
6058				    SMC_SIslands_MCRegisterSet *data,
6059				    u32 num_entries, u32 valid_flag)
6060{
6061	u32 i, j;
6062
6063	for(i = 0, j = 0; j < num_entries; j++) {
6064		if (valid_flag & (1 << j)) {
6065			data->value[i] = cpu_to_be32(entry->mc_data[j]);
6066			i++;
6067		}
6068	}
6069}
6070
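/*
 * Pick the AC timing entry whose mclk_max covers the requested memory clock
 * (falling back to the highest entry) and pack its valid registers,
 * big-endian, into a single SMC register set.
 */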
6071static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
6072						 struct rv7xx_pl *pl,
6073						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
6074{
6075	struct si_power_info *si_pi = si_get_pi(adev);
6076	u32 i = 0;
6077
6078	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
6079		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
6080			break;
6081	}
6082
6083	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
6084		--i;
6085
6086	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
6087				mc_reg_table_data, si_pi->mc_reg_table.last,
6088				si_pi->mc_reg_table.valid_flag);
6089}
6090
6091static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
6092					   struct amdgpu_ps *amdgpu_state,
6093					   SMC_SIslands_MCRegisters *mc_reg_table)
6094{
6095	struct si_ps *state = si_get_ps(amdgpu_state);
6096	int i;
6097
6098	for (i = 0; i < state->performance_level_count; i++) {
6099		si_convert_mc_reg_table_entry_to_smc(adev,
6100						     &state->performance_levels[i],
6101						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
6102	}
6103}
6104
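/*
 * Populate the complete SMC MC register table: the register address list,
 * the initial (boot), ACPI and ULV slots, plus one driver-state slot per
 * performance level, then copy the whole structure into SMC RAM.
 */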
6105static int si_populate_mc_reg_table(struct amdgpu_device *adev,
6106				    struct amdgpu_ps *amdgpu_boot_state)
6107{
6108	struct  si_ps *boot_state = si_get_ps(amdgpu_boot_state);
6109	struct si_power_info *si_pi = si_get_pi(adev);
6110	struct si_ulv_param *ulv = &si_pi->ulv;
6111	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
6112
6113	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
6114
6115	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
6116
6117	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
6118
6119	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
6120					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
6121
6122	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
6123				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
6124				si_pi->mc_reg_table.last,
6125				si_pi->mc_reg_table.valid_flag);
6126
6127	if (ulv->supported && ulv->pl.vddc != 0)
6128		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
6129						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
6130	else
6131		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
6132					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
6133					si_pi->mc_reg_table.last,
6134					si_pi->mc_reg_table.valid_flag);
6135
6136	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
6137
6138	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
6139					   (u8 *)smc_mc_reg_table,
6140					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
6141}
6142
6143static int si_upload_mc_reg_table(struct amdgpu_device *adev,
6144				  struct amdgpu_ps *amdgpu_new_state)
6145{
6146	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
6147	struct si_power_info *si_pi = si_get_pi(adev);
6148	u32 address = si_pi->mc_reg_table_start +
6149		offsetof(SMC_SIslands_MCRegisters,
6150			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
6151	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
6152
6153	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
6154
6155	si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
6156
6157	return amdgpu_si_copy_bytes_to_smc(adev, address,
6158					   (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
6159					   sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
6160					   si_pi->sram_end);
6161}
6162
6163static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
6164{
6165	if (enable)
6166		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
6167	else
6168		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
6169}
6170
6171static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
6172						      struct amdgpu_ps *amdgpu_state)
6173{
6174	struct si_ps *state = si_get_ps(amdgpu_state);
6175	int i;
6176	u16 pcie_speed, max_speed = 0;
6177
6178	for (i = 0; i < state->performance_level_count; i++) {
6179		pcie_speed = state->performance_levels[i].pcie_gen;
6180		if (max_speed < pcie_speed)
6181			max_speed = pcie_speed;
6182	}
6183	return max_speed;
6184}
6185
6186static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
6187{
6188	u32 speed_cntl;
6189
6190	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
6191	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
6192
6193	return (u16)speed_cntl;
6194}
6195
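/*
 * If the new state wants a faster PCIe link, ask the platform via ACPI to
 * raise the link speed before the state switch, falling back to a forced
 * lower gen if the request is rejected.  A downgrade is only noted here
 * (pspp_notify_required) and communicated after the switch.
 */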
6196static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
6197							     struct amdgpu_ps *amdgpu_new_state,
6198							     struct amdgpu_ps *amdgpu_current_state)
6199{
6200	struct si_power_info *si_pi = si_get_pi(adev);
6201	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
6202	enum amdgpu_pcie_gen current_link_speed;
6203
6204	if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
6205		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
6206	else
6207		current_link_speed = si_pi->force_pcie_gen;
6208
6209	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
6210	si_pi->pspp_notify_required = false;
6211	if (target_link_speed > current_link_speed) {
6212		switch (target_link_speed) {
6213#if defined(CONFIG_ACPI)
6214		case AMDGPU_PCIE_GEN3:
6215			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
6216				break;
6217			si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
6218			if (current_link_speed == AMDGPU_PCIE_GEN2)
6219				break;
6220			/* fall through */
6221		case AMDGPU_PCIE_GEN2:
6222			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
6223				break;
6224#endif
6225			/* fall through */
6226		default:
6227			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
6228			break;
6229		}
6230	} else {
6231		if (target_link_speed < current_link_speed)
6232			si_pi->pspp_notify_required = true;
6233	}
6234}
6235
6236static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
6237							   struct amdgpu_ps *amdgpu_new_state,
6238							   struct amdgpu_ps *amdgpu_current_state)
6239{
6240	struct si_power_info *si_pi = si_get_pi(adev);
6241	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
6242	u8 request;
6243
6244	if (si_pi->pspp_notify_required) {
6245		if (target_link_speed == AMDGPU_PCIE_GEN3)
6246			request = PCIE_PERF_REQ_PECI_GEN3;
6247		else if (target_link_speed == AMDGPU_PCIE_GEN2)
6248			request = PCIE_PERF_REQ_PECI_GEN2;
6249		else
6250			request = PCIE_PERF_REQ_PECI_GEN1;
6251
6252		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
6253		    (si_get_current_pcie_speed(adev) > 0))
6254			return;
6255
6256#if defined(CONFIG_ACPI)
6257		amdgpu_acpi_pcie_performance_request(adev, request, false);
6258#endif
6259	}
6260}
6261
6262#if 0
6263static int si_ds_request(struct amdgpu_device *adev,
6264			 bool ds_status_on, u32 count_write)
6265{
6266	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6267
6268	if (eg_pi->sclk_deep_sleep) {
6269		if (ds_status_on)
6270			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
6271				PPSMC_Result_OK) ?
6272				0 : -EINVAL;
6273		else
6274			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
6275				PPSMC_Result_OK) ? 0 : -EINVAL;
6276	}
6277	return 0;
6278}
6279#endif
6280
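/*
 * Look up a per-SKU maximum compute unit count for specific Verde device
 * IDs; si_pi->max_cu stays 0 when no override applies.
 */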
6281static void si_set_max_cu_value(struct amdgpu_device *adev)
6282{
6283	struct si_power_info *si_pi = si_get_pi(adev);
6284
6285	if (adev->asic_type == CHIP_VERDE) {
6286		switch (adev->pdev->device) {
6287		case 0x6820:
6288		case 0x6825:
6289		case 0x6821:
6290		case 0x6823:
6291		case 0x6827:
6292			si_pi->max_cu = 10;
6293			break;
6294		case 0x682D:
6295		case 0x6824:
6296		case 0x682F:
6297		case 0x6826:
6298			si_pi->max_cu = 8;
6299			break;
6300		case 0x6828:
6301		case 0x6830:
6302		case 0x6831:
6303		case 0x6838:
6304		case 0x6839:
6305		case 0x683D:
6306			si_pi->max_cu = 10;
6307			break;
6308		case 0x683B:
6309		case 0x683F:
6310		case 0x6829:
6311			si_pi->max_cu = 8;
6312			break;
6313		default:
6314			si_pi->max_cu = 0;
6315			break;
6316		}
6317	} else {
6318		si_pi->max_cu = 0;
6319	}
6320}
6321
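/*
 * Replace leakage voltage indices in a clock/voltage dependency table with
 * the real leakage voltages, then walk the table backwards so voltages
 * never decrease as the clock increases.
 */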
6322static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
6323							     struct amdgpu_clock_voltage_dependency_table *table)
6324{
6325	u32 i;
6326	int j;
6327	u16 leakage_voltage;
6328
6329	if (table) {
6330		for (i = 0; i < table->count; i++) {
6331			switch (si_get_leakage_voltage_from_leakage_index(adev,
6332									  table->entries[i].v,
6333									  &leakage_voltage)) {
6334			case 0:
6335				table->entries[i].v = leakage_voltage;
6336				break;
6337			case -EAGAIN:
6338				return -EINVAL;
6339			case -EINVAL:
6340			default:
6341				break;
6342			}
6343		}
6344
6345		for (j = (table->count - 2); j >= 0; j--) {
6346			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
6347				table->entries[j].v : table->entries[j + 1].v;
6348		}
6349	}
6350	return 0;
6351}
6352
6353static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
6354{
6355	int ret = 0;
6356
6357	ret = si_patch_single_dependency_table_based_on_leakage(adev,
6358								&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
6359	if (ret)
6360		DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
6361	ret = si_patch_single_dependency_table_based_on_leakage(adev,
6362								&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
6363	if (ret)
6364		DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
6365	ret = si_patch_single_dependency_table_based_on_leakage(adev,
6366								&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
6367	if (ret)
6368		DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
6369	return ret;
6370}
6371
6372static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
6373					  struct amdgpu_ps *amdgpu_new_state,
6374					  struct amdgpu_ps *amdgpu_current_state)
6375{
6376	u32 lane_width;
6377	u32 new_lane_width =
6378		((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6379	u32 current_lane_width =
6380		((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6381
6382	if (new_lane_width != current_lane_width) {
6383		amdgpu_set_pcie_lanes(adev, new_lane_width);
6384		lane_width = amdgpu_get_pcie_lanes(adev);
6385		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
6386	}
6387}
6388
6389static void si_dpm_setup_asic(struct amdgpu_device *adev)
6390{
6391	si_read_clock_registers(adev);
6392	si_enable_acpi_power_management(adev);
6393}
6394
6395static int si_thermal_enable_alert(struct amdgpu_device *adev,
6396				   bool enable)
6397{
6398	u32 thermal_int = RREG32(CG_THERMAL_INT);
6399
6400	if (enable) {
6401		PPSMC_Result result;
6402
6403		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6404		WREG32(CG_THERMAL_INT, thermal_int);
6405		result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
6406		if (result != PPSMC_Result_OK) {
6407			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
6408			return -EINVAL;
6409		}
6410	} else {
6411		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6412		WREG32(CG_THERMAL_INT, thermal_int);
6413	}
6414
6415	return 0;
6416}
6417
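/*
 * Clamp the requested range to 0..255 degrees C and program the thermal
 * interrupt/DPM trip points, which take whole degrees rather than the
 * millidegree values stored in adev->pm.dpm.thermal.
 */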
6418static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
6419					    int min_temp, int max_temp)
6420{
6421	int low_temp = 0 * 1000;
6422	int high_temp = 255 * 1000;
6423
6424	if (low_temp < min_temp)
6425		low_temp = min_temp;
6426	if (high_temp > max_temp)
6427		high_temp = max_temp;
6428	if (high_temp < low_temp) {
6429		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
6430		return -EINVAL;
6431	}
6432
6433	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
6434	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
6435	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
6436
6437	adev->pm.dpm.thermal.min_temp = low_temp;
6438	adev->pm.dpm.thermal.max_temp = high_temp;
6439
6440	return 0;
6441}
6442
6443static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
6444{
6445	struct si_power_info *si_pi = si_get_pi(adev);
6446	u32 tmp;
6447
6448	if (si_pi->fan_ctrl_is_in_default_mode) {
6449		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
6450		si_pi->fan_ctrl_default_mode = tmp;
6451		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
6452		si_pi->t_min = tmp;
6453		si_pi->fan_ctrl_is_in_default_mode = false;
6454	}
6455
6456	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
6457	tmp |= TMIN(0);
6458	WREG32(CG_FDO_CTRL2, tmp);
6459
6460	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
6461	tmp |= FDO_PWM_MODE(mode);
6462	WREG32(CG_FDO_CTRL2, tmp);
6463}
6464
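/*
 * Convert the pptable fan parameters into the SMC fan table.  The trip
 * temperatures and PWM points appear to be stored in 0.01 degC and 0.01 %
 * units, so the "(50 + x) / 100" pattern rounds them to whole units
 * (e.g. t_med = 6500 -> 65), fdo_min rescales pwm_min into duty counts out
 * of duty100, and the slopes are duty-per-degree values scaled by 16
 * (four fractional bits) for the SMC.
 */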
6465static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
6466{
6467	struct si_power_info *si_pi = si_get_pi(adev);
6468	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
6469	u32 duty100;
6470	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
6471	u16 fdo_min, slope1, slope2;
6472	u32 reference_clock, tmp;
6473	int ret;
6474	u64 tmp64;
6475
6476	if (!si_pi->fan_table_start) {
6477		adev->pm.dpm.fan.ucode_fan_control = false;
6478		return 0;
6479	}
6480
6481	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6482
6483	if (duty100 == 0) {
6484		adev->pm.dpm.fan.ucode_fan_control = false;
6485		return 0;
6486	}
6487
6488	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
6489	do_div(tmp64, 10000);
6490	fdo_min = (u16)tmp64;
6491
6492	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
6493	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
6494
6495	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
6496	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
6497
6498	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
6499	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
6500
6501	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
6502	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
6503	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
6504	fan_table.slope1 = cpu_to_be16(slope1);
6505	fan_table.slope2 = cpu_to_be16(slope2);
6506	fan_table.fdo_min = cpu_to_be16(fdo_min);
6507	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
6508	fan_table.hys_up = cpu_to_be16(1);
6509	fan_table.hys_slope = cpu_to_be16(1);
6510	fan_table.temp_resp_lim = cpu_to_be16(5);
6511	reference_clock = amdgpu_asic_get_xclk(adev);
6512
6513	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
6514						reference_clock) / 1600);
6515	fan_table.fdo_max = cpu_to_be16((u16)duty100);
6516
6517	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
6518	fan_table.temp_src = (uint8_t)tmp;
6519
6520	ret = amdgpu_si_copy_bytes_to_smc(adev,
6521					  si_pi->fan_table_start,
6522					  (u8 *)(&fan_table),
6523					  sizeof(fan_table),
6524					  si_pi->sram_end);
6525
6526	if (ret) {
6527		DRM_ERROR("Failed to load fan table to the SMC.\n");
6528		adev->pm.dpm.fan.ucode_fan_control = false;
6529	}
6530
6531	return ret;
6532}
6533
6534static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
6535{
6536	struct si_power_info *si_pi = si_get_pi(adev);
6537	PPSMC_Result ret;
6538
6539	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
6540	if (ret == PPSMC_Result_OK) {
6541		si_pi->fan_is_controlled_by_smc = true;
6542		return 0;
6543	} else {
6544		return -EINVAL;
6545	}
6546}
6547
6548static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
6549{
6550	struct si_power_info *si_pi = si_get_pi(adev);
6551	PPSMC_Result ret;
6552
6553	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
6554
6555	if (ret == PPSMC_Result_OK) {
6556		si_pi->fan_is_controlled_by_smc = false;
6557		return 0;
6558	} else {
6559		return -EINVAL;
6560	}
6561}
6562
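/*
 * Report fan speed as a percentage of full scale: the measured duty cycle
 * from CG_THERMAL_STATUS over the 100% duty value from CG_FDO_CTRL1,
 * e.g. duty = 60 with duty100 = 240 reads back as 25%.
 */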
6563static int si_dpm_get_fan_speed_percent(void *handle,
6564					u32 *speed)
6565{
6566	u32 duty, duty100;
6567	u64 tmp64;
6568	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6569
6570	if (adev->pm.no_fan)
6571		return -ENOENT;
6572
6573	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6574	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
6575
6576	if (duty100 == 0)
6577		return -EINVAL;
6578
6579	tmp64 = (u64)duty * 100;
6580	do_div(tmp64, duty100);
6581	*speed = (u32)tmp64;
6582
6583	if (*speed > 100)
6584		*speed = 100;
6585
6586	return 0;
6587}
6588
6589static int si_dpm_set_fan_speed_percent(void *handle,
6590					u32 speed)
6591{
6592	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6593	struct si_power_info *si_pi = si_get_pi(adev);
6594	u32 tmp;
6595	u32 duty, duty100;
6596	u64 tmp64;
6597
6598	if (adev->pm.no_fan)
6599		return -ENOENT;
6600
6601	if (si_pi->fan_is_controlled_by_smc)
6602		return -EINVAL;
6603
6604	if (speed > 100)
6605		return -EINVAL;
6606
6607	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6608
6609	if (duty100 == 0)
6610		return -EINVAL;
6611
6612	tmp64 = (u64)speed * duty100;
6613	do_div(tmp64, 100);
6614	duty = (u32)tmp64;
6615
6616	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
6617	tmp |= FDO_STATIC_DUTY(duty);
6618	WREG32(CG_FDO_CTRL0, tmp);
6619
6620	return 0;
6621}
6622
6623static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
6624{
6625	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6626
6627	if (mode) {
6628		/* stop auto-manage */
6629		if (adev->pm.dpm.fan.ucode_fan_control)
6630			si_fan_ctrl_stop_smc_fan_control(adev);
6631		si_fan_ctrl_set_static_mode(adev, mode);
6632	} else {
6633		/* restart auto-manage */
6634		if (adev->pm.dpm.fan.ucode_fan_control)
6635			si_thermal_start_smc_fan_control(adev);
6636		else
6637			si_fan_ctrl_set_default_mode(adev);
6638	}
6639}
6640
6641static u32 si_dpm_get_fan_control_mode(void *handle)
6642{
6643	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6644	struct si_power_info *si_pi = si_get_pi(adev);
6645	u32 tmp;
6646
6647	if (si_pi->fan_is_controlled_by_smc)
6648		return 0;
6649
6650	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
6651	return (tmp >> FDO_PWM_MODE_SHIFT);
6652}
6653
6654#if 0
6655static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
6656					 u32 *speed)
6657{
6658	u32 tach_period;
6659	u32 xclk = amdgpu_asic_get_xclk(adev);
6660
6661	if (adev->pm.no_fan)
6662		return -ENOENT;
6663
6664	if (adev->pm.fan_pulses_per_revolution == 0)
6665		return -ENOENT;
6666
6667	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
6668	if (tach_period == 0)
6669		return -ENOENT;
6670
6671	*speed = 60 * xclk * 10000 / tach_period;
6672
6673	return 0;
6674}
6675
6676static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
6677					 u32 speed)
6678{
6679	u32 tach_period, tmp;
6680	u32 xclk = amdgpu_asic_get_xclk(adev);
6681
6682	if (adev->pm.no_fan)
6683		return -ENOENT;
6684
6685	if (adev->pm.fan_pulses_per_revolution == 0)
6686		return -ENOENT;
6687
6688	if ((speed < adev->pm.fan_min_rpm) ||
6689	    (speed > adev->pm.fan_max_rpm))
6690		return -EINVAL;
6691
6692	if (adev->pm.dpm.fan.ucode_fan_control)
6693		si_fan_ctrl_stop_smc_fan_control(adev);
6694
6695	tach_period = 60 * xclk * 10000 / (8 * speed);
6696	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
6697	tmp |= TARGET_PERIOD(tach_period);
6698	WREG32(CG_TACH_CTRL, tmp);
6699
6700	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
6701
6702	return 0;
6703}
6704#endif
6705
6706static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
6707{
6708	struct si_power_info *si_pi = si_get_pi(adev);
6709	u32 tmp;
6710
6711	if (!si_pi->fan_ctrl_is_in_default_mode) {
6712		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
6713		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
6714		WREG32(CG_FDO_CTRL2, tmp);
6715
6716		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
6717		tmp |= TMIN(si_pi->t_min);
6718		WREG32(CG_FDO_CTRL2, tmp);
6719		si_pi->fan_ctrl_is_in_default_mode = true;
6720	}
6721}
6722
6723static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
6724{
6725	if (adev->pm.dpm.fan.ucode_fan_control) {
6726		si_fan_ctrl_start_smc_fan_control(adev);
6727		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
6728	}
6729}
6730
6731static void si_thermal_initialize(struct amdgpu_device *adev)
6732{
6733	u32 tmp;
6734
6735	if (adev->pm.fan_pulses_per_revolution) {
6736		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
6737		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
6738		WREG32(CG_TACH_CTRL, tmp);
6739	}
6740
6741	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
6742	tmp |= TACH_PWM_RESP_RATE(0x28);
6743	WREG32(CG_FDO_CTRL2, tmp);
6744}
6745
6746static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
6747{
6748	int ret;
6749
6750	si_thermal_initialize(adev);
6751	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6752	if (ret)
6753		return ret;
6754	ret = si_thermal_enable_alert(adev, true);
6755	if (ret)
6756		return ret;
6757	if (adev->pm.dpm.fan.ucode_fan_control) {
6758		ret = si_halt_smc(adev);
6759		if (ret)
6760			return ret;
6761		ret = si_thermal_setup_fan_table(adev);
6762		if (ret)
6763			return ret;
6764		ret = si_resume_smc(adev);
6765		if (ret)
6766			return ret;
6767		si_thermal_start_smc_fan_control(adev);
6768	}
6769
6770	return 0;
6771}
6772
6773static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
6774{
6775	if (!adev->pm.no_fan) {
6776		si_fan_ctrl_set_default_mode(adev);
6777		si_fan_ctrl_stop_smc_fan_control(adev);
6778	}
6779}
6780
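/*
 * Full DPM bring-up: program voltage/MVDD control and the MC register
 * table, enable spread spectrum and thermal protection, upload the SMC
 * firmware and its state/SPLL/CAC/DTE/TDP tables, then start the SMC,
 * enable sclk control and DPM, and finally start the thermal controller.
 */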
6781static int si_dpm_enable(struct amdgpu_device *adev)
6782{
6783	struct rv7xx_power_info *pi = rv770_get_pi(adev);
6784	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6785	struct si_power_info *si_pi = si_get_pi(adev);
6786	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
6787	int ret;
6788
6789	if (amdgpu_si_is_smc_running(adev))
6790		return -EINVAL;
6791	if (pi->voltage_control || si_pi->voltage_control_svi2)
6792		si_enable_voltage_control(adev, true);
6793	if (pi->mvdd_control)
6794		si_get_mvdd_configuration(adev);
6795	if (pi->voltage_control || si_pi->voltage_control_svi2) {
6796		ret = si_construct_voltage_tables(adev);
6797		if (ret) {
6798			DRM_ERROR("si_construct_voltage_tables failed\n");
6799			return ret;
6800		}
6801	}
6802	if (eg_pi->dynamic_ac_timing) {
6803		ret = si_initialize_mc_reg_table(adev);
6804		if (ret)
6805			eg_pi->dynamic_ac_timing = false;
6806	}
6807	if (pi->dynamic_ss)
6808		si_enable_spread_spectrum(adev, true);
6809	if (pi->thermal_protection)
6810		si_enable_thermal_protection(adev, true);
6811	si_setup_bsp(adev);
6812	si_program_git(adev);
6813	si_program_tp(adev);
6814	si_program_tpp(adev);
6815	si_program_sstp(adev);
6816	si_enable_display_gap(adev);
6817	si_program_vc(adev);
6818	ret = si_upload_firmware(adev);
6819	if (ret) {
6820		DRM_ERROR("si_upload_firmware failed\n");
6821		return ret;
6822	}
6823	ret = si_process_firmware_header(adev);
6824	if (ret) {
6825		DRM_ERROR("si_process_firmware_header failed\n");
6826		return ret;
6827	}
6828	ret = si_initial_switch_from_arb_f0_to_f1(adev);
6829	if (ret) {
6830		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
6831		return ret;
6832	}
6833	ret = si_init_smc_table(adev);
6834	if (ret) {
6835		DRM_ERROR("si_init_smc_table failed\n");
6836		return ret;
6837	}
6838	ret = si_init_smc_spll_table(adev);
6839	if (ret) {
6840		DRM_ERROR("si_init_smc_spll_table failed\n");
6841		return ret;
6842	}
6843	ret = si_init_arb_table_index(adev);
6844	if (ret) {
6845		DRM_ERROR("si_init_arb_table_index failed\n");
6846		return ret;
6847	}
6848	if (eg_pi->dynamic_ac_timing) {
6849		ret = si_populate_mc_reg_table(adev, boot_ps);
6850		if (ret) {
6851			DRM_ERROR("si_populate_mc_reg_table failed\n");
6852			return ret;
6853		}
6854	}
6855	ret = si_initialize_smc_cac_tables(adev);
6856	if (ret) {
6857		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
6858		return ret;
6859	}
6860	ret = si_initialize_hardware_cac_manager(adev);
6861	if (ret) {
6862		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
6863		return ret;
6864	}
6865	ret = si_initialize_smc_dte_tables(adev);
6866	if (ret) {
6867		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
6868		return ret;
6869	}
6870	ret = si_populate_smc_tdp_limits(adev, boot_ps);
6871	if (ret) {
6872		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
6873		return ret;
6874	}
6875	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
6876	if (ret) {
6877		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
6878		return ret;
6879	}
6880	si_program_response_times(adev);
6881	si_program_ds_registers(adev);
6882	si_dpm_start_smc(adev);
6883	ret = si_notify_smc_display_change(adev, false);
6884	if (ret) {
6885		DRM_ERROR("si_notify_smc_display_change failed\n");
6886		return ret;
6887	}
6888	si_enable_sclk_control(adev, true);
6889	si_start_dpm(adev);
6890
6891	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
6892	si_thermal_start_thermal_controller(adev);
6893
6894	return 0;
6895}
6896
6897static int si_set_temperature_range(struct amdgpu_device *adev)
6898{
6899	int ret;
6900
6901	ret = si_thermal_enable_alert(adev, false);
6902	if (ret)
6903		return ret;
6904	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6905	if (ret)
6906		return ret;
6907	ret = si_thermal_enable_alert(adev, true);
6908	if (ret)
6909		return ret;
6910
6911	return ret;
6912}
6913
6914static void si_dpm_disable(struct amdgpu_device *adev)
6915{
6916	struct rv7xx_power_info *pi = rv770_get_pi(adev);
6917	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
6918
6919	if (!amdgpu_si_is_smc_running(adev))
6920		return;
6921	si_thermal_stop_thermal_controller(adev);
6922	si_disable_ulv(adev);
6923	si_clear_vc(adev);
6924	if (pi->thermal_protection)
6925		si_enable_thermal_protection(adev, false);
6926	si_enable_power_containment(adev, boot_ps, false);
6927	si_enable_smc_cac(adev, boot_ps, false);
6928	si_enable_spread_spectrum(adev, false);
6929	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
6930	si_stop_dpm(adev);
6931	si_reset_to_default(adev);
6932	si_dpm_stop_smc(adev);
6933	si_force_switch_to_arb_f0(adev);
6934
6935	ni_update_current_ps(adev, boot_ps);
6936}
6937
6938static int si_dpm_pre_set_power_state(void *handle)
6939{
6940	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6941	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6942	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
6943	struct amdgpu_ps *new_ps = &requested_ps;
6944
6945	ni_update_requested_ps(adev, new_ps);
6946	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
6947
6948	return 0;
6949}
6950
6951static int si_power_control_set_level(struct amdgpu_device *adev)
6952{
6953	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
6954	int ret;
6955
6956	ret = si_restrict_performance_levels_before_switch(adev);
6957	if (ret)
6958		return ret;
6959	ret = si_halt_smc(adev);
6960	if (ret)
6961		return ret;
6962	ret = si_populate_smc_tdp_limits(adev, new_ps);
6963	if (ret)
6964		return ret;
6965	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
6966	if (ret)
6967		return ret;
6968	ret = si_resume_smc(adev);
6969	if (ret)
6970		return ret;
6971	ret = si_set_sw_state(adev);
6972	if (ret)
6973		return ret;
6974	return 0;
6975}
6976
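/*
 * Switch power states: drop out of ULV, restrict performance levels and
 * temporarily disable power containment/CAC, halt the SMC while the new
 * software state, SMC data, ULV state and MC register table are uploaded,
 * then resume, commit the new state, conditionally re-enter ULV and
 * re-enable CAC, power containment and the TDP limits for it.
 */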
6977static int si_dpm_set_power_state(void *handle)
6978{
6979	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6980	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6981	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
6982	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
6983	int ret;
6984
6985	ret = si_disable_ulv(adev);
6986	if (ret) {
6987		DRM_ERROR("si_disable_ulv failed\n");
6988		return ret;
6989	}
6990	ret = si_restrict_performance_levels_before_switch(adev);
6991	if (ret) {
6992		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
6993		return ret;
6994	}
6995	if (eg_pi->pcie_performance_request)
6996		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
6997	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
6998	ret = si_enable_power_containment(adev, new_ps, false);
6999	if (ret) {
7000		DRM_ERROR("si_enable_power_containment failed\n");
7001		return ret;
7002	}
7003	ret = si_enable_smc_cac(adev, new_ps, false);
7004	if (ret) {
7005		DRM_ERROR("si_enable_smc_cac failed\n");
7006		return ret;
7007	}
7008	ret = si_halt_smc(adev);
7009	if (ret) {
7010		DRM_ERROR("si_halt_smc failed\n");
7011		return ret;
7012	}
7013	ret = si_upload_sw_state(adev, new_ps);
7014	if (ret) {
7015		DRM_ERROR("si_upload_sw_state failed\n");
7016		return ret;
7017	}
7018	ret = si_upload_smc_data(adev);
7019	if (ret) {
7020		DRM_ERROR("si_upload_smc_data failed\n");
7021		return ret;
7022	}
7023	ret = si_upload_ulv_state(adev);
7024	if (ret) {
7025		DRM_ERROR("si_upload_ulv_state failed\n");
7026		return ret;
7027	}
7028	if (eg_pi->dynamic_ac_timing) {
7029		ret = si_upload_mc_reg_table(adev, new_ps);
7030		if (ret) {
7031			DRM_ERROR("si_upload_mc_reg_table failed\n");
7032			return ret;
7033		}
7034	}
7035	ret = si_program_memory_timing_parameters(adev, new_ps);
7036	if (ret) {
7037		DRM_ERROR("si_program_memory_timing_parameters failed\n");
7038		return ret;
7039	}
7040	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
7041
7042	ret = si_resume_smc(adev);
7043	if (ret) {
7044		DRM_ERROR("si_resume_smc failed\n");
7045		return ret;
7046	}
7047	ret = si_set_sw_state(adev);
7048	if (ret) {
7049		DRM_ERROR("si_set_sw_state failed\n");
7050		return ret;
7051	}
7052	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
7053	if (eg_pi->pcie_performance_request)
7054		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
7055	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
7056	if (ret) {
7057		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
7058		return ret;
7059	}
7060	ret = si_enable_smc_cac(adev, new_ps, true);
7061	if (ret) {
7062		DRM_ERROR("si_enable_smc_cac failed\n");
7063		return ret;
7064	}
7065	ret = si_enable_power_containment(adev, new_ps, true);
7066	if (ret) {
7067		DRM_ERROR("si_enable_power_containment failed\n");
7068		return ret;
7069	}
7070
7071	ret = si_power_control_set_level(adev);
7072	if (ret) {
7073		DRM_ERROR("si_power_control_set_level failed\n");
7074		return ret;
7075	}
7076
7077	return 0;
7078}
7079
7080static void si_dpm_post_set_power_state(void *handle)
7081{
7082	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7083	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7084	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
7085
7086	ni_update_current_ps(adev, new_ps);
7087}
7088
7089#if 0
7090void si_dpm_reset_asic(struct amdgpu_device *adev)
7091{
7092	si_restrict_performance_levels_before_switch(adev);
7093	si_disable_ulv(adev);
7094	si_set_boot_state(adev);
7095}
7096#endif
7097
7098static void si_dpm_display_configuration_changed(void *handle)
7099{
7100	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7101
7102	si_program_display_gap(adev);
7103}
7104
7105
7106static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
7107					  struct amdgpu_ps *rps,
7108					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
7109					  u8 table_rev)
7110{
7111	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
7112	rps->class = le16_to_cpu(non_clock_info->usClassification);
7113	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
7114
7115	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
7116		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
7117		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
7118	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
7119		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
7120		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
7121	} else {
7122		rps->vclk = 0;
7123		rps->dclk = 0;
7124	}
7125
7126	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
7127		adev->pm.dpm.boot_ps = rps;
7128	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
7129		adev->pm.dpm.uvd_ps = rps;
7130}
7131
7132static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
7133				      struct amdgpu_ps *rps, int index,
7134				      union pplib_clock_info *clock_info)
7135{
7136	struct rv7xx_power_info *pi = rv770_get_pi(adev);
7137	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7138	struct si_power_info *si_pi = si_get_pi(adev);
7139	struct  si_ps *ps = si_get_ps(rps);
7140	u16 leakage_voltage;
7141	struct rv7xx_pl *pl = &ps->performance_levels[index];
7142	int ret;
7143
7144	ps->performance_level_count = index + 1;
7145
7146	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
7147	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
7148	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
7149	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
7150
7151	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
7152	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
7153	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
7154	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
7155						   si_pi->sys_pcie_mask,
7156						   si_pi->boot_pcie_gen,
7157						   clock_info->si.ucPCIEGen);
7158
7159	/* patch up vddc if necessary */
7160	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
7161							&leakage_voltage);
7162	if (ret == 0)
7163		pl->vddc = leakage_voltage;
7164
7165	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
7166		pi->acpi_vddc = pl->vddc;
7167		eg_pi->acpi_vddci = pl->vddci;
7168		si_pi->acpi_pcie_gen = pl->pcie_gen;
7169	}
7170
7171	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
7172	    index == 0) {
7173		/* XXX disable for A0 tahiti */
7174		si_pi->ulv.supported = false;
7175		si_pi->ulv.pl = *pl;
7176		si_pi->ulv.one_pcie_lane_in_ulv = false;
7177		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
7178		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
7179		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
7180	}
7181
7182	if (pi->min_vddc_in_table > pl->vddc)
7183		pi->min_vddc_in_table = pl->vddc;
7184
7185	if (pi->max_vddc_in_table < pl->vddc)
7186		pi->max_vddc_in_table = pl->vddc;
7187
7188	/* patch up boot state */
7189	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
7190		u16 vddc, vddci, mvdd;
7191		amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
7192		pl->mclk = adev->clock.default_mclk;
7193		pl->sclk = adev->clock.default_sclk;
7194		pl->vddc = vddc;
7195		pl->vddci = vddci;
7196		si_pi->mvdd_bootup_value = mvdd;
7197	}
7198
7199	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
7200	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
7201		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
7202		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
7203		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
7204		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
7205	}
7206}
7207
7208union pplib_power_state {
7209	struct _ATOM_PPLIB_STATE v1;
7210	struct _ATOM_PPLIB_STATE_V2 v2;
7211};
7212
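/*
 * Walk the ATOM PowerPlay state array from the VBIOS: allocate one si_ps
 * per state, parse its non-clock info, and fill in up to
 * SISLANDS_MAX_HARDWARE_POWERLEVELS performance levels from the clock info
 * array, then record the sclk/mclk pairs for the VCE states.
 */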
7213static int si_parse_power_table(struct amdgpu_device *adev)
7214{
7215	struct amdgpu_mode_info *mode_info = &adev->mode_info;
7216	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
7217	union pplib_power_state *power_state;
7218	int i, j, k, non_clock_array_index, clock_array_index;
7219	union pplib_clock_info *clock_info;
7220	struct _StateArray *state_array;
7221	struct _ClockInfoArray *clock_info_array;
7222	struct _NonClockInfoArray *non_clock_info_array;
7223	union power_info *power_info;
7224	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
7225	u16 data_offset;
7226	u8 frev, crev;
7227	u8 *power_state_offset;
7228	struct  si_ps *ps;
7229
7230	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
7231				   &frev, &crev, &data_offset))
7232		return -EINVAL;
7233	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
7234
7235	amdgpu_add_thermal_controller(adev);
7236
7237	state_array = (struct _StateArray *)
7238		(mode_info->atom_context->bios + data_offset +
7239		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
7240	clock_info_array = (struct _ClockInfoArray *)
7241		(mode_info->atom_context->bios + data_offset +
7242		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
7243	non_clock_info_array = (struct _NonClockInfoArray *)
7244		(mode_info->atom_context->bios + data_offset +
7245		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
7246
7247	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
7248				  sizeof(struct amdgpu_ps),
7249				  GFP_KERNEL);
7250	if (!adev->pm.dpm.ps)
7251		return -ENOMEM;
7252	power_state_offset = (u8 *)state_array->states;
7253	for (i = 0; i < state_array->ucNumEntries; i++) {
7254		u8 *idx;
7255		power_state = (union pplib_power_state *)power_state_offset;
7256		non_clock_array_index = power_state->v2.nonClockInfoIndex;
7257		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
7258			&non_clock_info_array->nonClockInfo[non_clock_array_index];
7259		ps = kzalloc(sizeof(struct  si_ps), GFP_KERNEL);
7260		if (ps == NULL) {
7261			kfree(adev->pm.dpm.ps);
7262			return -ENOMEM;
7263		}
7264		adev->pm.dpm.ps[i].ps_priv = ps;
7265		si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
7266					      non_clock_info,
7267					      non_clock_info_array->ucEntrySize);
7268		k = 0;
7269		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
7270		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
7271			clock_array_index = idx[j];
7272			if (clock_array_index >= clock_info_array->ucNumEntries)
7273				continue;
7274			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
7275				break;
7276			clock_info = (union pplib_clock_info *)
7277				((u8 *)&clock_info_array->clockInfo[0] +
7278				 (clock_array_index * clock_info_array->ucEntrySize));
7279			si_parse_pplib_clock_info(adev,
7280						  &adev->pm.dpm.ps[i], k,
7281						  clock_info);
7282			k++;
7283		}
7284		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
7285	}
7286	adev->pm.dpm.num_ps = state_array->ucNumEntries;
7287
7288	/* fill in the vce power states */
7289	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
7290		u32 sclk, mclk;
7291		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
7292		clock_info = (union pplib_clock_info *)
7293			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
7294		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
7295		sclk |= clock_info->si.ucEngineClockHigh << 16;
7296		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
7297		mclk |= clock_info->si.ucMemoryClockHigh << 16;
7298		adev->pm.dpm.vce_states[i].sclk = sclk;
7299		adev->pm.dpm.vce_states[i].mclk = mclk;
7300	}
7301
7302	return 0;
7303}
7304
7305static int si_dpm_init(struct amdgpu_device *adev)
7306{
7307	struct rv7xx_power_info *pi;
7308	struct evergreen_power_info *eg_pi;
7309	struct ni_power_info *ni_pi;
7310	struct si_power_info *si_pi;
7311	struct atom_clock_dividers dividers;
7312	int ret;
7313
7314	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
7315	if (si_pi == NULL)
7316		return -ENOMEM;
7317	adev->pm.dpm.priv = si_pi;
7318	ni_pi = &si_pi->ni;
7319	eg_pi = &ni_pi->eg;
7320	pi = &eg_pi->rv7xx;
7321
7322	si_pi->sys_pcie_mask =
7323		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
7324	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
7325	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
7326
7327	si_set_max_cu_value(adev);
7328
7329	rv770_get_max_vddc(adev);
7330	si_get_leakage_vddc(adev);
7331	si_patch_dependency_tables_based_on_leakage(adev);
7332
7333	pi->acpi_vddc = 0;
7334	eg_pi->acpi_vddci = 0;
7335	pi->min_vddc_in_table = 0;
7336	pi->max_vddc_in_table = 0;
7337
7338	ret = amdgpu_get_platform_caps(adev);
7339	if (ret)
7340		return ret;
7341
7342	ret = amdgpu_parse_extended_power_table(adev);
7343	if (ret)
7344		return ret;
7345
7346	ret = si_parse_power_table(adev);
7347	if (ret)
7348		return ret;
7349
7350	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
7351		kcalloc(4,
7352			sizeof(struct amdgpu_clock_voltage_dependency_entry),
7353			GFP_KERNEL);
7354	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
7355		amdgpu_free_extended_power_table(adev);
7356		return -ENOMEM;
7357	}
7358	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
7359	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
7360	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
7361	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
7362	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
7363	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
7364	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
7365	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
7366	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
7367
7368	if (adev->pm.dpm.voltage_response_time == 0)
7369		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
7370	if (adev->pm.dpm.backbias_response_time == 0)
7371		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
7372
7373	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
7374					     0, false, &dividers);
7375	if (ret)
7376		pi->ref_div = dividers.ref_div + 1;
7377	else
7378		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
7379
7380	eg_pi->smu_uvd_hs = false;
7381
7382	pi->mclk_strobe_mode_threshold = 40000;
7383	if (si_is_special_1gb_platform(adev))
7384		pi->mclk_stutter_mode_threshold = 0;
7385	else
7386		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
7387	pi->mclk_edc_enable_threshold = 40000;
7388	eg_pi->mclk_edc_wr_enable_threshold = 40000;
7389
7390	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
7391
7392	pi->voltage_control =
7393		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7394					    VOLTAGE_OBJ_GPIO_LUT);
7395	if (!pi->voltage_control) {
7396		si_pi->voltage_control_svi2 =
7397			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7398						    VOLTAGE_OBJ_SVID2);
7399		if (si_pi->voltage_control_svi2)
7400			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7401						  &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
7402	}
7403
7404	pi->mvdd_control =
7405		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
7406					    VOLTAGE_OBJ_GPIO_LUT);
7407
7408	eg_pi->vddci_control =
7409		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
7410					    VOLTAGE_OBJ_GPIO_LUT);
7411	if (!eg_pi->vddci_control)
7412		si_pi->vddci_control_svi2 =
7413			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
7414						    VOLTAGE_OBJ_SVID2);
7415
7416	si_pi->vddc_phase_shed_control =
7417		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7418					    VOLTAGE_OBJ_PHASE_LUT);
7419
7420	rv770_get_engine_memory_ss(adev);
7421
7422	pi->asi = RV770_ASI_DFLT;
7423	pi->pasi = CYPRESS_HASI_DFLT;
7424	pi->vrc = SISLANDS_VRC_DFLT;
7425
7426	pi->gfx_clock_gating = true;
7427
7428	eg_pi->sclk_deep_sleep = true;
7429	si_pi->sclk_deep_sleep_above_low = false;
7430
7431	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
7432		pi->thermal_protection = true;
7433	else
7434		pi->thermal_protection = false;
7435
7436	eg_pi->dynamic_ac_timing = true;
7437
7438	eg_pi->light_sleep = true;
7439#if defined(CONFIG_ACPI)
7440	eg_pi->pcie_performance_request =
7441		amdgpu_acpi_is_pcie_performance_request_supported(adev);
7442#else
7443	eg_pi->pcie_performance_request = false;
7444#endif
7445
7446	si_pi->sram_end = SMC_RAM_END;
7447
7448	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
7449	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
7450	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
7451	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
7452	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
7453	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
7454	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
7455
7456	si_initialize_powertune_defaults(adev);
7457
7458	/* make sure dc limits are valid */
7459	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
7460	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
7461		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
7462			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
7463
7464	si_pi->fan_ctrl_is_in_default_mode = true;
7465
7466	return 0;
7467}
7468
7469static void si_dpm_fini(struct amdgpu_device *adev)
7470{
7471	int i;
7472
7473	if (adev->pm.dpm.ps)
7474		for (i = 0; i < adev->pm.dpm.num_ps; i++)
7475			kfree(adev->pm.dpm.ps[i].ps_priv);
7476	kfree(adev->pm.dpm.ps);
7477	kfree(adev->pm.dpm.priv);
7478	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
7479	amdgpu_free_extended_power_table(adev);
7480}
7481
7482static void si_dpm_debugfs_print_current_performance_level(void *handle,
7483						    struct seq_file *m)
7484{
7485	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7486	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7487	struct amdgpu_ps *rps = &eg_pi->current_rps;
7488	struct  si_ps *ps = si_get_ps(rps);
7489	struct rv7xx_pl *pl;
7490	u32 current_index =
7491		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
7492		CURRENT_STATE_INDEX_SHIFT;
7493
7494	if (current_index >= ps->performance_level_count) {
7495		seq_printf(m, "invalid dpm profile %d\n", current_index);
7496	} else {
7497		pl = &ps->performance_levels[current_index];
7498		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
7499		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7500			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
7501	}
7502}
7503
7504static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
7505				      struct amdgpu_irq_src *source,
7506				      unsigned type,
7507				      enum amdgpu_interrupt_state state)
7508{
7509	u32 cg_thermal_int;
7510
7511	switch (type) {
7512	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
7513		switch (state) {
7514		case AMDGPU_IRQ_STATE_DISABLE:
7515			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7516			cg_thermal_int |= THERM_INT_MASK_HIGH;
7517			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7518			break;
7519		case AMDGPU_IRQ_STATE_ENABLE:
7520			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7521			cg_thermal_int &= ~THERM_INT_MASK_HIGH;
7522			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7523			break;
7524		default:
7525			break;
7526		}
7527		break;
7528
7529	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
7530		switch (state) {
7531		case AMDGPU_IRQ_STATE_DISABLE:
7532			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7533			cg_thermal_int |= THERM_INT_MASK_LOW;
7534			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7535			break;
7536		case AMDGPU_IRQ_STATE_ENABLE:
7537			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7538			cg_thermal_int &= ~THERM_INT_MASK_LOW;
7539			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7540			break;
7541		default:
7542			break;
7543		}
7544		break;
7545
7546	default:
7547		break;
7548	}
7549	return 0;
7550}
7551
7552static int si_dpm_process_interrupt(struct amdgpu_device *adev,
7553				    struct amdgpu_irq_src *source,
7554				    struct amdgpu_iv_entry *entry)
7555{
7556	bool queue_thermal = false;
7557
7558	if (entry == NULL)
7559		return -EINVAL;
7560
7561	switch (entry->src_id) {
7562	case 230: /* thermal low to high */
7563		DRM_DEBUG("IH: thermal low to high\n");
7564		adev->pm.dpm.thermal.high_to_low = false;
7565		queue_thermal = true;
7566		break;
7567	case 231: /* thermal high to low */
7568		DRM_DEBUG("IH: thermal high to low\n");
7569		adev->pm.dpm.thermal.high_to_low = true;
7570		queue_thermal = true;
7571		break;
7572	default:
7573		break;
7574	}
7575
7576	if (queue_thermal)
7577		schedule_work(&adev->pm.dpm.thermal.work);
7578
7579	return 0;
7580}
7581
7582static int si_dpm_late_init(void *handle)
7583{
7584	int ret;
7585	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7586
7587	if (!adev->pm.dpm_enabled)
7588		return 0;
7589
7590	ret = si_set_temperature_range(adev);
7591	if (ret)
7592		return ret;
7593#if 0 //TODO ?
7594	si_dpm_powergate_uvd(adev, true);
7595#endif
7596	return 0;
7597}
7598
7599/**
7600 * si_dpm_init_microcode - load ucode images from disk
7601 *
7602 * @adev: amdgpu_device pointer
7603 *
7604 * Use the firmware interface to load the ucode images into
7605 * the driver (not loaded into hw).
7606 * Returns 0 on success, error on failure.
7607 */
7608static int si_dpm_init_microcode(struct amdgpu_device *adev)
7609{
7610	const char *chip_name;
7611	char fw_name[30];
7612	int err;
7613
7614	DRM_DEBUG("\n");
7615	switch (adev->asic_type) {
7616	case CHIP_TAHITI:
7617		chip_name = "tahiti";
7618		break;
7619	case CHIP_PITCAIRN:
7620		if ((adev->pdev->revision == 0x81) &&
7621		    ((adev->pdev->device == 0x6810) ||
7622		    (adev->pdev->device == 0x6811)))
7623			chip_name = "pitcairn_k";
7624		else
7625			chip_name = "pitcairn";
7626		break;
7627	case CHIP_VERDE:
7628		if (((adev->pdev->device == 0x6820) &&
7629			((adev->pdev->revision == 0x81) ||
7630			(adev->pdev->revision == 0x83))) ||
7631		    ((adev->pdev->device == 0x6821) &&
7632			((adev->pdev->revision == 0x83) ||
7633			(adev->pdev->revision == 0x87))) ||
7634		    ((adev->pdev->revision == 0x87) &&
7635			((adev->pdev->device == 0x6823) ||
7636			(adev->pdev->device == 0x682b))))
7637			chip_name = "verde_k";
7638		else
7639			chip_name = "verde";
7640		break;
7641	case CHIP_OLAND:
7642		if (((adev->pdev->revision == 0x81) &&
7643			((adev->pdev->device == 0x6600) ||
7644			(adev->pdev->device == 0x6604) ||
7645			(adev->pdev->device == 0x6605) ||
7646			(adev->pdev->device == 0x6610))) ||
7647		    ((adev->pdev->revision == 0x83) &&
7648			(adev->pdev->device == 0x6610)))
7649			chip_name = "oland_k";
7650		else
7651			chip_name = "oland";
7652		break;
7653	case CHIP_HAINAN:
7654		if (((adev->pdev->revision == 0x81) &&
7655			(adev->pdev->device == 0x6660)) ||
7656		    ((adev->pdev->revision == 0x83) &&
7657			((adev->pdev->device == 0x6660) ||
7658			(adev->pdev->device == 0x6663) ||
7659			(adev->pdev->device == 0x6665) ||
7660			 (adev->pdev->device == 0x6667))))
7661			chip_name = "hainan_k";
7662		else if ((adev->pdev->revision == 0xc3) &&
7663			 (adev->pdev->device == 0x6665))
7664			chip_name = "banks_k_2";
7665		else
7666			chip_name = "hainan";
7667		break;
7668	default: BUG();
7669	}
7670
7671	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
7672	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
7673	if (err)
7674		goto out;
7675	err = amdgpu_ucode_validate(adev->pm.fw);
7676
7677out:
7678	if (err) {
7679		DRM_ERROR("si_smc: Failed to load firmware. err = %d \"%s\"\n",
7680			  err, fw_name);
7681		release_firmware(adev->pm.fw);
7682		adev->pm.fw = NULL;
7683	}
7684	return err;
7685
7686}
7687
7688static int si_dpm_sw_init(void *handle)
7689{
7690	int ret;
7691	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7692
7693	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
7694	if (ret)
7695		return ret;
7696
7697	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
7698	if (ret)
7699		return ret;
7700
7701	/* default to balanced state */
7702	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
7703	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
7704	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
7705	adev->pm.default_sclk = adev->clock.default_sclk;
7706	adev->pm.default_mclk = adev->clock.default_mclk;
7707	adev->pm.current_sclk = adev->clock.default_sclk;
7708	adev->pm.current_mclk = adev->clock.default_mclk;
7709	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
7710
7711	if (amdgpu_dpm == 0)
7712		return 0;
7713
7714	ret = si_dpm_init_microcode(adev);
7715	if (ret)
7716		return ret;
7717
7718	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
7719	mutex_lock(&adev->pm.mutex);
7720	ret = si_dpm_init(adev);
7721	if (ret)
7722		goto dpm_failed;
7723	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
7724	if (amdgpu_dpm == 1)
7725		amdgpu_pm_print_power_states(adev);
7726	mutex_unlock(&adev->pm.mutex);
7727	DRM_INFO("amdgpu: dpm initialized\n");
7728
7729	return 0;
7730
7731dpm_failed:
7732	si_dpm_fini(adev);
7733	mutex_unlock(&adev->pm.mutex);
7734	DRM_ERROR("amdgpu: dpm initialization failed\n");
7735	return ret;
7736}
7737
7738static int si_dpm_sw_fini(void *handle)
7739{
7740	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7741
7742	flush_work(&adev->pm.dpm.thermal.work);
7743
7744	mutex_lock(&adev->pm.mutex);
7745	si_dpm_fini(adev);
7746	mutex_unlock(&adev->pm.mutex);
7747
7748	return 0;
7749}
7750
7751static int si_dpm_hw_init(void *handle)
7752{
7753	int ret;
7754
7755	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7756
7757	if (!amdgpu_dpm)
7758		return 0;
7759
7760	mutex_lock(&adev->pm.mutex);
7761	si_dpm_setup_asic(adev);
7762	ret = si_dpm_enable(adev);
7763	if (ret)
7764		adev->pm.dpm_enabled = false;
7765	else
7766		adev->pm.dpm_enabled = true;
7767	mutex_unlock(&adev->pm.mutex);
7768	amdgpu_pm_compute_clocks(adev);
7769	return ret;
7770}
7771
7772static int si_dpm_hw_fini(void *handle)
7773{
7774	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7775
7776	if (adev->pm.dpm_enabled) {
7777		mutex_lock(&adev->pm.mutex);
7778		si_dpm_disable(adev);
7779		mutex_unlock(&adev->pm.mutex);
7780	}
7781
7782	return 0;
7783}
7784
7785static int si_dpm_suspend(void *handle)
7786{
7787	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7788
7789	if (adev->pm.dpm_enabled) {
7790		mutex_lock(&adev->pm.mutex);
7791		/* disable dpm */
7792		si_dpm_disable(adev);
7793		/* reset the power state */
7794		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
7795		mutex_unlock(&adev->pm.mutex);
7796	}
7797	return 0;
7798}
7799
7800static int si_dpm_resume(void *handle)
7801{
7802	int ret;
7803	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7804
7805	if (adev->pm.dpm_enabled) {
7806		/* asic init will reset to the boot state */
7807		mutex_lock(&adev->pm.mutex);
7808		si_dpm_setup_asic(adev);
7809		ret = si_dpm_enable(adev);
7810		if (ret)
7811			adev->pm.dpm_enabled = false;
7812		else
7813			adev->pm.dpm_enabled = true;
7814		mutex_unlock(&adev->pm.mutex);
7815		if (adev->pm.dpm_enabled)
7816			amdgpu_pm_compute_clocks(adev);
7817	}
7818	return 0;
7819}
7820
7821static bool si_dpm_is_idle(void *handle)
7822{
7823	/* XXX */
7824	return true;
7825}
7826
7827static int si_dpm_wait_for_idle(void *handle)
7828{
7829	/* XXX */
7830	return 0;
7831}
7832
7833static int si_dpm_soft_reset(void *handle)
7834{
7835	return 0;
7836}
7837
7838static int si_dpm_set_clockgating_state(void *handle,
7839					enum amd_clockgating_state state)
7840{
7841	return 0;
7842}
7843
7844static int si_dpm_set_powergating_state(void *handle,
7845					enum amd_powergating_state state)
7846{
7847	return 0;
7848}
7849
7850/* get temperature in millidegrees */
7851static int si_dpm_get_temp(void *handle)
7852{
7853	u32 temp;
7854	int actual_temp = 0;
7855	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7856
7857	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
7858		CTF_TEMP_SHIFT;
7859
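	/* bit 0x200 appears to flag an out-of-range reading, so report the 255 C maximum */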
7860	if (temp & 0x200)
7861		actual_temp = 255;
7862	else
7863		actual_temp = temp & 0x1ff;
7864
7865	actual_temp = (actual_temp * 1000);
7866
7867	return actual_temp;
7868}
7869
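/* engine clock of the lowest (low == true) or highest performance level of the requested state */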
7870static u32 si_dpm_get_sclk(void *handle, bool low)
7871{
7872	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7873	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7874	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
7875
7876	if (low)
7877		return requested_state->performance_levels[0].sclk;
7878	else
7879		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
7880}
7881
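/* same selection as si_dpm_get_sclk, but for the memory clock */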
7882static u32 si_dpm_get_mclk(void *handle, bool low)
7883{
7884	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7885	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7886	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
7887
7888	if (low)
7889		return requested_state->performance_levels[0].mclk;
7890	else
7891		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
7892}
7893
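/* dump a power state (class, caps, uvd clocks and each performance level) to the kernel log */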
7894static void si_dpm_print_power_state(void *handle,
7895				     void *current_ps)
7896{
7897	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7898	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
7899	struct si_ps *ps = si_get_ps(rps);
7900	struct rv7xx_pl *pl;
7901	int i;
7902
7903	amdgpu_dpm_print_class_info(rps->class, rps->class2);
7904	amdgpu_dpm_print_cap_info(rps->caps);
7905	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
7906	for (i = 0; i < ps->performance_level_count; i++) {
7907		pl = &ps->performance_levels[i];
7908		if (adev->asic_type >= CHIP_TAHITI)
7909			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7910				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
7911		else
7912			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
7913				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
7914	}
7915	amdgpu_dpm_print_ps_status(adev, rps);
7916}
7917
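/* early init only wires up the powerplay callbacks and the thermal irq handlers */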
7918static int si_dpm_early_init(void *handle)
7919{
7921	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7922
7923	adev->powerplay.pp_funcs = &si_dpm_funcs;
7924	adev->powerplay.pp_handle = adev;
7925	si_dpm_set_irq_funcs(adev);
7926	return 0;
7927}
7928
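/* two levels match only when clocks, voltages and pcie gen all agree */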
7929static inline bool si_are_power_levels_equal(const struct rv7xx_pl  *si_cpl1,
7930						const struct rv7xx_pl *si_cpl2)
7931{
7932	return ((si_cpl1->mclk == si_cpl2->mclk) &&
7933		  (si_cpl1->sclk == si_cpl2->sclk) &&
7934		  (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
7935		  (si_cpl1->vddc == si_cpl2->vddc) &&
7936		  (si_cpl1->vddci == si_cpl2->vddci));
7937}
7938
7939static int si_check_state_equal(void *handle,
7940				void *current_ps,
7941				void *request_ps,
7942				bool *equal)
7943{
7944	struct si_ps *si_cps;
7945	struct si_ps *si_rps;
7946	int i;
7947	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
7948	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
7949	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7950
7951	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
7952		return -EINVAL;
7953
7954	si_cps = si_get_ps(cps);
7955	si_rps = si_get_ps(rps);
7956
7957	if (si_cps == NULL) {
7958		DRM_ERROR("si_cps is NULL\n");
7959		*equal = false;
7960		return 0;
7961	}
7962
7963	if (si_cps->performance_level_count != si_rps->performance_level_count) {
7964		*equal = false;
7965		return 0;
7966	}
7967
7968	for (i = 0; i < si_cps->performance_level_count; i++) {
7969		if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
7970					&(si_rps->performance_levels[i]))) {
7971			*equal = false;
7972			return 0;
7973		}
7974	}
7975
7976	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
7977	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
7978	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
7979
7980	return 0;
7981}
7982
7983static int si_dpm_read_sensor(void *handle, int idx,
7984			      void *value, int *size)
7985{
7986	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7987	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7988	struct amdgpu_ps *rps = &eg_pi->current_rps;
7989	struct si_ps *ps = si_get_ps(rps);
7990	uint32_t sclk, mclk;
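	/* performance level currently selected by the hardware for the active state */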
7991	u32 pl_index =
7992		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
7993		CURRENT_STATE_INDEX_SHIFT;
7994
7995	/* size must be at least 4 bytes for all sensors */
7996	if (*size < 4)
7997		return -EINVAL;
7998
7999	switch (idx) {
8000	case AMDGPU_PP_SENSOR_GFX_SCLK:
8001		if (pl_index < ps->performance_level_count) {
8002			sclk = ps->performance_levels[pl_index].sclk;
8003			*((uint32_t *)value) = sclk;
8004			*size = 4;
8005			return 0;
8006		}
8007		return -EINVAL;
8008	case AMDGPU_PP_SENSOR_GFX_MCLK:
8009		if (pl_index < ps->performance_level_count) {
8010			mclk = ps->performance_levels[pl_index].mclk;
8011			*((uint32_t *)value) = mclk;
8012			*size = 4;
8013			return 0;
8014		}
8015		return -EINVAL;
8016	case AMDGPU_PP_SENSOR_GPU_TEMP:
8017		*((uint32_t *)value) = si_dpm_get_temp(adev);
8018		*size = 4;
8019		return 0;
8020	default:
8021		return -EINVAL;
8022	}
8023}
8024
8025static const struct amd_ip_funcs si_dpm_ip_funcs = {
8026	.name = "si_dpm",
8027	.early_init = si_dpm_early_init,
8028	.late_init = si_dpm_late_init,
8029	.sw_init = si_dpm_sw_init,
8030	.sw_fini = si_dpm_sw_fini,
8031	.hw_init = si_dpm_hw_init,
8032	.hw_fini = si_dpm_hw_fini,
8033	.suspend = si_dpm_suspend,
8034	.resume = si_dpm_resume,
8035	.is_idle = si_dpm_is_idle,
8036	.wait_for_idle = si_dpm_wait_for_idle,
8037	.soft_reset = si_dpm_soft_reset,
8038	.set_clockgating_state = si_dpm_set_clockgating_state,
8039	.set_powergating_state = si_dpm_set_powergating_state,
8040};
8041
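/* the dpm code registers as the SMC ip block, version 6.0, on SI parts */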
8042const struct amdgpu_ip_block_version si_smu_ip_block =
8043{
8044	.type = AMD_IP_BLOCK_TYPE_SMC,
8045	.major = 6,
8046	.minor = 0,
8047	.rev = 0,
8048	.funcs = &si_dpm_ip_funcs,
8049};
8050
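/* powerplay callbacks, exported through adev->powerplay.pp_funcs in early_init */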
8051static const struct amd_pm_funcs si_dpm_funcs = {
8052	.pre_set_power_state = &si_dpm_pre_set_power_state,
8053	.set_power_state = &si_dpm_set_power_state,
8054	.post_set_power_state = &si_dpm_post_set_power_state,
8055	.display_configuration_changed = &si_dpm_display_configuration_changed,
8056	.get_sclk = &si_dpm_get_sclk,
8057	.get_mclk = &si_dpm_get_mclk,
8058	.print_power_state = &si_dpm_print_power_state,
8059	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
8060	.force_performance_level = &si_dpm_force_performance_level,
8061	.vblank_too_short = &si_dpm_vblank_too_short,
8062	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
8063	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
8064	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
8065	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
8066	.check_state_equal = &si_check_state_equal,
8067	.get_vce_clock_state = amdgpu_get_vce_clock_state,
8068	.read_sensor = &si_dpm_read_sensor,
8069};
8070
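/* thermal interrupt source: .set programs the trigger, .process handles the resulting irq */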
8071static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
8072	.set = si_dpm_set_interrupt_state,
8073	.process = si_dpm_process_interrupt,
8074};
8075
8076static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
8077{
8078	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
8079	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
8080}
8081