/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

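/*
 * Copy the DRAM timing and burst-time fields of one MC arbitration
 * register set (F0/F1) into another, then ask the memory controller
 * to switch to the destination set.
 */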
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			 MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			 MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

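/*
 * Clocks in this driver are in 10 kHz units.  Bucket a DDR3 memory
 * clock into one of 16 MC parameter indices: below 100 MHz -> 0,
 * 800 MHz and up -> 0x0f, otherwise one step per 50 MHz.
 */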
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

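/*
 * Convert a VDDC voltage in mV to a VID code.  This matches the SVI2
 * encoding (voltage = 1.55 V - VID * 6.25 mV), computed here in fixed
 * point as (6200 - mV * 4) / 25.
 */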
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

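/*
 * TdcWaterfallCtl is apparently dword 8 of the PmFuses table, hence
 * "dw8".  Note that the SRAM read below effectively just validates
 * that the fuse table is reachable; on success the value programmed
 * is taken from the per-ASIC powertune defaults, not the fuse table.
 */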
static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

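/*
 * Derive the GNB LPML min/max VID from the non-zero entries of the
 * BAPM hi/lo SIDD VID tables populated above.
 */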
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

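/*
 * Power and temperature limits handed to the SMC below are scaled by
 * 256, i.e. 8.8 fixed point.
 */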
static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

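/*
 * Toggle the DIDT (di/dt throttling) enable bit for each gfx block
 * (SQ, DB, TD, TCP) whose ramping capability was enabled in
 * ci_initialize_powertune_defaults().
 */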
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

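/*
 * Walk a ci_pt_config_reg table terminated by an 0xFFFFFFFF offset.
 * CISLANDS_CONFIGREG_CACHE entries accumulate field values that are
 * OR'd into the next real register write; the entry type otherwise
 * selects the access method (SMC indirect, DIDT indirect or MMIO).
 */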
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(adev, gate);
}

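/*
 * An MCLK switch has to fit within the display vblank period.  The
 * vblank time is in microseconds; the 450/300 us limits (GDDR5 vs.
 * other memory types) presumably reflect worst-case switch latency.
 */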
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

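/*
 * Clamp the requested power state to the AC/DC clock and voltage
 * limits and the display configuration.  MCLK switching is disabled
 * when more than one CRTC is active or vblank is too short, in which
 * case the highest MCLK is used for the lowest level as well.
 */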
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

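/*
 * min_temp/max_temp are in millidegrees C; the DIG_THERM_INTH/INTL
 * interrupt thresholds are programmed in whole degrees C.
 */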
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

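/*
 * Build the SMC fan table from the VBIOS fan profile.  Temperatures
 * are in units of 0.01 C and PWM values in units of 0.01%; the
 * slopes appear to be stored scaled by 16, with the "+ 50 ... / 100"
 * idiom rounding to the nearest unit.
 */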
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, mode);
	} else {
		/* restart auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		else
			ci_fan_ctrl_set_default_mode(adev);
	}
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(ixCG_TACH_CTRL, tmp);
1333
1334	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1335
1336	return 0;
1337}
1338#endif
1339
1340static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1341{
1342	struct ci_power_info *pi = ci_get_pi(adev);
1343	u32 tmp;
1344
1345	if (!pi->fan_ctrl_is_in_default_mode) {
1346		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1347		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1348		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1349
1350		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1351		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1352		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1353		pi->fan_ctrl_is_in_default_mode = true;
1354	}
1355}
1356
1357static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1358{
1359	if (adev->pm.dpm.fan.ucode_fan_control) {
1360		ci_fan_ctrl_start_smc_fan_control(adev);
1361		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1362	}
1363}
1364
1365static void ci_thermal_initialize(struct amdgpu_device *adev)
1366{
1367	u32 tmp;
1368
1369	if (adev->pm.fan_pulses_per_revolution) {
1370		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1371		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1372			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1373		WREG32_SMC(ixCG_TACH_CTRL, tmp);
1374	}
1375
1376	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1377	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1378	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1379}
1380
1381static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1382{
1383	int ret;
1384
1385	ci_thermal_initialize(adev);
1386	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1387	if (ret)
1388		return ret;
1389	ret = ci_thermal_enable_alert(adev, true);
1390	if (ret)
1391		return ret;
1392	if (adev->pm.dpm.fan.ucode_fan_control) {
1393		ret = ci_thermal_setup_fan_table(adev);
1394		if (ret)
1395			return ret;
1396		ci_thermal_start_smc_fan_control(adev);
1397	}
1398
1399	return 0;
1400}
1401
1402static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1403{
1404	if (!adev->pm.no_fan)
1405		ci_fan_ctrl_set_default_mode(adev);
1406}
1407
1408static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1409				     u16 reg_offset, u32 *value)
1410{
1411	struct ci_power_info *pi = ci_get_pi(adev);
1412
1413	return amdgpu_ci_read_smc_sram_dword(adev,
1414				      pi->soft_regs_start + reg_offset,
1415				      value, pi->sram_end);
1416}
1417
1418static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1419				      u16 reg_offset, u32 value)
1420{
1421	struct ci_power_info *pi = ci_get_pi(adev);
1422
1423	return amdgpu_ci_write_smc_sram_dword(adev,
1424				       pi->soft_regs_start + reg_offset,
1425				       value, pi->sram_end);
1426}
1427
1428static void ci_init_fps_limits(struct amdgpu_device *adev)
1429{
1430	struct ci_power_info *pi = ci_get_pi(adev);
1431	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1432
1433	if (pi->caps_fps) {
1434		u16 tmp;
1435
1436		tmp = 45;
1437		table->FpsHighT = cpu_to_be16(tmp);
1438
1439		tmp = 30;
1440		table->FpsLowT = cpu_to_be16(tmp);
1441	}
1442}
1443
1444static int ci_update_sclk_t(struct amdgpu_device *adev)
1445{
1446	struct ci_power_info *pi = ci_get_pi(adev);
1447	int ret = 0;
1448	u32 low_sclk_interrupt_t = 0;
1449
1450	if (pi->caps_sclk_throttle_low_notification) {
1451		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1452
1453		ret = amdgpu_ci_copy_bytes_to_smc(adev,
1454					   pi->dpm_table_start +
1455					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1456					   (u8 *)&low_sclk_interrupt_t,
1457					   sizeof(u32), pi->sram_end);
1458
1459	}
1460
1461	return ret;
1462}
1463
1464static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1465{
1466	struct ci_power_info *pi = ci_get_pi(adev);
1467	u16 leakage_id, virtual_voltage_id;
1468	u16 vddc, vddci;
1469	int i;
1470
1471	pi->vddc_leakage.count = 0;
1472	pi->vddci_leakage.count = 0;
1473
1474	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1475		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1476			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1477			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1478				continue;
1479			if (vddc != 0 && vddc != virtual_voltage_id) {
1480				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1481				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1482				pi->vddc_leakage.count++;
1483			}
1484		}
1485	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1486		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1487			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1488			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1489										     virtual_voltage_id,
1490										     leakage_id) == 0) {
1491				if (vddc != 0 && vddc != virtual_voltage_id) {
1492					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1493					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1494					pi->vddc_leakage.count++;
1495				}
1496				if (vddci != 0 && vddci != virtual_voltage_id) {
1497					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1498					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1499					pi->vddci_leakage.count++;
1500				}
1501			}
1502		}
1503	}
1504}
1505
1506static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1507{
1508	struct ci_power_info *pi = ci_get_pi(adev);
1509	bool want_thermal_protection;
1510	enum amdgpu_dpm_event_src dpm_event_src;
1511	u32 tmp;
1512
1513	switch (sources) {
1514	case 0:
1515	default:
1516		want_thermal_protection = false;
1517		break;
1518	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1519		want_thermal_protection = true;
1520		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1521		break;
1522	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1523		want_thermal_protection = true;
1524		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1525		break;
1526	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1527	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1528		want_thermal_protection = true;
1529		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1530		break;
1531	}
1532
1533	if (want_thermal_protection) {
1534#if 0
1535		/* XXX: need to figure out how to handle this properly */
1536		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1537		tmp &= DPM_EVENT_SRC_MASK;
1538		tmp |= DPM_EVENT_SRC(dpm_event_src);
1539		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1540#endif
1541
1542		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1543		if (pi->thermal_protection)
1544			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1545		else
1546			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1547		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1548	} else {
1549		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1550		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1551		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1552	}
1553}
1554
1555static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1556					   enum amdgpu_dpm_auto_throttle_src source,
1557					   bool enable)
1558{
1559	struct ci_power_info *pi = ci_get_pi(adev);
1560
1561	if (enable) {
1562		if (!(pi->active_auto_throttle_sources & (1 << source))) {
1563			pi->active_auto_throttle_sources |= 1 << source;
1564			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1565		}
1566	} else {
1567		if (pi->active_auto_throttle_sources & (1 << source)) {
1568			pi->active_auto_throttle_sources &= ~(1 << source);
1569			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1570		}
1571	}
1572}
1573
1574static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1575{
1576	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1577		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1578}
1579
1580static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1581{
1582	struct ci_power_info *pi = ci_get_pi(adev);
1583	PPSMC_Result smc_result;
1584
1585	if (!pi->need_update_smu7_dpm_table)
1586		return 0;
1587
1588	if ((!pi->sclk_dpm_key_disabled) &&
1589	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1590		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1591		if (smc_result != PPSMC_Result_OK)
1592			return -EINVAL;
1593	}
1594
1595	if ((!pi->mclk_dpm_key_disabled) &&
1596	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1597		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1598		if (smc_result != PPSMC_Result_OK)
1599			return -EINVAL;
1600	}
1601
1602	pi->need_update_smu7_dpm_table = 0;
1603	return 0;
1604}
1605
1606static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1607{
1608	struct ci_power_info *pi = ci_get_pi(adev);
1609	PPSMC_Result smc_result;
1610
1611	if (enable) {
1612		if (!pi->sclk_dpm_key_disabled) {
1613			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1614			if (smc_result != PPSMC_Result_OK)
1615				return -EINVAL;
1616		}
1617
1618		if (!pi->mclk_dpm_key_disabled) {
1619			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1620			if (smc_result != PPSMC_Result_OK)
1621				return -EINVAL;
1622
1623			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1624					~MC_SEQ_CNTL_3__CAC_EN_MASK);
1625
1626			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1627			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1628			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1629
1630			udelay(10);
1631
1632			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1633			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1634			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1635		}
1636	} else {
1637		if (!pi->sclk_dpm_key_disabled) {
1638			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1639			if (smc_result != PPSMC_Result_OK)
1640				return -EINVAL;
1641		}
1642
1643		if (!pi->mclk_dpm_key_disabled) {
1644			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1645			if (smc_result != PPSMC_Result_OK)
1646				return -EINVAL;
1647		}
1648	}
1649
1650	return 0;
1651}
1652
1653static int ci_start_dpm(struct amdgpu_device *adev)
1654{
1655	struct ci_power_info *pi = ci_get_pi(adev);
1656	PPSMC_Result smc_result;
1657	int ret;
1658	u32 tmp;
1659
1660	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1661	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1662	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1663
1664	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1665	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1666	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1667
1668	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1669
1670	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1671
1672	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1673	if (smc_result != PPSMC_Result_OK)
1674		return -EINVAL;
1675
1676	ret = ci_enable_sclk_mclk_dpm(adev, true);
1677	if (ret)
1678		return ret;
1679
1680	if (!pi->pcie_dpm_key_disabled) {
1681		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1682		if (smc_result != PPSMC_Result_OK)
1683			return -EINVAL;
1684	}
1685
1686	return 0;
1687}
1688
1689static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1690{
1691	struct ci_power_info *pi = ci_get_pi(adev);
1692	PPSMC_Result smc_result;
1693
1694	if (!pi->need_update_smu7_dpm_table)
1695		return 0;
1696
1697	if ((!pi->sclk_dpm_key_disabled) &&
1698	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1699		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1700		if (smc_result != PPSMC_Result_OK)
1701			return -EINVAL;
1702	}
1703
1704	if ((!pi->mclk_dpm_key_disabled) &&
1705	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1706		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1707		if (smc_result != PPSMC_Result_OK)
1708			return -EINVAL;
1709	}
1710
1711	return 0;
1712}
1713
1714static int ci_stop_dpm(struct amdgpu_device *adev)
1715{
1716	struct ci_power_info *pi = ci_get_pi(adev);
1717	PPSMC_Result smc_result;
1718	int ret;
1719	u32 tmp;
1720
1721	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1722	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1723	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1724
1725	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1726	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1727	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1728
1729	if (!pi->pcie_dpm_key_disabled) {
1730		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1731		if (smc_result != PPSMC_Result_OK)
1732			return -EINVAL;
1733	}
1734
1735	ret = ci_enable_sclk_mclk_dpm(adev, false);
1736	if (ret)
1737		return ret;
1738
1739	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1740	if (smc_result != PPSMC_Result_OK)
1741		return -EINVAL;
1742
1743	return 0;
1744}
1745
1746static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1747{
1748	u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1749
1750	if (enable)
1751		tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1752	else
1753		tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1754	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1755}
1756
1757#if 0
1758static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1759					bool ac_power)
1760{
1761	struct ci_power_info *pi = ci_get_pi(adev);
1762	struct amdgpu_cac_tdp_table *cac_tdp_table =
1763		adev->pm.dpm.dyn_state.cac_tdp_table;
1764	u32 power_limit;
1765
1766	if (ac_power)
1767		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1768	else
1769		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1770
1771	ci_set_power_limit(adev, power_limit);
1772
1773	if (pi->caps_automatic_dc_transition) {
1774		if (ac_power)
1775			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1776		else
1777			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1778	}
1779
1780	return 0;
1781}
1782#endif
1783
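/*
 * The SMC mailbox takes one 32-bit argument in SMC_MSG_ARG_0, written
 * before the message is posted; replies are returned in the same
 * register.
 */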
1784static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1785						      PPSMC_Msg msg, u32 parameter)
1786{
1787	WREG32(mmSMC_MSG_ARG_0, parameter);
1788	return amdgpu_ci_send_msg_to_smc(adev, msg);
1789}
1790
1791static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1792							PPSMC_Msg msg, u32 *parameter)
1793{
1794	PPSMC_Result smc_result;
1795
1796	smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1797
1798	if ((smc_result == PPSMC_Result_OK) && parameter)
1799		*parameter = RREG32(mmSMC_MSG_ARG_0);
1800
1801	return smc_result;
1802}
1803
1804static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1805{
1806	struct ci_power_info *pi = ci_get_pi(adev);
1807
1808	if (!pi->sclk_dpm_key_disabled) {
1809		PPSMC_Result smc_result =
1810			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1811		if (smc_result != PPSMC_Result_OK)
1812			return -EINVAL;
1813	}
1814
1815	return 0;
1816}
1817
1818static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1819{
1820	struct ci_power_info *pi = ci_get_pi(adev);
1821
1822	if (!pi->mclk_dpm_key_disabled) {
1823		PPSMC_Result smc_result =
1824			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1825		if (smc_result != PPSMC_Result_OK)
1826			return -EINVAL;
1827	}
1828
1829	return 0;
1830}
1831
1832static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1833{
1834	struct ci_power_info *pi = ci_get_pi(adev);
1835
1836	if (!pi->pcie_dpm_key_disabled) {
1837		PPSMC_Result smc_result =
1838			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1839		if (smc_result != PPSMC_Result_OK)
1840			return -EINVAL;
1841	}
1842
1843	return 0;
1844}
1845
1846static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1847{
1848	struct ci_power_info *pi = ci_get_pi(adev);
1849
1850	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1851		PPSMC_Result smc_result =
1852			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1853		if (smc_result != PPSMC_Result_OK)
1854			return -EINVAL;
1855	}
1856
1857	return 0;
1858}
1859
1860static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1861				       u32 target_tdp)
1862{
1863	PPSMC_Result smc_result =
1864		amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1865	if (smc_result != PPSMC_Result_OK)
1866		return -EINVAL;
1867	return 0;
1868}
1869
1870#if 0
1871static int ci_set_boot_state(struct amdgpu_device *adev)
1872{
1873	return ci_enable_sclk_mclk_dpm(adev, false);
1874}
1875#endif
1876
1877static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1878{
1879	u32 sclk_freq;
1880	PPSMC_Result smc_result =
1881		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1882						    PPSMC_MSG_API_GetSclkFrequency,
1883						    &sclk_freq);
1884	if (smc_result != PPSMC_Result_OK)
1885		sclk_freq = 0;
1886
1887	return sclk_freq;
1888}
1889
1890static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1891{
1892	u32 mclk_freq;
1893	PPSMC_Result smc_result =
1894		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1895						    PPSMC_MSG_API_GetMclkFrequency,
1896						    &mclk_freq);
1897	if (smc_result != PPSMC_Result_OK)
1898		mclk_freq = 0;
1899
1900	return mclk_freq;
1901}
1902
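/*
 * Program the SMC boot jump, start its clock and core, then wait
 * (bounded by adev->usec_timeout) for the firmware to signal that its
 * interrupt handling is up.
 */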
1903static void ci_dpm_start_smc(struct amdgpu_device *adev)
1904{
1905	int i;
1906
1907	amdgpu_ci_program_jump_on_start(adev);
1908	amdgpu_ci_start_smc_clock(adev);
1909	amdgpu_ci_start_smc(adev);
1910	for (i = 0; i < adev->usec_timeout; i++) {
1911		if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1912			break;
1913	}
1914}
1915
1916static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1917{
1918	amdgpu_ci_reset_smc(adev);
1919	amdgpu_ci_stop_smc_clock(adev);
1920}
1921
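/*
 * The SMU7 firmware header holds the SRAM offsets of the tables the
 * driver uploads at runtime (DPM table, soft registers, MC register
 * table, fan table, MC arb timing table); cache them in ci_power_info.
 */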
1922static int ci_process_firmware_header(struct amdgpu_device *adev)
1923{
1924	struct ci_power_info *pi = ci_get_pi(adev);
1925	u32 tmp;
1926	int ret;
1927
1928	ret = amdgpu_ci_read_smc_sram_dword(adev,
1929				     SMU7_FIRMWARE_HEADER_LOCATION +
1930				     offsetof(SMU7_Firmware_Header, DpmTable),
1931				     &tmp, pi->sram_end);
1932	if (ret)
1933		return ret;
1934
1935	pi->dpm_table_start = tmp;
1936
1937	ret = amdgpu_ci_read_smc_sram_dword(adev,
1938				     SMU7_FIRMWARE_HEADER_LOCATION +
1939				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1940				     &tmp, pi->sram_end);
1941	if (ret)
1942		return ret;
1943
1944	pi->soft_regs_start = tmp;
1945
1946	ret = amdgpu_ci_read_smc_sram_dword(adev,
1947				     SMU7_FIRMWARE_HEADER_LOCATION +
1948				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1949				     &tmp, pi->sram_end);
1950	if (ret)
1951		return ret;
1952
1953	pi->mc_reg_table_start = tmp;
1954
1955	ret = amdgpu_ci_read_smc_sram_dword(adev,
1956				     SMU7_FIRMWARE_HEADER_LOCATION +
1957				     offsetof(SMU7_Firmware_Header, FanTable),
1958				     &tmp, pi->sram_end);
1959	if (ret)
1960		return ret;
1961
1962	pi->fan_table_start = tmp;
1963
1964	ret = amdgpu_ci_read_smc_sram_dword(adev,
1965				     SMU7_FIRMWARE_HEADER_LOCATION +
1966				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1967				     &tmp, pi->sram_end);
1968	if (ret)
1969		return ret;
1970
1971	pi->arb_table_start = tmp;
1972
1973	return 0;
1974}
1975
1976static void ci_read_clock_registers(struct amdgpu_device *adev)
1977{
1978	struct ci_power_info *pi = ci_get_pi(adev);
1979
1980	pi->clock_registers.cg_spll_func_cntl =
1981		RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1982	pi->clock_registers.cg_spll_func_cntl_2 =
1983		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1984	pi->clock_registers.cg_spll_func_cntl_3 =
1985		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1986	pi->clock_registers.cg_spll_func_cntl_4 =
1987		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1988	pi->clock_registers.cg_spll_spread_spectrum =
1989		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1990	pi->clock_registers.cg_spll_spread_spectrum_2 =
1991		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1992	pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1993	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1994	pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1995	pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1996	pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1997	pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1998	pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1999	pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2000	pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2001}
2002
2003static void ci_init_sclk_t(struct amdgpu_device *adev)
2004{
2005	struct ci_power_info *pi = ci_get_pi(adev);
2006
2007	pi->low_sclk_interrupt_t = 0;
2008}
2009
2010static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2011					 bool enable)
2012{
2013	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2014
2015	if (enable)
2016		tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2017	else
2018		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2019	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2020}
2021
2022static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2023{
2024	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2025
2026	tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2027
2028	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2029}
2030
2031#if 0
2032static int ci_enter_ulp_state(struct amdgpu_device *adev)
2033{
2035	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2036
2037	udelay(25000);
2038
2039	return 0;
2040}
2041
2042static int ci_exit_ulp_state(struct amdgpu_device *adev)
2043{
2044	int i;
2045
2046	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2047
2048	udelay(7000);
2049
2050	for (i = 0; i < adev->usec_timeout; i++) {
2051		if (RREG32(mmSMC_RESP_0) == 1)
2052			break;
2053		udelay(1000);
2054	}
2055
2056	return 0;
2057}
2058#endif
2059
2060static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2061					bool has_display)
2062{
2063	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2064
2065	return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2066}
2067
2068static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2069				      bool enable)
2070{
2071	struct ci_power_info *pi = ci_get_pi(adev);
2072
2073	if (enable) {
2074		if (pi->caps_sclk_ds) {
2075			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2076				return -EINVAL;
2077		} else {
2078			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2079				return -EINVAL;
2080		}
2081	} else {
2082		if (pi->caps_sclk_ds) {
2083			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2084				return -EINVAL;
2085		}
2086	}
2087
2088	return 0;
2089}
2090
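/*
 * Program the display gap parameters so memory reclocking can be done
 * while the displays are in vblank. pre_vbi_time is the frame time
 * minus the vblank time and a 200 us margin, converted to reference
 * clock ticks (ref_clock is in 10 kHz units, so ref_clock / 100 is
 * ticks per microsecond). The SMC is only told a display is present
 * in the single-CRTC case.
 */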
2091static void ci_program_display_gap(struct amdgpu_device *adev)
2092{
2093	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2094	u32 pre_vbi_time_in_us;
2095	u32 frame_time_in_us;
2096	u32 ref_clock = adev->clock.spll.reference_freq;
2097	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2098	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2099
2100	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2101	if (adev->pm.dpm.new_active_crtc_count > 0)
2102		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2103	else
2104		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2105	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2106
2107	if (refresh_rate == 0)
2108		refresh_rate = 60;
2109	if (vblank_time == 0xffffffff)
2110		vblank_time = 500;
2111	frame_time_in_us = 1000000 / refresh_rate;
2112	pre_vbi_time_in_us =
2113		frame_time_in_us - 200 - vblank_time;
2114	tmp = pre_vbi_time_in_us * (ref_clock / 100);
2115
2116	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2117	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2118	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2119
2121	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2123}
2124
2125static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2126{
2127	struct ci_power_info *pi = ci_get_pi(adev);
2128	u32 tmp;
2129
2130	if (enable) {
2131		if (pi->caps_sclk_ss_support) {
2132			tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2133			tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2134			WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2135		}
2136	} else {
2137		tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2138		tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2139		WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2140
2141		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2142		tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2143		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2144	}
2145}
2146
2147static void ci_program_sstp(struct amdgpu_device *adev)
2148{
2149	WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2150		   ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2151		    (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2152}
2153
2154static void ci_enable_display_gap(struct amdgpu_device *adev)
2155{
2156	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2157
2158	tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2159			CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2160	tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2161		(AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2162
2163	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2164}
2165
2166static void ci_program_vc(struct amdgpu_device *adev)
2167{
2168	u32 tmp;
2169
2170	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2171	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2172	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2173
2174	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2175	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2176	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2177	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2178	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2179	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2180	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2181	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2182}
2183
2184static void ci_clear_vc(struct amdgpu_device *adev)
2185{
2186	u32 tmp;
2187
2188	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2189	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2190	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2191
2192	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2193	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2194	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2195	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2196	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2197	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2198	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2199	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2200}
2201
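/*
 * Load the SMC microcode: skip the upload if the SMC is already
 * running, otherwise wait for the boot ROM to finish, stop the SMC
 * clock, reset the core and copy the firmware image into SMC SRAM.
 */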
2202static int ci_upload_firmware(struct amdgpu_device *adev)
2203{
2204	struct ci_power_info *pi = ci_get_pi(adev);
2205	int i, ret;
2206
2207	if (amdgpu_ci_is_smc_running(adev)) {
2208		DRM_INFO("smc is running, no need to load smc firmware\n");
2209		return 0;
2210	}
2211
2212	for (i = 0; i < adev->usec_timeout; i++) {
2213		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2214			break;
2215	}
2216	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2217
2218	amdgpu_ci_stop_smc_clock(adev);
2219	amdgpu_ci_reset_smc(adev);
2220
2221	ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2222
2223	return ret;
2225}
2226
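/*
 * SVI2 regulators have no GPIO lookup table; build the voltage table
 * straight from the clock/voltage dependency entries, with no SMIO
 * masking or phase delay.
 */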
2227static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2228				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2229				     struct atom_voltage_table *voltage_table)
2230{
2231	u32 i;
2232
2233	if (voltage_dependency_table == NULL)
2234		return -EINVAL;
2235
2236	voltage_table->mask_low = 0;
2237	voltage_table->phase_delay = 0;
2238
2239	voltage_table->count = voltage_dependency_table->count;
2240	for (i = 0; i < voltage_table->count; i++) {
2241		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2242		voltage_table->entries[i].smio_low = 0;
2243	}
2244
2245	return 0;
2246}
2247
2248static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2249{
2250	struct ci_power_info *pi = ci_get_pi(adev);
2251	int ret;
2252
2253	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2254		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2255							VOLTAGE_OBJ_GPIO_LUT,
2256							&pi->vddc_voltage_table);
2257		if (ret)
2258			return ret;
2259	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2260		ret = ci_get_svi2_voltage_table(adev,
2261						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2262						&pi->vddc_voltage_table);
2263		if (ret)
2264			return ret;
2265	}
2266
2267	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2268		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2269							 &pi->vddc_voltage_table);
2270
2271	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2272		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2273							VOLTAGE_OBJ_GPIO_LUT,
2274							&pi->vddci_voltage_table);
2275		if (ret)
2276			return ret;
2277	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2278		ret = ci_get_svi2_voltage_table(adev,
2279						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2280						&pi->vddci_voltage_table);
2281		if (ret)
2282			return ret;
2283	}
2284
2285	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2286		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2287							 &pi->vddci_voltage_table);
2288
2289	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2290		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2291							VOLTAGE_OBJ_GPIO_LUT,
2292							&pi->mvdd_voltage_table);
2293		if (ret)
2294			return ret;
2295	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2296		ret = ci_get_svi2_voltage_table(adev,
2297						&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2298						&pi->mvdd_voltage_table);
2299		if (ret)
2300			return ret;
2301	}
2302
2303	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2304		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2305							 &pi->mvdd_voltage_table);
2306
2307	return 0;
2308}
2309
2310static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2311					  struct atom_voltage_table_entry *voltage_table,
2312					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
2313{
2314	int ret;
2315
2316	ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2317					    &smc_voltage_table->StdVoltageHiSidd,
2318					    &smc_voltage_table->StdVoltageLoSidd);
2319
2320	if (ret) {
2321		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2322		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2323	}
2324
2325	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2326	smc_voltage_table->StdVoltageHiSidd =
2327		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2328	smc_voltage_table->StdVoltageLoSidd =
2329		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2330}
2331
2332static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2333				      SMU7_Discrete_DpmTable *table)
2334{
2335	struct ci_power_info *pi = ci_get_pi(adev);
2336	unsigned int count;
2337
2338	table->VddcLevelCount = pi->vddc_voltage_table.count;
2339	for (count = 0; count < table->VddcLevelCount; count++) {
2340		ci_populate_smc_voltage_table(adev,
2341					      &pi->vddc_voltage_table.entries[count],
2342					      &table->VddcLevel[count]);
2343
2344		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2345			table->VddcLevel[count].Smio |=
2346				pi->vddc_voltage_table.entries[count].smio_low;
2347		else
2348			table->VddcLevel[count].Smio = 0;
2349	}
2350	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2351
2352	return 0;
2353}
2354
2355static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2356				       SMU7_Discrete_DpmTable *table)
2357{
2358	unsigned int count;
2359	struct ci_power_info *pi = ci_get_pi(adev);
2360
2361	table->VddciLevelCount = pi->vddci_voltage_table.count;
2362	for (count = 0; count < table->VddciLevelCount; count++) {
2363		ci_populate_smc_voltage_table(adev,
2364					      &pi->vddci_voltage_table.entries[count],
2365					      &table->VddciLevel[count]);
2366
2367		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2368			table->VddciLevel[count].Smio |=
2369				pi->vddci_voltage_table.entries[count].smio_low;
2370		else
2371			table->VddciLevel[count].Smio = 0;
2372	}
2373	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2374
2375	return 0;
2376}
2377
2378static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2379				      SMU7_Discrete_DpmTable *table)
2380{
2381	struct ci_power_info *pi = ci_get_pi(adev);
2382	unsigned int count;
2383
2384	table->MvddLevelCount = pi->mvdd_voltage_table.count;
2385	for (count = 0; count < table->MvddLevelCount; count++) {
2386		ci_populate_smc_voltage_table(adev,
2387					      &pi->mvdd_voltage_table.entries[count],
2388					      &table->MvddLevel[count]);
2389
2390		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2391			table->MvddLevel[count].Smio |=
2392				pi->mvdd_voltage_table.entries[count].smio_low;
2393		else
2394			table->MvddLevel[count].Smio = 0;
2395	}
2396	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2397
2398	return 0;
2399}
2400
2401static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2402					  SMU7_Discrete_DpmTable *table)
2403{
2404	int ret;
2405
2406	ret = ci_populate_smc_vddc_table(adev, table);
2407	if (ret)
2408		return ret;
2409
2410	ret = ci_populate_smc_vddci_table(adev, table);
2411	if (ret)
2412		return ret;
2413
2414	ret = ci_populate_smc_mvdd_table(adev, table);
2415	if (ret)
2416		return ret;
2417
2418	return 0;
2419}
2420
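/*
 * Look up the MVDD level for the given memory clock in the
 * mvdd-on-mclk dependency table (entries are ordered by clock).
 */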
2421static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2422				  SMU7_Discrete_VoltageLevel *voltage)
2423{
2424	struct ci_power_info *pi = ci_get_pi(adev);
2425	u32 i = 0;
2426
2427	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2428		for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2429			if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2430				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2431				break;
2432			}
2433		}
2434
2435		if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2436			return -EINVAL;

		/* a matching entry was found; voltage->Voltage is valid */
		return 0;
2437	}

	/* no MVDD control: let the caller fall back to a zero MVDD level */
2439	return -EINVAL;
2440}
2441
2442static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2443					 struct atom_voltage_table_entry *voltage_table,
2444					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2445{
2446	u16 v_index, idx;
2447	bool voltage_found = false;
2448	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2449	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2450
2451	if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2452		return -EINVAL;
2453
2454	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2455		for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2456			if (voltage_table->value ==
2457			    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2458				voltage_found = true;
2459				if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2460					idx = v_index;
2461				else
2462					idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2463				*std_voltage_lo_sidd =
2464					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2465				*std_voltage_hi_sidd =
2466					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2467				break;
2468			}
2469		}
2470
2471		if (!voltage_found) {
2472			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2473				if (voltage_table->value <=
2474				    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2475					voltage_found = true;
2476					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2477						idx = v_index;
2478					else
2479						idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2480					*std_voltage_lo_sidd =
2481						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2482					*std_voltage_hi_sidd =
2483						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2484					break;
2485				}
2486			}
2487		}
2488	}
2489
2490	return 0;
2491}
2492
2493static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2494						  const struct amdgpu_phase_shedding_limits_table *limits,
2495						  u32 sclk,
2496						  u32 *phase_shedding)
2497{
2498	unsigned int i;
2499
2500	*phase_shedding = 1;
2501
2502	for (i = 0; i < limits->count; i++) {
2503		if (sclk < limits->entries[i].sclk) {
2504			*phase_shedding = i;
2505			break;
2506		}
2507	}
2508}
2509
2510static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2511						  const struct amdgpu_phase_shedding_limits_table *limits,
2512						  u32 mclk,
2513						  u32 *phase_shedding)
2514{
2515	unsigned int i;
2516
2517	*phase_shedding = 1;
2518
2519	for (i = 0; i < limits->count; i++) {
2520		if (mclk < limits->entries[i].mclk) {
2521			*phase_shedding = i;
2522			break;
2523		}
2524	}
2525}
2526
2527static int ci_init_arb_table_index(struct amdgpu_device *adev)
2528{
2529	struct ci_power_info *pi = ci_get_pi(adev);
2530	u32 tmp;
2531	int ret;
2532
2533	ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2534				     &tmp, pi->sram_end);
2535	if (ret)
2536		return ret;
2537
2538	tmp &= 0x00FFFFFF;
2539	tmp |= MC_CG_ARB_FREQ_F1 << 24;
2540
2541	return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2542				       tmp, pi->sram_end);
2543}
2544
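/*
 * Return the voltage of the first dependency entry whose clock is at
 * least the requested clock; if the clock is above every entry, fall
 * back to the highest (last) entry.
 */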
2545static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2546					 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2547					 u32 clock, u32 *voltage)
2548{
2549	u32 i = 0;
2550
2551	if (allowed_clock_voltage_table->count == 0)
2552		return -EINVAL;
2553
2554	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2555		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2556			*voltage = allowed_clock_voltage_table->entries[i].v;
2557			return 0;
2558		}
2559	}
2560
2561	*voltage = allowed_clock_voltage_table->entries[i-1].v;
2562
2563	return 0;
2564}
2565
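/*
 * Pick the largest deep sleep divider that still keeps the divided
 * engine clock at or above the minimum sleep clock.
 */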
2566static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2567{
2568	u32 i;
2569	u32 tmp;
2570	u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2571
2572	if (sclk < min)
2573		return 0;
2574
2575	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2576		tmp = sclk >> i;
2577		if (tmp >= min || i == 0)
2578			break;
2579	}
2580
2581	return (u8)i;
2582}
2583
2584static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2585{
2586	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2587}
2588
2589static int ci_reset_to_default(struct amdgpu_device *adev)
2590{
2591	return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2592		0 : -EINVAL;
2593}
2594
2595static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2596{
2597	u32 tmp;
2598
2599	tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2600
2601	if (tmp == MC_CG_ARB_FREQ_F0)
2602		return 0;
2603
2604	return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2605}
2606
2607static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2608					const u32 engine_clock,
2609					const u32 memory_clock,
2610					u32 *dram_timing2)
2611{
2612	bool patch;
2613	u32 tmp, tmp2;
2614
2615	tmp = RREG32(mmMC_SEQ_MISC0);
2616	patch = (tmp & 0x0000f00) == 0x300;
2617
2618	if (patch &&
2619	    ((adev->pdev->device == 0x67B0) ||
2620	     (adev->pdev->device == 0x67B1))) {
2621		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2622			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2623			*dram_timing2 &= ~0x00ff0000;
2624			*dram_timing2 |= tmp2 << 16;
2625		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2626			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2627			*dram_timing2 &= ~0x00ff0000;
2628			*dram_timing2 |= tmp2 << 16;
2629		}
2630	}
2631}
2632
2633static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2634						u32 sclk,
2635						u32 mclk,
2636						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2637{
2638	u32 dram_timing;
2639	u32 dram_timing2;
2640	u32 burst_time;
2641
2642	amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2643
2644	dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2645	dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2646	burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2647
2648	ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2649
2650	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2651	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2652	arb_regs->McArbBurstTime = (u8)burst_time;
2653
2654	return 0;
2655}
2656
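/*
 * Build the full sclk x mclk matrix of MC arbiter DRAM timing entries
 * and upload it to SMC SRAM in a single copy.
 */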
2657static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2658{
2659	struct ci_power_info *pi = ci_get_pi(adev);
2660	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2661	u32 i, j;
2662	int ret =  0;
2663
2664	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2665
2666	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2667		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2668			ret = ci_populate_memory_timing_parameters(adev,
2669								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2670								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2671								   &arb_regs.entries[i][j]);
2672			if (ret)
2673				break;
2674		}
2675	}
2676
2677	if (ret == 0)
2678		ret = amdgpu_ci_copy_bytes_to_smc(adev,
2679					   pi->arb_table_start,
2680					   (u8 *)&arb_regs,
2681					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2682					   pi->sram_end);
2683
2684	return ret;
2685}
2686
2687static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2688{
2689	struct ci_power_info *pi = ci_get_pi(adev);
2690
2691	if (pi->need_update_smu7_dpm_table == 0)
2692		return 0;
2693
2694	return ci_do_program_memory_timing_parameters(adev);
2695}
2696
2697static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2698					  struct amdgpu_ps *amdgpu_boot_state)
2699{
2700	struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2701	struct ci_power_info *pi = ci_get_pi(adev);
2702	u32 level = 0;
2703
2704	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2705		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2706		    boot_state->performance_levels[0].sclk) {
2707			pi->smc_state_table.GraphicsBootLevel = level;
2708			break;
2709		}
2710	}
2711
2712	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2713		if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2714		    boot_state->performance_levels[0].mclk) {
2715			pi->smc_state_table.MemoryBootLevel = level;
2716			break;
2717		}
2718	}
2719}
2720
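/*
 * Convert the per-level enabled flags of a DPM table into the bitmask
 * format the SMC expects (bit n == level n).
 */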
2721static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2722{
2723	u32 i;
2724	u32 mask_value = 0;
2725
2726	for (i = dpm_table->count; i > 0; i--) {
2727		/* the low bit is already zero after the shift */
2728		mask_value <<= 1;
2729		if (dpm_table->dpm_levels[i-1].enabled)
2730			mask_value |= 0x1;
2732	}
2733
2734	return mask_value;
2735}
2736
2737static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2738				       SMU7_Discrete_DpmTable *table)
2739{
2740	struct ci_power_info *pi = ci_get_pi(adev);
2741	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2742	u32 i;
2743
2744	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2745		table->LinkLevel[i].PcieGenSpeed =
2746			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2747		table->LinkLevel[i].PcieLaneCount =
2748			amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2749		table->LinkLevel[i].EnabledForActivity = 1;
2750		table->LinkLevel[i].DownT = cpu_to_be32(5);
2751		table->LinkLevel[i].UpT = cpu_to_be32(30);
2752	}
2753
2754	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2755	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2756		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2757}
2758
2759static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2760				     SMU7_Discrete_DpmTable *table)
2761{
2762	u32 count;
2763	struct atom_clock_dividers dividers;
2764	int ret = -EINVAL;
2765
2766	table->UvdLevelCount =
2767		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2768
2769	for (count = 0; count < table->UvdLevelCount; count++) {
2770		table->UvdLevel[count].VclkFrequency =
2771			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2772		table->UvdLevel[count].DclkFrequency =
2773			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2774		table->UvdLevel[count].MinVddc =
2775			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2776		table->UvdLevel[count].MinVddcPhases = 1;
2777
2778		ret = amdgpu_atombios_get_clock_dividers(adev,
2779							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2780							 table->UvdLevel[count].VclkFrequency, false, &dividers);
2781		if (ret)
2782			return ret;
2783
2784		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2785
2786		ret = amdgpu_atombios_get_clock_dividers(adev,
2787							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2788							 table->UvdLevel[count].DclkFrequency, false, &dividers);
2789		if (ret)
2790			return ret;
2791
2792		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2793
2794		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2795		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2796		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2797	}
2798
2799	return ret;
2800}
2801
2802static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2803				     SMU7_Discrete_DpmTable *table)
2804{
2805	u32 count;
2806	struct atom_clock_dividers dividers;
2807	int ret = -EINVAL;
2808
2809	table->VceLevelCount =
2810		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2811
2812	for (count = 0; count < table->VceLevelCount; count++) {
2813		table->VceLevel[count].Frequency =
2814			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2815		table->VceLevel[count].MinVoltage =
2816			(u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2817		table->VceLevel[count].MinPhases = 1;
2818
2819		ret = amdgpu_atombios_get_clock_dividers(adev,
2820							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2821							 table->VceLevel[count].Frequency, false, &dividers);
2822		if (ret)
2823			return ret;
2824
2825		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2826
2827		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2828		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2829	}
2830
2831	return ret;
2833}
2834
2835static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2836				     SMU7_Discrete_DpmTable *table)
2837{
2838	u32 count;
2839	struct atom_clock_dividers dividers;
2840	int ret = -EINVAL;
2841
2842	table->AcpLevelCount = (u8)
2843		(adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2844
2845	for (count = 0; count < table->AcpLevelCount; count++) {
2846		table->AcpLevel[count].Frequency =
2847			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2848		table->AcpLevel[count].MinVoltage =
2849			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2850		table->AcpLevel[count].MinPhases = 1;
2851
2852		ret = amdgpu_atombios_get_clock_dividers(adev,
2853							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2854							 table->AcpLevel[count].Frequency, false, &dividers);
2855		if (ret)
2856			return ret;
2857
2858		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2859
2860		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2861		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2862	}
2863
2864	return ret;
2865}
2866
2867static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2868				      SMU7_Discrete_DpmTable *table)
2869{
2870	u32 count;
2871	struct atom_clock_dividers dividers;
2872	int ret = -EINVAL;
2873
2874	table->SamuLevelCount =
2875		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2876
2877	for (count = 0; count < table->SamuLevelCount; count++) {
2878		table->SamuLevel[count].Frequency =
2879			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2880		table->SamuLevel[count].MinVoltage =
2881			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2882		table->SamuLevel[count].MinPhases = 1;
2883
2884		ret = amdgpu_atombios_get_clock_dividers(adev,
2885							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2886							 table->SamuLevel[count].Frequency, false, &dividers);
2887		if (ret)
2888			return ret;
2889
2890		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2891
2892		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2893		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2894	}
2895
2896	return ret;
2897}
2898
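/*
 * Translate a memory clock into MPLL register values: feedback and
 * post dividers from the VBIOS, YCLK setup for GDDR5, optional memory
 * spread spectrum (CLKS/CLKV), DLL speed and power-down control.
 */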
2899static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2900				    u32 memory_clock,
2901				    SMU7_Discrete_MemoryLevel *mclk,
2902				    bool strobe_mode,
2903				    bool dll_state_on)
2904{
2905	struct ci_power_info *pi = ci_get_pi(adev);
2906	u32  dll_cntl = pi->clock_registers.dll_cntl;
2907	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2908	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2909	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2910	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2911	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2912	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2913	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2914	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2915	struct atom_mpll_param mpll_param;
2916	int ret;
2917
2918	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2919	if (ret)
2920		return ret;
2921
2922	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2923	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2924
2925	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2926			MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2927	mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2928		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2929		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2930
2931	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2932	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2933
2934	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2935		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2936				MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2937		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2938				(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2939	}
2940
2941	if (pi->caps_mclk_ss_support) {
2942		struct amdgpu_atom_ss ss;
2943		u32 freq_nom;
2944		u32 tmp;
2945		u32 reference_clock = adev->clock.mpll.reference_freq;
2946
2947		if (mpll_param.qdr == 1)
2948			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2949		else
2950			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2951
2952		tmp = (freq_nom / reference_clock);
2953		tmp = tmp * tmp;
2954		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2955						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2956			u32 clks = reference_clock * 5 / ss.rate;
2957			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2958
2959			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2960			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2961
2962			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2963			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2964		}
2965	}
2966
2967	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2968	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2969
2970	if (dll_state_on)
2971		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2972			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2973	else
2974		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2975			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2976
2977	mclk->MclkFrequency = memory_clock;
2978	mclk->MpllFuncCntl = mpll_func_cntl;
2979	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2980	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2981	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2982	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2983	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2984	mclk->DllCntl = dll_cntl;
2985	mclk->MpllSs1 = mpll_ss1;
2986	mclk->MpllSs2 = mpll_ss2;
2987
2988	return 0;
2989}
2990
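/*
 * Build a single SMC memory level: minimum voltages from the
 * dependency tables, stutter/strobe/EDC enables based on the
 * configured thresholds, then the MPLL parameters; multi-byte fields
 * are converted to the SMC's big-endian layout at the end.
 */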
2991static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2992					   u32 memory_clock,
2993					   SMU7_Discrete_MemoryLevel *memory_level)
2994{
2995	struct ci_power_info *pi = ci_get_pi(adev);
2996	int ret;
2997	bool dll_state_on;
2998
2999	if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3000		ret = ci_get_dependency_volt_by_clk(adev,
3001						    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3002						    memory_clock, &memory_level->MinVddc);
3003		if (ret)
3004			return ret;
3005	}
3006
3007	if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3008		ret = ci_get_dependency_volt_by_clk(adev,
3009						    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3010						    memory_clock, &memory_level->MinVddci);
3011		if (ret)
3012			return ret;
3013	}
3014
3015	if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3016		ret = ci_get_dependency_volt_by_clk(adev,
3017						    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3018						    memory_clock, &memory_level->MinMvdd);
3019		if (ret)
3020			return ret;
3021	}
3022
3023	memory_level->MinVddcPhases = 1;
3024
3025	if (pi->vddc_phase_shed_control)
3026		ci_populate_phase_value_based_on_mclk(adev,
3027						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3028						      memory_clock,
3029						      &memory_level->MinVddcPhases);
3030
3031	memory_level->EnabledForThrottle = 1;
3032	memory_level->UpH = 0;
3033	memory_level->DownH = 100;
3034	memory_level->VoltageDownH = 0;
3035	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3036
3037	memory_level->StutterEnable = false;
3038	memory_level->StrobeEnable = false;
3039	memory_level->EdcReadEnable = false;
3040	memory_level->EdcWriteEnable = false;
3041	memory_level->RttEnable = false;
3042
3043	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3044
3045	if (pi->mclk_stutter_mode_threshold &&
3046	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3047	    (!pi->uvd_enabled) &&
3048	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3049	    (adev->pm.dpm.new_active_crtc_count <= 2))
3050		memory_level->StutterEnable = true;
3051
3052	if (pi->mclk_strobe_mode_threshold &&
3053	    (memory_clock <= pi->mclk_strobe_mode_threshold))
3054		memory_level->StrobeEnable = true;
3055
3056	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3057		memory_level->StrobeRatio =
3058			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3059		if (pi->mclk_edc_enable_threshold &&
3060		    (memory_clock > pi->mclk_edc_enable_threshold))
3061			memory_level->EdcReadEnable = true;
3062
3063		if (pi->mclk_edc_wr_enable_threshold &&
3064		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
3065			memory_level->EdcWriteEnable = true;
3066
3067		if (memory_level->StrobeEnable) {
3068			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3069			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3070				dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3071			else
3072				dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
3073		} else {
3074			dll_state_on = pi->dll_default_on;
3075		}
3076	} else {
3077		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3078		dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3079	}
3080
3081	ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3082	if (ret)
3083		return ret;
3084
3085	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3086	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3087	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3088	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3089
3090	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3091	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3092	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3093	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3094	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3095	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3096	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3097	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3098	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3099	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3100	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3101
3102	return 0;
3103}
3104
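/*
 * Fill in the ACPI (lowest power) level: run the engine clock from the
 * reference clock with the SPLL in reset, and park the memory PLL with
 * the memory DLLs held in reset and powered down.
 */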
3105static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3106				      SMU7_Discrete_DpmTable *table)
3107{
3108	struct ci_power_info *pi = ci_get_pi(adev);
3109	struct atom_clock_dividers dividers;
3110	SMU7_Discrete_VoltageLevel voltage_level;
3111	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3112	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3113	u32 dll_cntl = pi->clock_registers.dll_cntl;
3114	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3115	int ret;
3116
3117	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3118
3119	if (pi->acpi_vddc)
3120		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3121	else
3122		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3123
3124	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3125
3126	table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3127
3128	ret = amdgpu_atombios_get_clock_dividers(adev,
3129						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3130						 table->ACPILevel.SclkFrequency, false, &dividers);
3131	if (ret)
3132		return ret;
3133
3134	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3135	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3136	table->ACPILevel.DeepSleepDivId = 0;
3137
3138	spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3139	spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3140
3141	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3142	spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3143
3144	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3145	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3146	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3147	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3148	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3149	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3150	table->ACPILevel.CcPwrDynRm = 0;
3151	table->ACPILevel.CcPwrDynRm1 = 0;
3152
3153	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3154	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3155	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3156	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3157	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3158	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3159	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3160	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3161	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3162	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3163	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3164
3165	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3166	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3167
3168	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3169		if (pi->acpi_vddci)
3170			table->MemoryACPILevel.MinVddci =
3171				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3172		else
3173			table->MemoryACPILevel.MinVddci =
3174				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3175	}
3176
3177	if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3178		table->MemoryACPILevel.MinMvdd = 0;
3179	else
3180		table->MemoryACPILevel.MinMvdd =
3181			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3182
3183	mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3184		MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3185	mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3186			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3187
3188	dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3189
3190	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3191	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3192	table->MemoryACPILevel.MpllAdFuncCntl =
3193		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3194	table->MemoryACPILevel.MpllDqFuncCntl =
3195		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3196	table->MemoryACPILevel.MpllFuncCntl =
3197		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3198	table->MemoryACPILevel.MpllFuncCntl_1 =
3199		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3200	table->MemoryACPILevel.MpllFuncCntl_2 =
3201		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3202	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3203	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3204
3205	table->MemoryACPILevel.EnabledForThrottle = 0;
3206	table->MemoryACPILevel.EnabledForActivity = 0;
3207	table->MemoryACPILevel.UpH = 0;
3208	table->MemoryACPILevel.DownH = 100;
3209	table->MemoryACPILevel.VoltageDownH = 0;
3210	table->MemoryACPILevel.ActivityLevel =
3211		cpu_to_be16((u16)pi->mclk_activity_target);
3212
3213	table->MemoryACPILevel.StutterEnable = false;
3214	table->MemoryACPILevel.StrobeEnable = false;
3215	table->MemoryACPILevel.EdcReadEnable = false;
3216	table->MemoryACPILevel.EdcWriteEnable = false;
3217	table->MemoryACPILevel.RttEnable = false;
3218
3219	return 0;
3220}
3221
3223static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3224{
3225	struct ci_power_info *pi = ci_get_pi(adev);
3226	struct ci_ulv_parm *ulv = &pi->ulv;
3227
3228	if (ulv->supported) {
3229		if (enable)
3230			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3231				0 : -EINVAL;
3232		else
3233			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3234				0 : -EINVAL;
3235	}
3236
3237	return 0;
3238}
3239
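/*
 * A zero ULV voltage (carried in the backbias_response_time field)
 * means ULV is unsupported; otherwise program a raw VDDC offset
 * (non-SVI2) or a VID offset (SVI2) below the lowest sclk-dependency
 * voltage.
 */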
3240static int ci_populate_ulv_level(struct amdgpu_device *adev,
3241				 SMU7_Discrete_Ulv *state)
3242{
3243	struct ci_power_info *pi = ci_get_pi(adev);
3244	u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3245
3246	state->CcPwrDynRm = 0;
3247	state->CcPwrDynRm1 = 0;
3248
3249	if (ulv_voltage == 0) {
3250		pi->ulv.supported = false;
3251		return 0;
3252	}
3253
3254	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3255		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3256			state->VddcOffset = 0;
3257		else
3258			state->VddcOffset =
3259				adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3260	} else {
3261		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3262			state->VddcOffsetVid = 0;
3263		else
3264			state->VddcOffsetVid = (u8)
3265				((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3266				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3267	}
3268	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3269
3270	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3271	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3272	state->VddcOffset = cpu_to_be16(state->VddcOffset);
3273
3274	return 0;
3275}
3276
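/*
 * Translate an engine clock into SPLL register values; with engine
 * spread spectrum enabled, CLKS/CLKV are derived from the VBIOS SS
 * info for the resulting VCO frequency.
 */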
3277static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3278				    u32 engine_clock,
3279				    SMU7_Discrete_GraphicsLevel *sclk)
3280{
3281	struct ci_power_info *pi = ci_get_pi(adev);
3282	struct atom_clock_dividers dividers;
3283	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3284	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3285	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3286	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3287	u32 reference_clock = adev->clock.spll.reference_freq;
3288	u32 reference_divider;
3289	u32 fbdiv;
3290	int ret;
3291
3292	ret = amdgpu_atombios_get_clock_dividers(adev,
3293						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3294						 engine_clock, false, &dividers);
3295	if (ret)
3296		return ret;
3297
3298	reference_divider = 1 + dividers.ref_div;
3299	fbdiv = dividers.fb_div & 0x3FFFFFF;
3300
3301	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3302	spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3303	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3304
3305	if (pi->caps_sclk_ss_support) {
3306		struct amdgpu_atom_ss ss;
3307		u32 vco_freq = engine_clock * dividers.post_div;
3308
3309		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3310						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3311			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3312			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3313
3314			cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3315			cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3316			cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3317
3318			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3319			cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3320		}
3321	}
3322
3323	sclk->SclkFrequency = engine_clock;
3324	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3325	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3326	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3327	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3328	sclk->SclkDid = (u8)dividers.post_divider;
3329
3330	return 0;
3331}
3332
3333static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3334					    u32 engine_clock,
3335					    u16 sclk_activity_level_t,
3336					    SMU7_Discrete_GraphicsLevel *graphic_level)
3337{
3338	struct ci_power_info *pi = ci_get_pi(adev);
3339	int ret;
3340
3341	ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3342	if (ret)
3343		return ret;
3344
3345	ret = ci_get_dependency_volt_by_clk(adev,
3346					    &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3347					    engine_clock, &graphic_level->MinVddc);
3348	if (ret)
3349		return ret;
3350
3351	graphic_level->SclkFrequency = engine_clock;
3352
3353	graphic_level->Flags = 0;
3354	graphic_level->MinVddcPhases = 1;
3355
3356	if (pi->vddc_phase_shed_control)
3357		ci_populate_phase_value_based_on_sclk(adev,
3358						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3359						      engine_clock,
3360						      &graphic_level->MinVddcPhases);
3361
3362	graphic_level->ActivityLevel = sclk_activity_level_t;
3363
3364	graphic_level->CcPwrDynRm = 0;
3365	graphic_level->CcPwrDynRm1 = 0;
3366	graphic_level->EnabledForThrottle = 1;
3367	graphic_level->UpH = 0;
3368	graphic_level->DownH = 0;
3369	graphic_level->VoltageDownH = 0;
3370	graphic_level->PowerThrottle = 0;
3371
3372	if (pi->caps_sclk_ds)
3373		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3374										   CISLAND_MINIMUM_ENGINE_CLOCK);
3375
3376	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3377
3378	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3379	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3380	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3381	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3382	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3383	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3384	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3385	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3386	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3387	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3388	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3389
3390	return 0;
3391}
3392
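/*
 * Build the graphics (sclk) DPM level array from the default DPM table and
 * copy it into SMC SRAM.  Levels above 1 get their deep sleep divider
 * cleared, the top level carries the high display watermark, and level 0
 * is marked enabled-for-activity.
 */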
3393static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3394{
3395	struct ci_power_info *pi = ci_get_pi(adev);
3396	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3397	u32 level_array_address = pi->dpm_table_start +
3398		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3399	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3400		SMU7_MAX_LEVELS_GRAPHICS;
3401	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3402	u32 i, ret;
3403
3404	memset(levels, 0, level_array_size);
3405
3406	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3407		ret = ci_populate_single_graphic_level(adev,
3408						       dpm_table->sclk_table.dpm_levels[i].value,
3409						       (u16)pi->activity_target[i],
3410						       &pi->smc_state_table.GraphicsLevel[i]);
3411		if (ret)
3412			return ret;
3413		if (i > 1)
3414			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3415		if (i == (dpm_table->sclk_table.count - 1))
3416			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3417				PPSMC_DISPLAY_WATERMARK_HIGH;
3418	}
3419	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3420
3421	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3422	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3423		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3424
3425	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3426				   (u8 *)levels, level_array_size,
3427				   pi->sram_end);
3428	if (ret)
3429		return ret;
3430
3431	return 0;
3432}
3433
3434static int ci_populate_ulv_state(struct amdgpu_device *adev,
3435				 SMU7_Discrete_Ulv *ulv_level)
3436{
3437	return ci_populate_ulv_level(adev, ulv_level);
3438}
3439
3440static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3441{
3442	struct ci_power_info *pi = ci_get_pi(adev);
3443	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3444	u32 level_array_address = pi->dpm_table_start +
3445		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3446	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3447		SMU7_MAX_LEVELS_MEMORY;
3448	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3449	u32 i, ret;
3450
3451	memset(levels, 0, level_array_size);
3452
3453	for (i = 0; i < dpm_table->mclk_table.count; i++) {
3454		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3455			return -EINVAL;
3456		ret = ci_populate_single_memory_level(adev,
3457						      dpm_table->mclk_table.dpm_levels[i].value,
3458						      &pi->smc_state_table.MemoryLevel[i]);
3459		if (ret)
3460			return ret;
3461	}
3462
3463	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3464
3465	if ((dpm_table->mclk_table.count >= 2) &&
3466	    ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3467		pi->smc_state_table.MemoryLevel[1].MinVddc =
3468			pi->smc_state_table.MemoryLevel[0].MinVddc;
3469		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3470			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3471	}
3472
3473	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3474
3475	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3476	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3477		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3478
3479	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3480		PPSMC_DISPLAY_WATERMARK_HIGH;
3481
3482	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3483				   (u8 *)levels, level_array_size,
3484				   pi->sram_end);
3485	if (ret)
3486		return ret;
3487
3488	return 0;
3489}
3490
3491static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3492				      struct ci_single_dpm_table* dpm_table,
3493				      u32 count)
3494{
3495	u32 i;
3496
3497	dpm_table->count = count;
3498	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3499		dpm_table->dpm_levels[i].enabled = false;
3500}
3501
3502static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3503				      u32 index, u32 pcie_gen, u32 pcie_lanes)
3504{
3505	dpm_table->dpm_levels[index].value = pcie_gen;
3506	dpm_table->dpm_levels[index].param1 = pcie_lanes;
3507	dpm_table->dpm_levels[index].enabled = true;
3508}
3509
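/*
 * Build the fixed six-entry PCIE DPM table from the performance and
 * powersaving gen/lane min/max values; if only one of the two capability
 * sets is usable, it is mirrored into the other.  Bonaire uses the maximum
 * powersaving lane count for entry 0, other ASICs the minimum.
 */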
3510static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3511{
3512	struct ci_power_info *pi = ci_get_pi(adev);
3513
3514	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3515		return -EINVAL;
3516
3517	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3518		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3519		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3520	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3521		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3522		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3523	}
3524
3525	ci_reset_single_dpm_table(adev,
3526				  &pi->dpm_table.pcie_speed_table,
3527				  SMU7_MAX_LEVELS_LINK);
3528
3529	if (adev->asic_type == CHIP_BONAIRE)
3530		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3531					  pi->pcie_gen_powersaving.min,
3532					  pi->pcie_lane_powersaving.max);
3533	else
3534		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3535					  pi->pcie_gen_powersaving.min,
3536					  pi->pcie_lane_powersaving.min);
3537	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3538				  pi->pcie_gen_performance.min,
3539				  pi->pcie_lane_performance.min);
3540	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3541				  pi->pcie_gen_powersaving.min,
3542				  pi->pcie_lane_powersaving.max);
3543	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3544				  pi->pcie_gen_performance.min,
3545				  pi->pcie_lane_performance.max);
3546	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3547				  pi->pcie_gen_powersaving.max,
3548				  pi->pcie_lane_powersaving.max);
3549	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3550				  pi->pcie_gen_performance.max,
3551				  pi->pcie_lane_performance.max);
3552
3553	pi->dpm_table.pcie_speed_table.count = 6;
3554
3555	return 0;
3556}
3557
3558static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3559{
3560	struct ci_power_info *pi = ci_get_pi(adev);
3561	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3562		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3563	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3564		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3565	struct amdgpu_cac_leakage_table *std_voltage_table =
3566		&adev->pm.dpm.dyn_state.cac_leakage_table;
3567	u32 i;
3568
3569	if (allowed_sclk_vddc_table == NULL)
3570		return -EINVAL;
3571	if (allowed_sclk_vddc_table->count < 1)
3572		return -EINVAL;
3573	if (allowed_mclk_table == NULL)
3574		return -EINVAL;
3575	if (allowed_mclk_table->count < 1)
3576		return -EINVAL;
3577
3578	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3579
3580	ci_reset_single_dpm_table(adev,
3581				  &pi->dpm_table.sclk_table,
3582				  SMU7_MAX_LEVELS_GRAPHICS);
3583	ci_reset_single_dpm_table(adev,
3584				  &pi->dpm_table.mclk_table,
3585				  SMU7_MAX_LEVELS_MEMORY);
3586	ci_reset_single_dpm_table(adev,
3587				  &pi->dpm_table.vddc_table,
3588				  SMU7_MAX_LEVELS_VDDC);
3589	ci_reset_single_dpm_table(adev,
3590				  &pi->dpm_table.vddci_table,
3591				  SMU7_MAX_LEVELS_VDDCI);
3592	ci_reset_single_dpm_table(adev,
3593				  &pi->dpm_table.mvdd_table,
3594				  SMU7_MAX_LEVELS_MVDD);
3595
3596	pi->dpm_table.sclk_table.count = 0;
3597	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3598		if ((i == 0) ||
3599		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3600		     allowed_sclk_vddc_table->entries[i].clk)) {
3601			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3602				allowed_sclk_vddc_table->entries[i].clk;
3603			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3604				(i == 0);
3605			pi->dpm_table.sclk_table.count++;
3606		}
3607	}
3608
3609	pi->dpm_table.mclk_table.count = 0;
3610	for (i = 0; i < allowed_mclk_table->count; i++) {
3611		if ((i == 0) ||
3612		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3613		     allowed_mclk_table->entries[i].clk)) {
3614			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3615				allowed_mclk_table->entries[i].clk;
3616			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3617				(i == 0);
3618			pi->dpm_table.mclk_table.count++;
3619		}
3620	}
3621
3622	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3623		pi->dpm_table.vddc_table.dpm_levels[i].value =
3624			allowed_sclk_vddc_table->entries[i].v;
3625		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3626			std_voltage_table->entries[i].leakage;
3627		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3628	}
3629	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3630
3631	allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3632	if (allowed_mclk_table) {
3633		for (i = 0; i < allowed_mclk_table->count; i++) {
3634			pi->dpm_table.vddci_table.dpm_levels[i].value =
3635				allowed_mclk_table->entries[i].v;
3636			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3637		}
3638		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3639	}
3640
3641	allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3642	if (allowed_mclk_table) {
3643		for (i = 0; i < allowed_mclk_table->count; i++) {
3644			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3645				allowed_mclk_table->entries[i].v;
3646			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3647		}
3648		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3649	}
3650
3651	ci_setup_default_pcie_tables(adev);
3652
3653	/* save a copy of the default DPM table */
3654	memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3655			sizeof(struct ci_dpm_table));
3656
3657	return 0;
3658}
3659
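/*
 * Linear scan of a DPM table for an exact clock match; on success the
 * matching index (the last match wins) is stored in boot_level, otherwise
 * -EINVAL is returned.
 */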
3660static int ci_find_boot_level(struct ci_single_dpm_table *table,
3661			      u32 value, u32 *boot_level)
3662{
3663	u32 i;
3664	int ret = -EINVAL;
3665
3666	for (i = 0; i < table->count; i++) {
3667		if (value == table->dpm_levels[i].value) {
3668			*boot_level = i;
3669			ret = 0;
3670		}
3671	}
3672
3673	return ret;
3674}
3675
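/*
 * Populate the complete SMU7_Discrete_DpmTable: voltage tables, ULV state,
 * graphics/memory/link levels, boot clocks and thermal limits.  Scalar
 * fields are byte-swapped to the SMC's big-endian layout before the table
 * is uploaded; the three SMU7_PIDController blocks are excluded from the
 * copy (hence the size adjustment at the end).
 */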
3676static int ci_init_smc_table(struct amdgpu_device *adev)
3677{
3678	struct ci_power_info *pi = ci_get_pi(adev);
3679	struct ci_ulv_parm *ulv = &pi->ulv;
3680	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3681	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3682	int ret;
3683
3684	ret = ci_setup_default_dpm_tables(adev);
3685	if (ret)
3686		return ret;
3687
3688	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3689		ci_populate_smc_voltage_tables(adev, table);
3690
3691	ci_init_fps_limits(adev);
3692
3693	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3694		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3695
3696	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3697		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3698
3699	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3700		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3701
3702	if (ulv->supported) {
3703		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3704		if (ret)
3705			return ret;
3706		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3707	}
3708
3709	ret = ci_populate_all_graphic_levels(adev);
3710	if (ret)
3711		return ret;
3712
3713	ret = ci_populate_all_memory_levels(adev);
3714	if (ret)
3715		return ret;
3716
3717	ci_populate_smc_link_level(adev, table);
3718
3719	ret = ci_populate_smc_acpi_level(adev, table);
3720	if (ret)
3721		return ret;
3722
3723	ret = ci_populate_smc_vce_level(adev, table);
3724	if (ret)
3725		return ret;
3726
3727	ret = ci_populate_smc_acp_level(adev, table);
3728	if (ret)
3729		return ret;
3730
3731	ret = ci_populate_smc_samu_level(adev, table);
3732	if (ret)
3733		return ret;
3734
3735	ret = ci_do_program_memory_timing_parameters(adev);
3736	if (ret)
3737		return ret;
3738
3739	ret = ci_populate_smc_uvd_level(adev, table);
3740	if (ret)
3741		return ret;
3742
3743	table->UvdBootLevel = 0;
3744	table->VceBootLevel = 0;
3745	table->AcpBootLevel = 0;
3746	table->SamuBootLevel = 0;
3747	table->GraphicsBootLevel = 0;
3748	table->MemoryBootLevel = 0;
3749
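	/*
	 * Note: the ci_find_boot_level() return values are not checked here;
	 * if a VBIOS boot clock is missing from the DPM table, the
	 * corresponding boot level simply stays 0.
	 */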
3750	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3751				 pi->vbios_boot_state.sclk_bootup_value,
3752				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3753
3754	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3755				 pi->vbios_boot_state.mclk_bootup_value,
3756				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3757
3758	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3759	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3760	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3761
3762	ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3763
3764	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3765	if (ret)
3766		return ret;
3767
3768	table->UVDInterval = 1;
3769	table->VCEInterval = 1;
3770	table->ACPInterval = 1;
3771	table->SAMUInterval = 1;
3772	table->GraphicsVoltageChangeEnable = 1;
3773	table->GraphicsThermThrottleEnable = 1;
3774	table->GraphicsInterval = 1;
3775	table->VoltageInterval = 1;
3776	table->ThermalInterval = 1;
3777	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3778					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3779	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3780					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3781	table->MemoryVoltageChangeEnable = 1;
3782	table->MemoryInterval = 1;
3783	table->VoltageResponseTime = 0;
3784	table->VddcVddciDelta = 4000;
3785	table->PhaseResponseTime = 0;
3786	table->MemoryThermThrottleEnable = 1;
3787	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3788	table->PCIeGenInterval = 1;
3789	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3790		table->SVI2Enable = 1;
3791	else
3792		table->SVI2Enable = 0;
3793
3794	table->ThermGpio = 17;
3795	table->SclkStepSize = 0x4000;
3796
3797	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3798	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3799	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3800	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3801	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3802	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3803	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3804	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3805	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3806	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3807	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3808	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3809	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3810	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3811
3812	ret = amdgpu_ci_copy_bytes_to_smc(adev,
3813				   pi->dpm_table_start +
3814				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3815				   (u8 *)&table->SystemFlags,
3816				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3817				   pi->sram_end);
3818	if (ret)
3819		return ret;
3820
3821	return 0;
3822}
3823
3824static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3825				      struct ci_single_dpm_table *dpm_table,
3826				      u32 low_limit, u32 high_limit)
3827{
3828	u32 i;
3829
3830	for (i = 0; i < dpm_table->count; i++) {
3831		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3832		    (dpm_table->dpm_levels[i].value > high_limit))
3833			dpm_table->dpm_levels[i].enabled = false;
3834		else
3835			dpm_table->dpm_levels[i].enabled = true;
3836	}
3837}
3838
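/*
 * Disable PCIE DPM levels outside the requested speed/lane window, then
 * disable duplicates so that each remaining (gen, lanes) pair stays
 * enabled only once.
 */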
3839static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3840				    u32 speed_low, u32 lanes_low,
3841				    u32 speed_high, u32 lanes_high)
3842{
3843	struct ci_power_info *pi = ci_get_pi(adev);
3844	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3845	u32 i, j;
3846
3847	for (i = 0; i < pcie_table->count; i++) {
3848		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3849		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3850		    (pcie_table->dpm_levels[i].value > speed_high) ||
3851		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3852			pcie_table->dpm_levels[i].enabled = false;
3853		else
3854			pcie_table->dpm_levels[i].enabled = true;
3855	}
3856
3857	for (i = 0; i < pcie_table->count; i++) {
3858		if (pcie_table->dpm_levels[i].enabled) {
3859			for (j = i + 1; j < pcie_table->count; j++) {
3860				if (pcie_table->dpm_levels[j].enabled) {
3861					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3862					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3863						pcie_table->dpm_levels[j].enabled = false;
3864				}
3865			}
3866		}
3867	}
3868}
3869
3870static int ci_trim_dpm_states(struct amdgpu_device *adev,
3871			      struct amdgpu_ps *amdgpu_state)
3872{
3873	struct ci_ps *state = ci_get_ps(amdgpu_state);
3874	struct ci_power_info *pi = ci_get_pi(adev);
3875	u32 high_limit_count;
3876
3877	if (state->performance_level_count < 1)
3878		return -EINVAL;
3879
3880	if (state->performance_level_count == 1)
3881		high_limit_count = 0;
3882	else
3883		high_limit_count = 1;
3884
3885	ci_trim_single_dpm_states(adev,
3886				  &pi->dpm_table.sclk_table,
3887				  state->performance_levels[0].sclk,
3888				  state->performance_levels[high_limit_count].sclk);
3889
3890	ci_trim_single_dpm_states(adev,
3891				  &pi->dpm_table.mclk_table,
3892				  state->performance_levels[0].mclk,
3893				  state->performance_levels[high_limit_count].mclk);
3894
3895	ci_trim_pcie_dpm_states(adev,
3896				state->performance_levels[0].pcie_gen,
3897				state->performance_levels[0].pcie_lane,
3898				state->performance_levels[high_limit_count].pcie_gen,
3899				state->performance_levels[high_limit_count].pcie_lane);
3900
3901	return 0;
3902}
3903
3904static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3905{
3906	struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3907		&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3908	struct amdgpu_clock_voltage_dependency_table *vddc_table =
3909		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3910	u32 requested_voltage = 0;
3911	u32 i;
3912
3913	if (disp_voltage_table == NULL)
3914		return -EINVAL;
3915	if (!disp_voltage_table->count)
3916		return -EINVAL;
3917
3918	for (i = 0; i < disp_voltage_table->count; i++) {
3919		if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3920			requested_voltage = disp_voltage_table->entries[i].v;
3921	}
3922
3923	for (i = 0; i < vddc_table->count; i++) {
3924		if (requested_voltage <= vddc_table->entries[i].v) {
3925			requested_voltage = vddc_table->entries[i].v;
3926			return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3927								  PPSMC_MSG_VddC_Request,
3928								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3929				0 : -EINVAL;
3930		}
3931	}
3932
3933	return -EINVAL;
3934}
3935
3936static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3937{
3938	struct ci_power_info *pi = ci_get_pi(adev);
3939	PPSMC_Result result;
3940
3941	ci_apply_disp_minimum_voltage_request(adev);
3942
3943	if (!pi->sclk_dpm_key_disabled) {
3944		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3945			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3946								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3947								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3948			if (result != PPSMC_Result_OK)
3949				return -EINVAL;
3950		}
3951	}
3952
3953	if (!pi->mclk_dpm_key_disabled) {
3954		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3955			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3956								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3957								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3958			if (result != PPSMC_Result_OK)
3959				return -EINVAL;
3960		}
3961	}
3962
3963#if 0
3964	if (!pi->pcie_dpm_key_disabled) {
3965		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3966			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3967								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3968								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3969			if (result != PPSMC_Result_OK)
3970				return -EINVAL;
3971		}
3972	}
3973#endif
3974
3975	return 0;
3976}
3977
3978static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3979						   struct amdgpu_ps *amdgpu_state)
3980{
3981	struct ci_power_info *pi = ci_get_pi(adev);
3982	struct ci_ps *state = ci_get_ps(amdgpu_state);
3983	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3984	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3985	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3986	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3987	u32 i;
3988
3989	pi->need_update_smu7_dpm_table = 0;
3990
3991	for (i = 0; i < sclk_table->count; i++) {
3992		if (sclk == sclk_table->dpm_levels[i].value)
3993			break;
3994	}
3995
3996	if (i >= sclk_table->count) {
3997		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3998	} else {
3999		/* XXX check display min clock requirements; the self-comparison below is deliberately always false and acts as a placeholder until the display minimum engine clock is actually validated */
4000		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4001			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4002	}
4003
4004	for (i = 0; i < mclk_table->count; i++) {
4005		if (mclk == mclk_table->dpm_levels[i].value)
4006			break;
4007	}
4008
4009	if (i >= mclk_table->count)
4010		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4011
4012	if (adev->pm.dpm.current_active_crtc_count !=
4013	    adev->pm.dpm.new_active_crtc_count)
4014		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4015}
4016
4017static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4018						       struct amdgpu_ps *amdgpu_state)
4019{
4020	struct ci_power_info *pi = ci_get_pi(adev);
4021	struct ci_ps *state = ci_get_ps(amdgpu_state);
4022	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4023	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4024	struct ci_dpm_table *dpm_table = &pi->dpm_table;
4025	int ret;
4026
4027	if (!pi->need_update_smu7_dpm_table)
4028		return 0;
4029
4030	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4031		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4032
4033	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4034		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4035
4036	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4037		ret = ci_populate_all_graphic_levels(adev);
4038		if (ret)
4039			return ret;
4040	}
4041
4042	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4043		ret = ci_populate_all_memory_levels(adev);
4044		if (ret)
4045			return ret;
4046	}
4047
4048	return 0;
4049}
4050
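/*
 * Enable or disable UVD DPM.  On enable, the UVD level mask is built from
 * the dependency-table entries whose voltage fits the current AC/DC limit,
 * and memory DPM level 0 is masked off while UVD is active (restored again
 * on disable).
 */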
4051static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4052{
4053	struct ci_power_info *pi = ci_get_pi(adev);
4054	const struct amdgpu_clock_and_voltage_limits *max_limits;
4055	int i;
4056
4057	if (adev->pm.dpm.ac_power)
4058		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4059	else
4060		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4061
4062	if (enable) {
4063		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4064
4065		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4066			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4067				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4068
4069				if (!pi->caps_uvd_dpm)
4070					break;
4071			}
4072		}
4073
4074		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4075						  PPSMC_MSG_UVDDPM_SetEnabledMask,
4076						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4077
4078		if (pi->last_mclk_dpm_enable_mask & 0x1) {
4079			pi->uvd_enabled = true;
4080			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4081			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4082							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
4083							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4084		}
4085	} else {
4086		if (pi->uvd_enabled) {
4087			pi->uvd_enabled = false;
4088			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4089			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4090							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
4091							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4092		}
4093	}
4094
4095	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4096				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4097		0 : -EINVAL;
4098}
4099
4100static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4101{
4102	struct ci_power_info *pi = ci_get_pi(adev);
4103	const struct amdgpu_clock_and_voltage_limits *max_limits;
4104	int i;
4105
4106	if (adev->pm.dpm.ac_power)
4107		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4108	else
4109		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4110
4111	if (enable) {
4112		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4113		for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4114			if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4115				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4116
4117				if (!pi->caps_vce_dpm)
4118					break;
4119			}
4120		}
4121
4122		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4123						  PPSMC_MSG_VCEDPM_SetEnabledMask,
4124						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4125	}
4126
4127	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4128				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4129		0 : -EINVAL;
4130}
4131
4132#if 0
4133static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4134{
4135	struct ci_power_info *pi = ci_get_pi(adev);
4136	const struct amdgpu_clock_and_voltage_limits *max_limits;
4137	int i;
4138
4139	if (adev->pm.dpm.ac_power)
4140		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4141	else
4142		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4143
4144	if (enable) {
4145		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4146		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4147			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4148				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4149
4150				if (!pi->caps_samu_dpm)
4151					break;
4152			}
4153		}
4154
4155		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4156						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
4157						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4158	}
4159	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4160				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4161		0 : -EINVAL;
4162}
4163
4164static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4165{
4166	struct ci_power_info *pi = ci_get_pi(adev);
4167	const struct amdgpu_clock_and_voltage_limits *max_limits;
4168	int i;
4169
4170	if (adev->pm.dpm.ac_power)
4171		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4172	else
4173		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4174
4175	if (enable) {
4176		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4177		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4178			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4179				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4180
4181				if (!pi->caps_acp_dpm)
4182					break;
4183			}
4184		}
4185
4186		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4187						  PPSMC_MSG_ACPDPM_SetEnabledMask,
4188						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4189	}
4190
4191	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4192				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4193		0 : -EINVAL;
4194}
4195#endif
4196
4197static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4198{
4199	struct ci_power_info *pi = ci_get_pi(adev);
4200	u32 tmp;
4201	int ret = 0;
4202
4203	if (!gate) {
4204		/* turn the clocks on when decoding */
4205		if (pi->caps_uvd_dpm ||
4206		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4207			pi->smc_state_table.UvdBootLevel = 0;
4208		else
4209			pi->smc_state_table.UvdBootLevel =
4210				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4211
4212		tmp = RREG32_SMC(ixDPM_TABLE_475);
4213		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4214		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4215		WREG32_SMC(ixDPM_TABLE_475, tmp);
4216		ret = ci_enable_uvd_dpm(adev, true);
4217	} else {
4218		ret = ci_enable_uvd_dpm(adev, false);
4219		if (ret)
4220			return ret;
4221	}
4222
4223	return ret;
4224}
4225
4226static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4227{
4228	u8 i;
4229	u32 min_evclk = 30000; /* ??? */
4230	struct amdgpu_vce_clock_voltage_dependency_table *table =
4231		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4232
4233	for (i = 0; i < table->count; i++) {
4234		if (table->entries[i].evclk >= min_evclk)
4235			return i;
4236	}
4237
4238	return table->count - 1;
4239}
4240
4241static int ci_update_vce_dpm(struct amdgpu_device *adev,
4242			     struct amdgpu_ps *amdgpu_new_state,
4243			     struct amdgpu_ps *amdgpu_current_state)
4244{
4245	struct ci_power_info *pi = ci_get_pi(adev);
4246	int ret = 0;
4247	u32 tmp;
4248
4249	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4250		if (amdgpu_new_state->evclk) {
4251			/* turn the clocks on when encoding */
4252			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4253							    AMD_CG_STATE_UNGATE);
4254			if (ret)
4255				return ret;
4256
4257			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4258			tmp = RREG32_SMC(ixDPM_TABLE_475);
4259			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4260			tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4261			WREG32_SMC(ixDPM_TABLE_475, tmp);
4262
4263			ret = ci_enable_vce_dpm(adev, true);
4264		} else {
4265			ret = ci_enable_vce_dpm(adev, false);
4266			if (ret)
4267				return ret;
4268			/* turn the clocks off when not encoding */
4269			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4270							    AMD_CG_STATE_GATE);
4271		}
4272	}
4273	return ret;
4274}
4275
4276#if 0
4277static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4278{
4279	return ci_enable_samu_dpm(adev, gate);
4280}
4281
4282static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4283{
4284	struct ci_power_info *pi = ci_get_pi(adev);
4285	u32 tmp;
4286
4287	if (!gate) {
4288		pi->smc_state_table.AcpBootLevel = 0;
4289
4290		tmp = RREG32_SMC(ixDPM_TABLE_475);
4291		tmp &= ~AcpBootLevel_MASK;
4292		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4293		WREG32_SMC(ixDPM_TABLE_475, tmp);
4294	}
4295
4296	return ci_enable_acp_dpm(adev, !gate);
4297}
4298#endif
4299
4300static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4301					     struct amdgpu_ps *amdgpu_state)
4302{
4303	struct ci_power_info *pi = ci_get_pi(adev);
4304	int ret;
4305
4306	ret = ci_trim_dpm_states(adev, amdgpu_state);
4307	if (ret)
4308		return ret;
4309
4310	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4311		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4312	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4313		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4314	pi->last_mclk_dpm_enable_mask =
4315		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4316	if (pi->uvd_enabled) {
4317		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4318			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4319	}
4320	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4321		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4322
4323	return 0;
4324}
4325
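/*
 * Return the index of the lowest set bit in level_mask.  Callers must pass
 * a non-zero mask; with a zero mask this loop would never terminate.
 */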
4326static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4327				       u32 level_mask)
4328{
4329	u32 level = 0;
4330
4331	while ((level_mask & (1 << level)) == 0)
4332		level++;
4333
4334	return level;
4335}
4336
4337
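/*
 * Force the DPM performance level.  For "high", the highest enabled sclk,
 * mclk and pcie levels are forced and the SMC profile index registers are
 * polled (up to usec_timeout) until they report the target level; "low"
 * forces the lowest enabled levels; "auto" unforces PCIE DPM and re-uploads
 * the level enable masks.
 */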
4338static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4339					  enum amdgpu_dpm_forced_level level)
4340{
4341	struct ci_power_info *pi = ci_get_pi(adev);
4342	u32 tmp, levels, i;
4343	int ret;
4344
4345	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4346		if ((!pi->pcie_dpm_key_disabled) &&
4347		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4348			levels = 0;
4349			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4350			while (tmp >>= 1)
4351				levels++;
4352			if (levels) {
4353				ret = ci_dpm_force_state_pcie(adev, levels);
4354				if (ret)
4355					return ret;
4356				for (i = 0; i < adev->usec_timeout; i++) {
4357					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4358					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4359					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4360					if (tmp == levels)
4361						break;
4362					udelay(1);
4363				}
4364			}
4365		}
4366		if ((!pi->sclk_dpm_key_disabled) &&
4367		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4368			levels = 0;
4369			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4370			while (tmp >>= 1)
4371				levels++;
4372			if (levels) {
4373				ret = ci_dpm_force_state_sclk(adev, levels);
4374				if (ret)
4375					return ret;
4376				for (i = 0; i < adev->usec_timeout; i++) {
4377					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4378					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4379					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4380					if (tmp == levels)
4381						break;
4382					udelay(1);
4383				}
4384			}
4385		}
4386		if ((!pi->mclk_dpm_key_disabled) &&
4387		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4388			levels = 0;
4389			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4390			while (tmp >>= 1)
4391				levels++;
4392			if (levels) {
4393				ret = ci_dpm_force_state_mclk(adev, levels);
4394				if (ret)
4395					return ret;
4396				for (i = 0; i < adev->usec_timeout; i++) {
4397					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4398					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4399					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4400					if (tmp == levels)
4401						break;
4402					udelay(1);
4403				}
4404			}
4405		}
4406	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4407		if ((!pi->sclk_dpm_key_disabled) &&
4408		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4409			levels = ci_get_lowest_enabled_level(adev,
4410							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4411			ret = ci_dpm_force_state_sclk(adev, levels);
4412			if (ret)
4413				return ret;
4414			for (i = 0; i < adev->usec_timeout; i++) {
4415				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4416				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4417				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4418				if (tmp == levels)
4419					break;
4420				udelay(1);
4421			}
4422		}
4423		if ((!pi->mclk_dpm_key_disabled) &&
4424		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4425			levels = ci_get_lowest_enabled_level(adev,
4426							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4427			ret = ci_dpm_force_state_mclk(adev, levels);
4428			if (ret)
4429				return ret;
4430			for (i = 0; i < adev->usec_timeout; i++) {
4431				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4432				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4433				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4434				if (tmp == levels)
4435					break;
4436				udelay(1);
4437			}
4438		}
4439		if ((!pi->pcie_dpm_key_disabled) &&
4440		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4441			levels = ci_get_lowest_enabled_level(adev,
4442							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4443			ret = ci_dpm_force_state_pcie(adev, levels);
4444			if (ret)
4445				return ret;
4446			for (i = 0; i < adev->usec_timeout; i++) {
4447				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4448				TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4449				TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4450				if (tmp == levels)
4451					break;
4452				udelay(1);
4453			}
4454		}
4455	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4456		if (!pi->pcie_dpm_key_disabled) {
4457			PPSMC_Result smc_result;
4458
4459			smc_result = amdgpu_ci_send_msg_to_smc(adev,
4460							       PPSMC_MSG_PCIeDPM_UnForceLevel);
4461			if (smc_result != PPSMC_Result_OK)
4462				return -EINVAL;
4463		}
4464		ret = ci_upload_dpm_level_enable_mask(adev);
4465		if (ret)
4466			return ret;
4467	}
4468
4469	adev->pm.dpm.forced_level = level;
4470
4471	return 0;
4472}
4473
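/*
 * Expand the "special" MC registers: each MC_SEQ_MISC1 or MC_SEQ_RESERVE_M
 * entry spawns additional MC_PMG_CMD_* columns appended at table->last,
 * each combining halves of the live register value with the per-entry
 * timing data.  Fails if the expansion would overflow the register array.
 */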
4474static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4475				       struct ci_mc_reg_table *table)
4476{
4477	u8 i, j, k;
4478	u32 temp_reg;
4479
4480	for (i = 0, j = table->last; i < table->last; i++) {
4481		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4482			return -EINVAL;
4483		switch (table->mc_reg_address[i].s1) {
4484		case mmMC_SEQ_MISC1:
4485			temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4486			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4487			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4488			for (k = 0; k < table->num_entries; k++) {
4489				table->mc_reg_table_entry[k].mc_data[j] =
4490					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4491			}
4492			j++;
4493			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4494				return -EINVAL;
4495
4496			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4497			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4498			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4499			for (k = 0; k < table->num_entries; k++) {
4500				table->mc_reg_table_entry[k].mc_data[j] =
4501					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4502				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4503					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4504			}
4505			j++;
4506			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4507				return -EINVAL;
4508
4509			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4510				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4511				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4512				for (k = 0; k < table->num_entries; k++) {
4513					table->mc_reg_table_entry[k].mc_data[j] =
4514						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4515				}
4516				j++;
4517				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4518					return -EINVAL;
4519			}
4520			break;
4521		case mmMC_SEQ_RESERVE_M:
4522			temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4523			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4524			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4525			for (k = 0; k < table->num_entries; k++) {
4526				table->mc_reg_table_entry[k].mc_data[j] =
4527					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4528			}
4529			j++;
4530			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4531				return -EINVAL;
4532			break;
4533		default:
4534			break;
4535		}
4536
4537	}
4538
4539	table->last = j;
4540
4541	return 0;
4542}
4543
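/*
 * Map an MC register onto its _LP shadow used for the s0 address column;
 * returns false when the register has no shadow, in which case the caller
 * falls back to the s1 address.
 */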
4544static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4545{
4546	bool result = true;
4547
4548	switch (in_reg) {
4549	case mmMC_SEQ_RAS_TIMING:
4550		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
4551		break;
4552	case mmMC_SEQ_DLL_STBY:
4553		*out_reg = mmMC_SEQ_DLL_STBY_LP;
4554		break;
4555	case mmMC_SEQ_G5PDX_CMD0:
4556		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4557		break;
4558	case mmMC_SEQ_G5PDX_CMD1:
4559		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4560		break;
4561	case mmMC_SEQ_G5PDX_CTRL:
4562		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4563		break;
4564	case mmMC_SEQ_CAS_TIMING:
4565		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
4566		break;
4567	case mmMC_SEQ_MISC_TIMING:
4568		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
4569		break;
4570	case mmMC_SEQ_MISC_TIMING2:
4571		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4572		break;
4573	case mmMC_SEQ_PMG_DVS_CMD:
4574		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4575		break;
4576	case mmMC_SEQ_PMG_DVS_CTL:
4577		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4578		break;
4579	case mmMC_SEQ_RD_CTL_D0:
4580		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4581		break;
4582	case mmMC_SEQ_RD_CTL_D1:
4583		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4584		break;
4585	case mmMC_SEQ_WR_CTL_D0:
4586		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4587		break;
4588	case mmMC_SEQ_WR_CTL_D1:
4589		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4590		break;
4591	case mmMC_PMG_CMD_EMRS:
4592		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4593		break;
4594	case mmMC_PMG_CMD_MRS:
4595		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4596		break;
4597	case mmMC_PMG_CMD_MRS1:
4598		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4599		break;
4600	case mmMC_SEQ_PMG_TIMING:
4601		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
4602		break;
4603	case mmMC_PMG_CMD_MRS2:
4604		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4605		break;
4606	case mmMC_SEQ_WR_CTL_2:
4607		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
4608		break;
4609	default:
4610		result = false;
4611		break;
4612	}
4613
4614	return result;
4615}
4616
4617static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4618{
4619	u8 i, j;
4620
4621	for (i = 0; i < table->last; i++) {
4622		for (j = 1; j < table->num_entries; j++) {
4623			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4624			    table->mc_reg_table_entry[j].mc_data[i]) {
4625				table->valid_flag |= 1 << i;
4626				break;
4627			}
4628		}
4629	}
4630}
4631
4632static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4633{
4634	u32 i;
4635	u16 address;
4636
4637	for (i = 0; i < table->last; i++) {
4638		table->mc_reg_address[i].s0 =
4639			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4640			address : table->mc_reg_address[i].s1;
4641	}
4642}
4643
4644static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4645				      struct ci_mc_reg_table *ci_table)
4646{
4647	u8 i, j;
4648
4649	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4650		return -EINVAL;
4651	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4652		return -EINVAL;
4653
4654	for (i = 0; i < table->last; i++)
4655		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4656
4657	ci_table->last = table->last;
4658
4659	for (i = 0; i < table->num_entries; i++) {
4660		ci_table->mc_reg_table_entry[i].mclk_max =
4661			table->mc_reg_table_entry[i].mclk_max;
4662		for (j = 0; j < table->last; j++)
4663			ci_table->mc_reg_table_entry[i].mc_data[j] =
4664				table->mc_reg_table_entry[i].mc_data[j];
4665	}
4666	ci_table->num_entries = table->num_entries;
4667
4668	return 0;
4669}
4670
4671static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4672				       struct ci_mc_reg_table *table)
4673{
4674	u8 i, k;
4675	u32 tmp;
4676	bool patch;
4677
4678	tmp = RREG32(mmMC_SEQ_MISC0);
4679	patch = (tmp & 0x0000f00) == 0x300;
4680
4681	if (patch &&
4682	    ((adev->pdev->device == 0x67B0) ||
4683	     (adev->pdev->device == 0x67B1))) {
4684		for (i = 0; i < table->last; i++) {
4685			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4686				return -EINVAL;
4687			switch (table->mc_reg_address[i].s1) {
4688			case mmMC_SEQ_MISC1:
4689				for (k = 0; k < table->num_entries; k++) {
4690					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4691					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4692						table->mc_reg_table_entry[k].mc_data[i] =
4693							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4694							0x00000007;
4695				}
4696				break;
4697			case mmMC_SEQ_WR_CTL_D0:
4698				for (k = 0; k < table->num_entries; k++) {
4699					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4700					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4701						table->mc_reg_table_entry[k].mc_data[i] =
4702							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4703							0x0000D0DD;
4704				}
4705				break;
4706			case mmMC_SEQ_WR_CTL_D1:
4707				for (k = 0; k < table->num_entries; k++) {
4708					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4709					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4710						table->mc_reg_table_entry[k].mc_data[i] =
4711							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4712							0x0000D0DD;
4713				}
4714				break;
4715			case mmMC_SEQ_WR_CTL_2:
4716				for (k = 0; k < table->num_entries; k++) {
4717					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4718					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4719						table->mc_reg_table_entry[k].mc_data[i] = 0;
4720				}
4721				break;
4722			case mmMC_SEQ_CAS_TIMING:
4723				for (k = 0; k < table->num_entries; k++) {
4724					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4725						table->mc_reg_table_entry[k].mc_data[i] =
4726							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4727							0x000C0140;
4728					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4729						table->mc_reg_table_entry[k].mc_data[i] =
4730							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4731							0x000C0150;
4732				}
4733				break;
4734			case mmMC_SEQ_MISC_TIMING:
4735				for (k = 0; k < table->num_entries; k++) {
4736					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4737						table->mc_reg_table_entry[k].mc_data[i] =
4738							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4739							0x00000030;
4740					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4741						table->mc_reg_table_entry[k].mc_data[i] =
4742							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4743							0x00000035;
4744				}
4745				break;
4746			default:
4747				break;
4748			}
4749		}
4750
4751		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4752		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4753		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4754		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4755		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4756	}
4757
4758	return 0;
4759}
4760
4761static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4762{
4763	struct ci_power_info *pi = ci_get_pi(adev);
4764	struct atom_mc_reg_table *table;
4765	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4766	u8 module_index = ci_get_memory_module_index(adev);
4767	int ret;
4768
4769	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4770	if (!table)
4771		return -ENOMEM;
4772
4773	WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4774	WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4775	WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4776	WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4777	WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4778	WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4779	WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4780	WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4781	WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4782	WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4783	WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4784	WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4785	WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4786	WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4787	WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4788	WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4789	WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4790	WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4791	WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4792	WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4793
4794	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4795	if (ret)
4796		goto init_mc_done;
4797
4798	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4799	if (ret)
4800		goto init_mc_done;
4801
4802	ci_set_s0_mc_reg_index(ci_table);
4803
4804	ret = ci_register_patching_mc_seq(adev, ci_table);
4805	if (ret)
4806		goto init_mc_done;
4807
4808	ret = ci_set_mc_special_registers(adev, ci_table);
4809	if (ret)
4810		goto init_mc_done;
4811
4812	ci_set_valid_flag(ci_table);
4813
4814init_mc_done:
4815	kfree(table);
4816
4817	return ret;
4818}
4819
4820static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4821					SMU7_Discrete_MCRegisters *mc_reg_table)
4822{
4823	struct ci_power_info *pi = ci_get_pi(adev);
4824	u32 i, j;
4825
4826	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4827		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4828			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4829				return -EINVAL;
4830			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4831			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4832			i++;
4833		}
4834	}
4835
4836	mc_reg_table->last = (u8)i;
4837
4838	return 0;
4839}
4840
4841static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4842				    SMU7_Discrete_MCRegisterSet *data,
4843				    u32 num_entries, u32 valid_flag)
4844{
4845	u32 i, j;
4846
4847	for (i = 0, j = 0; j < num_entries; j++) {
4848		if (valid_flag & (1 << j)) {
4849			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4850			i++;
4851		}
4852	}
4853}
4854
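/*
 * Select the first MC register set whose mclk_max covers memory_clock
 * (falling back to the highest entry) and convert its valid columns to
 * big-endian SMC format.
 */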
4855static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4856						 const u32 memory_clock,
4857						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4858{
4859	struct ci_power_info *pi = ci_get_pi(adev);
4860	u32 i;
4861
4862	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4863		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4864			break;
4865	}
4866
4867	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4868		--i;
4869
4870	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4871				mc_reg_table_data, pi->mc_reg_table.last,
4872				pi->mc_reg_table.valid_flag);
4873}
4874
4875static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4876					   SMU7_Discrete_MCRegisters *mc_reg_table)
4877{
4878	struct ci_power_info *pi = ci_get_pi(adev);
4879	u32 i;
4880
4881	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4882		ci_convert_mc_reg_table_entry_to_smc(adev,
4883						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4884						     &mc_reg_table->data[i]);
4885}
4886
4887static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4888{
4889	struct ci_power_info *pi = ci_get_pi(adev);
4890	int ret;
4891
4892	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4893
4894	ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4895	if (ret)
4896		return ret;
4897	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4898
4899	return amdgpu_ci_copy_bytes_to_smc(adev,
4900				    pi->mc_reg_table_start,
4901				    (u8 *)&pi->smc_mc_reg_table,
4902				    sizeof(SMU7_Discrete_MCRegisters),
4903				    pi->sram_end);
4904}
4905
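/*
 * Re-upload only the per-level data[] portion of the MC register table;
 * this is needed after an overdrive mclk change and skipped otherwise.
 */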
4906static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4907{
4908	struct ci_power_info *pi = ci_get_pi(adev);
4909
4910	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4911		return 0;
4912
4913	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4914
4915	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4916
4917	return amdgpu_ci_copy_bytes_to_smc(adev,
4918				    pi->mc_reg_table_start +
4919				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4920				    (u8 *)&pi->smc_mc_reg_table.data[0],
4921				    sizeof(SMU7_Discrete_MCRegisterSet) *
4922				    pi->dpm_table.mclk_table.count,
4923				    pi->sram_end);
4924}
4925
4926static void ci_enable_voltage_control(struct amdgpu_device *adev)
4927{
4928	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4929
4930	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4931	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4932}
4933
4934static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4935						      struct amdgpu_ps *amdgpu_state)
4936{
4937	struct ci_ps *state = ci_get_ps(amdgpu_state);
4938	int i;
4939	u16 pcie_speed, max_speed = 0;
4940
4941	for (i = 0; i < state->performance_level_count; i++) {
4942		pcie_speed = state->performance_levels[i].pcie_gen;
4943		if (max_speed < pcie_speed)
4944			max_speed = pcie_speed;
4945	}
4946
4947	return max_speed;
4948}
4949
4950static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4951{
4952	u32 speed_cntl = 0;
4953
4954	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4955		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4956	speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4957
4958	return (u16)speed_cntl;
4959}
4960
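/*
 * Decode the LC_LINK_WIDTH_RD field into a lane count; unrecognized
 * encodings conservatively report x16.
 */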
4961static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4962{
4963	u32 link_width = 0;
4964
4965	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4966		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4967	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4968
4969	switch (link_width) {
4970	case 1:
4971		return 1;
4972	case 2:
4973		return 2;
4974	case 3:
4975		return 4;
4976	case 4:
4977		return 8;
4978	case 0:
4979	case 6:
4980	default:
4981		return 16;
4982	}
4983}
4984
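/*
 * PSPP negotiation, step 1: if the new state needs a faster link, ask
 * the platform for it via ACPI before the state change; if it needs a
 * slower one, just note that a request must be sent afterwards (see
 * ci_notify_link_speed_change_after_state_change()).
 */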
4985static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4986							     struct amdgpu_ps *amdgpu_new_state,
4987							     struct amdgpu_ps *amdgpu_current_state)
4988{
4989	struct ci_power_info *pi = ci_get_pi(adev);
4990	enum amdgpu_pcie_gen target_link_speed =
4991		ci_get_maximum_link_speed(adev, amdgpu_new_state);
4992	enum amdgpu_pcie_gen current_link_speed;
4993
4994	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4995		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4996	else
4997		current_link_speed = pi->force_pcie_gen;
4998
4999	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5000	pi->pspp_notify_required = false;
5001	if (target_link_speed > current_link_speed) {
5002		switch (target_link_speed) {
5003#ifdef CONFIG_ACPI
5004		case AMDGPU_PCIE_GEN3:
5005			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5006				break;
5007			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5008			if (current_link_speed == AMDGPU_PCIE_GEN2)
5009				break;
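			/* fall through */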
5010		case AMDGPU_PCIE_GEN2:
5011			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5012				break;
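			/* fall through */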
5013#endif
5014		default:
5015			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5016			break;
5017		}
5018	} else {
5019		if (target_link_speed < current_link_speed)
5020			pi->pspp_notify_required = true;
5021	}
5022}
5023
5024static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5025							   struct amdgpu_ps *amdgpu_new_state,
5026							   struct amdgpu_ps *amdgpu_current_state)
5027{
5028	struct ci_power_info *pi = ci_get_pi(adev);
5029	enum amdgpu_pcie_gen target_link_speed =
5030		ci_get_maximum_link_speed(adev, amdgpu_new_state);
5031	u8 request;
5032
5033	if (pi->pspp_notify_required) {
5034		if (target_link_speed == AMDGPU_PCIE_GEN3)
5035			request = PCIE_PERF_REQ_PECI_GEN3;
5036		else if (target_link_speed == AMDGPU_PCIE_GEN2)
5037			request = PCIE_PERF_REQ_PECI_GEN2;
5038		else
5039			request = PCIE_PERF_REQ_PECI_GEN1;
5040
5041		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5042		    (ci_get_current_pcie_speed(adev) > 0))
5043			return;
5044
5045#ifdef CONFIG_ACPI
5046		amdgpu_acpi_pcie_performance_request(adev, request, false);
5047#endif
5048	}
5049}
5050
5051static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5052{
5053	struct ci_power_info *pi = ci_get_pi(adev);
5054	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5055		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5056	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5057		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5058	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5059		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5060
5061	if (allowed_sclk_vddc_table == NULL)
5062		return -EINVAL;
5063	if (allowed_sclk_vddc_table->count < 1)
5064		return -EINVAL;
5065	if (allowed_mclk_vddc_table == NULL)
5066		return -EINVAL;
5067	if (allowed_mclk_vddc_table->count < 1)
5068		return -EINVAL;
5069	if (allowed_mclk_vddci_table == NULL)
5070		return -EINVAL;
5071	if (allowed_mclk_vddci_table->count < 1)
5072		return -EINVAL;
5073
5074	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5075	pi->max_vddc_in_pp_table =
5076		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5077
5078	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5079	pi->max_vddci_in_pp_table =
5080		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5081
5082	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5083		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5084	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5085		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5086	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5087		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5088	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5089		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5090
5091	return 0;
5092}
5093
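/*
 * The vbios dependency tables can carry leakage voltage IDs instead of
 * real voltages; replace any matching ID with the measured actual
 * voltage so real values are programmed into the SMC.
 */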
5094static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5095{
5096	struct ci_power_info *pi = ci_get_pi(adev);
5097	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5098	u32 leakage_index;
5099
5100	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5101		if (leakage_table->leakage_id[leakage_index] == *vddc) {
5102			*vddc = leakage_table->actual_voltage[leakage_index];
5103			break;
5104		}
5105	}
5106}
5107
5108static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5109{
5110	struct ci_power_info *pi = ci_get_pi(adev);
5111	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5112	u32 leakage_index;
5113
5114	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5115		if (leakage_table->leakage_id[leakage_index] == *vddci) {
5116			*vddci = leakage_table->actual_voltage[leakage_index];
5117			break;
5118		}
5119	}
5120}
5121
5122static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5123								      struct amdgpu_clock_voltage_dependency_table *table)
5124{
5125	u32 i;
5126
5127	if (table) {
5128		for (i = 0; i < table->count; i++)
5129			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5130	}
5131}
5132
5133static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5134								       struct amdgpu_clock_voltage_dependency_table *table)
5135{
5136	u32 i;
5137
5138	if (table) {
5139		for (i = 0; i < table->count; i++)
5140			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5141	}
5142}
5143
5144static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5145									  struct amdgpu_vce_clock_voltage_dependency_table *table)
5146{
5147	u32 i;
5148
5149	if (table) {
5150		for (i = 0; i < table->count; i++)
5151			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5152	}
5153}
5154
5155static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5156									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
5157{
5158	u32 i;
5159
5160	if (table) {
5161		for (i = 0; i < table->count; i++)
5162			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5163	}
5164}
5165
5166static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5167								   struct amdgpu_phase_shedding_limits_table *table)
5168{
5169	u32 i;
5170
5171	if (table) {
5172		for (i = 0; i < table->count; i++)
5173			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5174	}
5175}
5176
5177static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5178							    struct amdgpu_clock_and_voltage_limits *table)
5179{
5180	if (table) {
5181		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5182		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5183	}
5184}
5185
5186static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5187							 struct amdgpu_cac_leakage_table *table)
5188{
5189	u32 i;
5190
5191	if (table) {
5192		for (i = 0; i < table->count; i++)
5193			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5194	}
5195}
5196
5197static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5198{
5200	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5201								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5202	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5203								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5204	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5205								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5206	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5207								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5208	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5209								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5210	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5211								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5212	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5213								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5214	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5215								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5216	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5217							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5218	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5219							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5220	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5221							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5222	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5223						     &adev->pm.dpm.dyn_state.cac_leakage_table);
5225}
5226
5227static void ci_update_current_ps(struct amdgpu_device *adev,
5228				 struct amdgpu_ps *rps)
5229{
5230	struct ci_ps *new_ps = ci_get_ps(rps);
5231	struct ci_power_info *pi = ci_get_pi(adev);
5232
5233	pi->current_rps = *rps;
5234	pi->current_ps = *new_ps;
5235	pi->current_rps.ps_priv = &pi->current_ps;
5236	adev->pm.dpm.current_ps = &pi->current_rps;
5237}
5238
5239static void ci_update_requested_ps(struct amdgpu_device *adev,
5240				   struct amdgpu_ps *rps)
5241{
5242	struct ci_ps *new_ps = ci_get_ps(rps);
5243	struct ci_power_info *pi = ci_get_pi(adev);
5244
5245	pi->requested_rps = *rps;
5246	pi->requested_ps = *new_ps;
5247	pi->requested_rps.ps_priv = &pi->requested_ps;
5248	adev->pm.dpm.requested_ps = &pi->requested_rps;
5249}
5250
5251static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5252{
5253	struct ci_power_info *pi = ci_get_pi(adev);
5254	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5255	struct amdgpu_ps *new_ps = &requested_ps;
5256
5257	ci_update_requested_ps(adev, new_ps);
5258
5259	ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5260
5261	return 0;
5262}
5263
5264static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5265{
5266	struct ci_power_info *pi = ci_get_pi(adev);
5267	struct amdgpu_ps *new_ps = &pi->requested_rps;
5268
5269	ci_update_current_ps(adev, new_ps);
5270}
5271
5273static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5274{
5275	ci_read_clock_registers(adev);
5276	ci_enable_acpi_power_management(adev);
5277	ci_init_sclk_t(adev);
5278}
5279
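/*
 * Full DPM bring-up: build the voltage and MC register tables, upload
 * the SMC firmware and state tables, start the SMC, then enable the
 * individual features (ULV, deep sleep, DIDT, CAC, power containment,
 * thermal-based sclk DPM) one by one, failing fast on any error.
 */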
5280static int ci_dpm_enable(struct amdgpu_device *adev)
5281{
5282	struct ci_power_info *pi = ci_get_pi(adev);
5283	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5284	int ret;
5285
5286	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5287		ci_enable_voltage_control(adev);
5288		ret = ci_construct_voltage_tables(adev);
5289		if (ret) {
5290			DRM_ERROR("ci_construct_voltage_tables failed\n");
5291			return ret;
5292		}
5293	}
5294	if (pi->caps_dynamic_ac_timing) {
5295		ret = ci_initialize_mc_reg_table(adev);
5296		if (ret)
5297			pi->caps_dynamic_ac_timing = false;
5298	}
5299	if (pi->dynamic_ss)
5300		ci_enable_spread_spectrum(adev, true);
5301	if (pi->thermal_protection)
5302		ci_enable_thermal_protection(adev, true);
5303	ci_program_sstp(adev);
5304	ci_enable_display_gap(adev);
5305	ci_program_vc(adev);
5306	ret = ci_upload_firmware(adev);
5307	if (ret) {
5308		DRM_ERROR("ci_upload_firmware failed\n");
5309		return ret;
5310	}
5311	ret = ci_process_firmware_header(adev);
5312	if (ret) {
5313		DRM_ERROR("ci_process_firmware_header failed\n");
5314		return ret;
5315	}
5316	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5317	if (ret) {
5318		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5319		return ret;
5320	}
5321	ret = ci_init_smc_table(adev);
5322	if (ret) {
5323		DRM_ERROR("ci_init_smc_table failed\n");
5324		return ret;
5325	}
5326	ret = ci_init_arb_table_index(adev);
5327	if (ret) {
5328		DRM_ERROR("ci_init_arb_table_index failed\n");
5329		return ret;
5330	}
5331	if (pi->caps_dynamic_ac_timing) {
5332		ret = ci_populate_initial_mc_reg_table(adev);
5333		if (ret) {
5334			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5335			return ret;
5336		}
5337	}
5338	ret = ci_populate_pm_base(adev);
5339	if (ret) {
5340		DRM_ERROR("ci_populate_pm_base failed\n");
5341		return ret;
5342	}
5343	ci_dpm_start_smc(adev);
5344	ci_enable_vr_hot_gpio_interrupt(adev);
5345	ret = ci_notify_smc_display_change(adev, false);
5346	if (ret) {
5347		DRM_ERROR("ci_notify_smc_display_change failed\n");
5348		return ret;
5349	}
5350	ci_enable_sclk_control(adev, true);
5351	ret = ci_enable_ulv(adev, true);
5352	if (ret) {
5353		DRM_ERROR("ci_enable_ulv failed\n");
5354		return ret;
5355	}
5356	ret = ci_enable_ds_master_switch(adev, true);
5357	if (ret) {
5358		DRM_ERROR("ci_enable_ds_master_switch failed\n");
5359		return ret;
5360	}
5361	ret = ci_start_dpm(adev);
5362	if (ret) {
5363		DRM_ERROR("ci_start_dpm failed\n");
5364		return ret;
5365	}
5366	ret = ci_enable_didt(adev, true);
5367	if (ret) {
5368		DRM_ERROR("ci_enable_didt failed\n");
5369		return ret;
5370	}
5371	ret = ci_enable_smc_cac(adev, true);
5372	if (ret) {
5373		DRM_ERROR("ci_enable_smc_cac failed\n");
5374		return ret;
5375	}
5376	ret = ci_enable_power_containment(adev, true);
5377	if (ret) {
5378		DRM_ERROR("ci_enable_power_containment failed\n");
5379		return ret;
5380	}
5381
5382	ret = ci_power_control_set_level(adev);
5383	if (ret) {
5384		DRM_ERROR("ci_power_control_set_level failed\n");
5385		return ret;
5386	}
5387
5388	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5389
5390	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5391	if (ret) {
5392		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5393		return ret;
5394	}
5395
5396	ci_thermal_start_thermal_controller(adev);
5397
5398	ci_update_current_ps(adev, boot_ps);
5399
5400	return 0;
5401}
5402
5403static void ci_dpm_disable(struct amdgpu_device *adev)
5404{
5405	struct ci_power_info *pi = ci_get_pi(adev);
5406	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5407
5408	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5409		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5410	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5411		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5412
5413	ci_dpm_powergate_uvd(adev, true);
5414
5415	if (!amdgpu_ci_is_smc_running(adev))
5416		return;
5417
5418	ci_thermal_stop_thermal_controller(adev);
5419
5420	if (pi->thermal_protection)
5421		ci_enable_thermal_protection(adev, false);
5422	ci_enable_power_containment(adev, false);
5423	ci_enable_smc_cac(adev, false);
5424	ci_enable_didt(adev, false);
5425	ci_enable_spread_spectrum(adev, false);
5426	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5427	ci_stop_dpm(adev);
5428	ci_enable_ds_master_switch(adev, false);
5429	ci_enable_ulv(adev, false);
5430	ci_clear_vc(adev);
5431	ci_reset_to_default(adev);
5432	ci_dpm_stop_smc(adev);
5433	ci_force_switch_to_arb_f0(adev);
5434	ci_enable_thermal_based_sclk_dpm(adev, false);
5435
5436	ci_update_current_ps(adev, boot_ps);
5437}
5438
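/*
 * Perform a power state switch: sclk/mclk DPM is frozen while the new
 * level tables and enable masks are uploaded to the SMC, then unfrozen
 * so the SMC resumes level transitions against the new tables.
 */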
5439static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5440{
5441	struct ci_power_info *pi = ci_get_pi(adev);
5442	struct amdgpu_ps *new_ps = &pi->requested_rps;
5443	struct amdgpu_ps *old_ps = &pi->current_rps;
5444	int ret;
5445
5446	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5447	if (pi->pcie_performance_request)
5448		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5449	ret = ci_freeze_sclk_mclk_dpm(adev);
5450	if (ret) {
5451		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5452		return ret;
5453	}
5454	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5455	if (ret) {
5456		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5457		return ret;
5458	}
5459	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5460	if (ret) {
5461		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5462		return ret;
5463	}
5464
5465	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5466	if (ret) {
5467		DRM_ERROR("ci_update_vce_dpm failed\n");
5468		return ret;
5469	}
5470
5471	ret = ci_update_sclk_t(adev);
5472	if (ret) {
5473		DRM_ERROR("ci_update_sclk_t failed\n");
5474		return ret;
5475	}
5476	if (pi->caps_dynamic_ac_timing) {
5477		ret = ci_update_and_upload_mc_reg_table(adev);
5478		if (ret) {
5479			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5480			return ret;
5481		}
5482	}
5483	ret = ci_program_memory_timing_parameters(adev);
5484	if (ret) {
5485		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5486		return ret;
5487	}
5488	ret = ci_unfreeze_sclk_mclk_dpm(adev);
5489	if (ret) {
5490		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5491		return ret;
5492	}
5493	ret = ci_upload_dpm_level_enable_mask(adev);
5494	if (ret) {
5495		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5496		return ret;
5497	}
5498	if (pi->pcie_performance_request)
5499		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5500
5501	return 0;
5502}
5503
5504#if 0
5505static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5506{
5507	ci_set_boot_state(adev);
5508}
5509#endif
5510
5511static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5512{
5513	ci_program_display_gap(adev);
5514}
5515
5516union power_info {
5517	struct _ATOM_POWERPLAY_INFO info;
5518	struct _ATOM_POWERPLAY_INFO_V2 info_2;
5519	struct _ATOM_POWERPLAY_INFO_V3 info_3;
5520	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5521	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5522	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5523};
5524
5525union pplib_clock_info {
5526	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5527	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5528	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5529	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5530	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5531	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5532};
5533
5534union pplib_power_state {
5535	struct _ATOM_PPLIB_STATE v1;
5536	struct _ATOM_PPLIB_STATE_V2 v2;
5537};
5538
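/*
 * Each power state references one non-clock info entry, which carries
 * the classification flags and, on newer table revisions, the UVD
 * vclk/dclk.
 */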
5539static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5540					  struct amdgpu_ps *rps,
5541					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5542					  u8 table_rev)
5543{
5544	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5545	rps->class = le16_to_cpu(non_clock_info->usClassification);
5546	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5547
5548	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5549		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5550		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5551	} else {
5552		rps->vclk = 0;
5553		rps->dclk = 0;
5554	}
5555
5556	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5557		adev->pm.dpm.boot_ps = rps;
5558	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5559		adev->pm.dpm.uvd_ps = rps;
5560}
5561
5562static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5563				      struct amdgpu_ps *rps, int index,
5564				      union pplib_clock_info *clock_info)
5565{
5566	struct ci_power_info *pi = ci_get_pi(adev);
5567	struct ci_ps *ps = ci_get_ps(rps);
5568	struct ci_pl *pl = &ps->performance_levels[index];
5569
5570	ps->performance_level_count = index + 1;
5571
5572	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5573	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5574	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5575	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5576
5577	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5578						   pi->sys_pcie_mask,
5579						   pi->vbios_boot_state.pcie_gen_bootup_value,
5580						   clock_info->ci.ucPCIEGen);
5581	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5582						     pi->vbios_boot_state.pcie_lane_bootup_value,
5583						     le16_to_cpu(clock_info->ci.usPCIELane));
5584
5585	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5586		pi->acpi_pcie_gen = pl->pcie_gen;
5587	}
5588
5589	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5590		pi->ulv.supported = true;
5591		pi->ulv.pl = *pl;
5592		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5593	}
5594
5595	/* patch up boot state */
5596	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5597		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5598		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5599		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5600		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5601	}
5602
5603	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5604	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5605		pi->use_pcie_powersaving_levels = true;
5606		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5607			pi->pcie_gen_powersaving.max = pl->pcie_gen;
5608		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5609			pi->pcie_gen_powersaving.min = pl->pcie_gen;
5610		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5611			pi->pcie_lane_powersaving.max = pl->pcie_lane;
5612		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5613			pi->pcie_lane_powersaving.min = pl->pcie_lane;
5614		break;
5615	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5616		pi->use_pcie_performance_levels = true;
5617		if (pi->pcie_gen_performance.max < pl->pcie_gen)
5618			pi->pcie_gen_performance.max = pl->pcie_gen;
5619		if (pi->pcie_gen_performance.min > pl->pcie_gen)
5620			pi->pcie_gen_performance.min = pl->pcie_gen;
5621		if (pi->pcie_lane_performance.max < pl->pcie_lane)
5622			pi->pcie_lane_performance.max = pl->pcie_lane;
5623		if (pi->pcie_lane_performance.min > pl->pcie_lane)
5624			pi->pcie_lane_performance.min = pl->pcie_lane;
5625		break;
5626	default:
5627		break;
5628	}
5629}
5630
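/*
 * Walk the PPLib state array from the vbios: every state references one
 * non-clock info entry and up to CISLANDS_MAX_HARDWARE_POWERLEVELS
 * clock info entries, which become the ci_ps performance levels.
 */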
5631static int ci_parse_power_table(struct amdgpu_device *adev)
5632{
5633	struct amdgpu_mode_info *mode_info = &adev->mode_info;
5634	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5635	union pplib_power_state *power_state;
5636	int i, j, k, non_clock_array_index, clock_array_index;
5637	union pplib_clock_info *clock_info;
5638	struct _StateArray *state_array;
5639	struct _ClockInfoArray *clock_info_array;
5640	struct _NonClockInfoArray *non_clock_info_array;
5641	union power_info *power_info;
5642	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5643	u16 data_offset;
5644	u8 frev, crev;
5645	u8 *power_state_offset;
5646	struct ci_ps *ps;
5647
5648	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5649				   &frev, &crev, &data_offset))
5650		return -EINVAL;
5651	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5652
5653	amdgpu_add_thermal_controller(adev);
5654
5655	state_array = (struct _StateArray *)
5656		(mode_info->atom_context->bios + data_offset +
5657		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5658	clock_info_array = (struct _ClockInfoArray *)
5659		(mode_info->atom_context->bios + data_offset +
5660		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5661	non_clock_info_array = (struct _NonClockInfoArray *)
5662		(mode_info->atom_context->bios + data_offset +
5663		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5664
5665	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5666				  sizeof(struct amdgpu_ps), GFP_KERNEL);
5667	if (!adev->pm.dpm.ps)
5668		return -ENOMEM;
5669	power_state_offset = (u8 *)state_array->states;
5670	for (i = 0; i < state_array->ucNumEntries; i++) {
5671		u8 *idx;
5672		power_state = (union pplib_power_state *)power_state_offset;
5673		non_clock_array_index = power_state->v2.nonClockInfoIndex;
5674		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5675			&non_clock_info_array->nonClockInfo[non_clock_array_index];
5676		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5677		if (ps == NULL) {
			/* free the ps_priv of the states parsed so far */
			for (j = 0; j < i; j++)
				kfree(adev->pm.dpm.ps[j].ps_priv);
5678			kfree(adev->pm.dpm.ps);
5679			return -ENOMEM;
5680		}
5681		adev->pm.dpm.ps[i].ps_priv = ps;
5682		ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5683					      non_clock_info,
5684					      non_clock_info_array->ucEntrySize);
5685		k = 0;
5686		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5687		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5688			clock_array_index = idx[j];
5689			if (clock_array_index >= clock_info_array->ucNumEntries)
5690				continue;
5691			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5692				break;
5693			clock_info = (union pplib_clock_info *)
5694				((u8 *)&clock_info_array->clockInfo[0] +
5695				 (clock_array_index * clock_info_array->ucEntrySize));
5696			ci_parse_pplib_clock_info(adev,
5697						  &adev->pm.dpm.ps[i], k,
5698						  clock_info);
5699			k++;
5700		}
5701		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5702	}
5703	adev->pm.dpm.num_ps = state_array->ucNumEntries;
5704
5705	/* fill in the vce power states */
5706	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5707		u32 sclk, mclk;
5708		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5709		clock_info = (union pplib_clock_info *)
5710			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5711		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5712		sclk |= clock_info->ci.ucEngineClockHigh << 16;
5713		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5714		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5715		adev->pm.dpm.vce_states[i].sclk = sclk;
5716		adev->pm.dpm.vce_states[i].mclk = mclk;
5717	}
5718
5719	return 0;
5720}
5721
5722static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5723				    struct ci_vbios_boot_state *boot_state)
5724{
5725	struct amdgpu_mode_info *mode_info = &adev->mode_info;
5726	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5727	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5728	u8 frev, crev;
5729	u16 data_offset;
5730
5731	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5732				   &frev, &crev, &data_offset)) {
5733		firmware_info =
5734			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5735						    data_offset);
5736		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5737		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5738		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5739		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5740		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5741		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5742		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5743
5744		return 0;
5745	}
5746	return -EINVAL;
5747}
5748
5749static void ci_dpm_fini(struct amdgpu_device *adev)
5750{
5751	int i;
5752
5753	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5754		kfree(adev->pm.dpm.ps[i].ps_priv);
5755	}
5756	kfree(adev->pm.dpm.ps);
5757	kfree(adev->pm.dpm.priv);
5758	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5759	amdgpu_free_extended_power_table(adev);
5760}
5761
5762/**
5763 * ci_dpm_init_microcode - load ucode images from disk
5764 *
5765 * @adev: amdgpu_device pointer
5766 *
5767 * Use the firmware interface to load the ucode images into
5768 * the driver (not loaded into hw).
5769 * Returns 0 on success, error on failure.
5770 */
5771static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5772{
5773	const char *chip_name;
5774	char fw_name[30];
5775	int err;
5776
5777	DRM_DEBUG("\n");
5778
5779	switch (adev->asic_type) {
5780	case CHIP_BONAIRE:
5781		if ((adev->pdev->revision == 0x80) ||
5782		    (adev->pdev->revision == 0x81) ||
5783		    (adev->pdev->device == 0x665f))
5784			chip_name = "bonaire_k";
5785		else
5786			chip_name = "bonaire";
5787		break;
5788	case CHIP_HAWAII:
5789		if (adev->pdev->revision == 0x80)
5790			chip_name = "hawaii_k";
5791		else
5792			chip_name = "hawaii";
5793		break;
5794	case CHIP_KAVERI:
5795	case CHIP_KABINI:
5796	case CHIP_MULLINS:
5797	default: BUG();
5798	}
5799
5800	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5801	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5802	if (err)
5803		goto out;
5804	err = amdgpu_ucode_validate(adev->pm.fw);
5805
5806out:
5807	if (err) {
5808		printk(KERN_ERR
5809		       "cik_smc: Failed to load firmware \"%s\"\n",
5810		       fw_name);
5811		release_firmware(adev->pm.fw);
5812		adev->pm.fw = NULL;
5813	}
5814	return err;
5815}
5816
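/*
 * One-time software init: cache the vbios boot values, parse the
 * PowerPlay tables, patch them for leakage, and set up the driver
 * defaults (thermal trip points, GPIO routing, voltage control method).
 */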
5817static int ci_dpm_init(struct amdgpu_device *adev)
5818{
5819	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5820	SMU7_Discrete_DpmTable *dpm_table;
5821	struct amdgpu_gpio_rec gpio;
5822	u16 data_offset, size;
5823	u8 frev, crev;
5824	struct ci_power_info *pi;
5825	int ret;
5826
5827	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5828	if (pi == NULL)
5829		return -ENOMEM;
5830	adev->pm.dpm.priv = pi;
5831
5832	pi->sys_pcie_mask =
5833		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5834		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5835
5836	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5837
5838	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5839	pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5840	pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5841	pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5842
5843	pi->pcie_lane_performance.max = 0;
5844	pi->pcie_lane_performance.min = 16;
5845	pi->pcie_lane_powersaving.max = 0;
5846	pi->pcie_lane_powersaving.min = 16;
5847
5848	ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5849	if (ret) {
5850		ci_dpm_fini(adev);
5851		return ret;
5852	}
5853
5854	ret = amdgpu_get_platform_caps(adev);
5855	if (ret) {
5856		ci_dpm_fini(adev);
5857		return ret;
5858	}
5859
5860	ret = amdgpu_parse_extended_power_table(adev);
5861	if (ret) {
5862		ci_dpm_fini(adev);
5863		return ret;
5864	}
5865
5866	ret = ci_parse_power_table(adev);
5867	if (ret) {
5868		ci_dpm_fini(adev);
5869		return ret;
5870	}
5871
5872	pi->dll_default_on = false;
5873	pi->sram_end = SMC_RAM_END;
5874
5875	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5876	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5877	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5878	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5879	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5880	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5881	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5882	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5883
5884	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5885
5886	pi->sclk_dpm_key_disabled = 0;
5887	pi->mclk_dpm_key_disabled = 0;
5888	pi->pcie_dpm_key_disabled = 0;
5889	pi->thermal_sclk_dpm_enabled = 0;
5890
5891	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
5892		pi->caps_sclk_ds = true;
5893	else
5894		pi->caps_sclk_ds = false;
5895
5896	pi->mclk_strobe_mode_threshold = 40000;
5897	pi->mclk_stutter_mode_threshold = 40000;
5898	pi->mclk_edc_enable_threshold = 40000;
5899	pi->mclk_edc_wr_enable_threshold = 40000;
5900
5901	ci_initialize_powertune_defaults(adev);
5902
5903	pi->caps_fps = false;
5904
5905	pi->caps_sclk_throttle_low_notification = false;
5906
5907	pi->caps_uvd_dpm = true;
5908	pi->caps_vce_dpm = true;
5909
5910	ci_get_leakage_voltages(adev);
5911	ci_patch_dependency_tables_with_leakage(adev);
5912	ci_set_private_data_variables_based_on_pptable(adev);
5913
5914	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5915		kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5916	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5917		ci_dpm_fini(adev);
5918		return -ENOMEM;
5919	}
5920	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5921	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5922	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5923	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5924	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5925	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5926	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5927	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5928	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5929
5930	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5931	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5932	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5933
5934	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5935	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5936	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5937	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5938
5939	if (adev->asic_type == CHIP_HAWAII) {
5940		pi->thermal_temp_setting.temperature_low = 94500;
5941		pi->thermal_temp_setting.temperature_high = 95000;
5942		pi->thermal_temp_setting.temperature_shutdown = 104000;
5943	} else {
5944		pi->thermal_temp_setting.temperature_low = 99500;
5945		pi->thermal_temp_setting.temperature_high = 100000;
5946		pi->thermal_temp_setting.temperature_shutdown = 104000;
5947	}
5948
5949	pi->uvd_enabled = false;
5950
5951	dpm_table = &pi->smc_state_table;
5952
5953	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5954	if (gpio.valid) {
5955		dpm_table->VRHotGpio = gpio.shift;
5956		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5957	} else {
5958		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5959		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5960	}
5961
5962	gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5963	if (gpio.valid) {
5964		dpm_table->AcDcGpio = gpio.shift;
5965		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5966	} else {
5967		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5968		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5969	}
5970
5971	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5972	if (gpio.valid) {
5973		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5974
5975		switch (gpio.shift) {
5976		case 0:
5977			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5978			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5979			break;
5980		case 1:
5981			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5982			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5983			break;
5984		case 2:
5985			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5986			break;
5987		case 3:
5988			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
5989			break;
5990		case 4:
5991			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
5992			break;
5993		default:
5994			DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
5995			break;
5996		}
5997		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
5998	}
5999
6000	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6001	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6002	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6003	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6004		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6005	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6006		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6007
6008	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6009		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6010			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6011		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6012			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6013		else
6014			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6015	}
6016
6017	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6018		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6019			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6020		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6021			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6022		else
6023			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6024	}
6025
6026	pi->vddc_phase_shed_control = true;
6027
6028#if defined(CONFIG_ACPI)
6029	pi->pcie_performance_request =
6030		amdgpu_acpi_is_pcie_performance_request_supported(adev);
6031#else
6032	pi->pcie_performance_request = false;
6033#endif
6034
6035	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6036				   &frev, &crev, &data_offset)) {
6037		pi->caps_sclk_ss_support = true;
6038		pi->caps_mclk_ss_support = true;
6039		pi->dynamic_ss = true;
6040	} else {
6041		pi->caps_sclk_ss_support = false;
6042		pi->caps_mclk_ss_support = false;
6043		pi->dynamic_ss = true;
6044	}
6045
6046	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6047		pi->thermal_protection = true;
6048	else
6049		pi->thermal_protection = false;
6050
6051	pi->caps_dynamic_ac_timing = true;
6052
6053	pi->uvd_power_gated = true;
6054
6055	/* make sure dc limits are valid */
6056	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6057	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6058		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6059			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6060
6061	pi->fan_ctrl_is_in_default_mode = true;
6062
6063	return 0;
6064}
6065
6066static void
6067ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6068					       struct seq_file *m)
6069{
6070	struct ci_power_info *pi = ci_get_pi(adev);
6071	struct amdgpu_ps *rps = &pi->current_rps;
6072	u32 sclk = ci_get_average_sclk_freq(adev);
6073	u32 mclk = ci_get_average_mclk_freq(adev);
6074	u32 activity_percent = 50;
6075	int ret;
6076
6077	ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6078					&activity_percent);
6079
6080	if (ret == 0) {
6081		activity_percent += 0x80;
6082		activity_percent >>= 8;
6083		activity_percent = activity_percent > 100 ? 100 : activity_percent;
6084	}
6085
6086	seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
6087	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6088	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6089		   sclk, mclk);
6090	seq_printf(m, "GPU load: %u %%\n", activity_percent);
6091}
6092
6093static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6094				     struct amdgpu_ps *rps)
6095{
6096	struct ci_ps *ps = ci_get_ps(rps);
6097	struct ci_pl *pl;
6098	int i;
6099
6100	amdgpu_dpm_print_class_info(rps->class, rps->class2);
6101	amdgpu_dpm_print_cap_info(rps->caps);
6102	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6103	for (i = 0; i < ps->performance_level_count; i++) {
6104		pl = &ps->performance_levels[i];
6105		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6106		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6107	}
6108	amdgpu_dpm_print_ps_status(adev, rps);
6109}
6110
6111static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6112						const struct ci_pl *ci_cpl2)
6113{
6114	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6115		  (ci_cpl1->sclk == ci_cpl2->sclk) &&
6116		  (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6117		  (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6118}
6119
6120static int ci_check_state_equal(struct amdgpu_device *adev,
6121				struct amdgpu_ps *cps,
6122				struct amdgpu_ps *rps,
6123				bool *equal)
6124{
6125	struct ci_ps *ci_cps;
6126	struct ci_ps *ci_rps;
6127	int i;
6128
6129	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6130		return -EINVAL;
6131
6132	ci_cps = ci_get_ps(cps);
6133	ci_rps = ci_get_ps(rps);
6134
6135	if (ci_cps == NULL) {
6136		*equal = false;
6137		return 0;
6138	}
6139
6140	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6142		*equal = false;
6143		return 0;
6144	}
6145
6146	for (i = 0; i < ci_cps->performance_level_count; i++) {
6147		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6148					&(ci_rps->performance_levels[i]))) {
6149			*equal = false;
6150			return 0;
6151		}
6152	}
6153
6154	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
6155	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6156	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6157
6158	return 0;
6159}
6160
6161static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6162{
6163	struct ci_power_info *pi = ci_get_pi(adev);
6164	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6165
6166	if (low)
6167		return requested_state->performance_levels[0].sclk;
6168	else
6169		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6170}
6171
6172static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6173{
6174	struct ci_power_info *pi = ci_get_pi(adev);
6175	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6176
6177	if (low)
6178		return requested_state->performance_levels[0].mclk;
6179	else
6180		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6181}
6182
6183/* get temperature in millidegrees */
6184static int ci_dpm_get_temp(struct amdgpu_device *adev)
6185{
6186	u32 temp;
6187	int actual_temp = 0;
6188
6189	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6190		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6191
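	/* 9-bit reading; bit 9 (0x200) appears to flag out-of-range, so clamp to 255C */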
6192	if (temp & 0x200)
6193		actual_temp = 255;
6194	else
6195		actual_temp = temp & 0x1ff;
6196
6197	actual_temp = actual_temp * 1000;
6198
6199	return actual_temp;
6200}
6201
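/*
 * Reprogram the thermal trip points with alerts disabled, so that an
 * interrupt cannot fire against half-updated limits.
 */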
6202static int ci_set_temperature_range(struct amdgpu_device *adev)
6203{
6204	int ret;
6205
6206	ret = ci_thermal_enable_alert(adev, false);
6207	if (ret)
6208		return ret;
6209	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6210					       CISLANDS_TEMP_RANGE_MAX);
6211	if (ret)
6212		return ret;
6213	return ci_thermal_enable_alert(adev, true);
6217}
6218
6219static int ci_dpm_early_init(void *handle)
6220{
6221	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6222
6223	ci_dpm_set_dpm_funcs(adev);
6224	ci_dpm_set_irq_funcs(adev);
6225
6226	return 0;
6227}
6228
6229static int ci_dpm_late_init(void *handle)
6230{
6231	int ret;
6232	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6233
6234	if (!amdgpu_dpm)
6235		return 0;
6236
6237	/* init the sysfs and debugfs files late */
6238	ret = amdgpu_pm_sysfs_init(adev);
6239	if (ret)
6240		return ret;
6241
6242	ret = ci_set_temperature_range(adev);
6243	if (ret)
6244		return ret;
6245
6246	return 0;
6247}
6248
6249static int ci_dpm_sw_init(void *handle)
6250{
6251	int ret;
6252	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6253
6254	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
6255	if (ret)
6256		return ret;
6257
6258	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
6259	if (ret)
6260		return ret;
6261
6262	/* default to balanced state */
6263	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6264	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6265	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
6266	adev->pm.default_sclk = adev->clock.default_sclk;
6267	adev->pm.default_mclk = adev->clock.default_mclk;
6268	adev->pm.current_sclk = adev->clock.default_sclk;
6269	adev->pm.current_mclk = adev->clock.default_mclk;
6270	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6271
6272	if (amdgpu_dpm == 0)
6273		return 0;
6274
6275	ret = ci_dpm_init_microcode(adev);
6276	if (ret)
6277		return ret;
6278
6279	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6280	mutex_lock(&adev->pm.mutex);
6281	ret = ci_dpm_init(adev);
6282	if (ret)
6283		goto dpm_failed;
6284	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6285	if (amdgpu_dpm == 1)
6286		amdgpu_pm_print_power_states(adev);
6287	mutex_unlock(&adev->pm.mutex);
6288	DRM_INFO("amdgpu: dpm initialized\n");
6289
6290	return 0;
6291
6292dpm_failed:
6293	ci_dpm_fini(adev);
6294	mutex_unlock(&adev->pm.mutex);
6295	DRM_ERROR("amdgpu: dpm initialization failed\n");
6296	return ret;
6297}
6298
6299static int ci_dpm_sw_fini(void *handle)
6300{
6301	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6302
6303	flush_work(&adev->pm.dpm.thermal.work);
6304
6305	mutex_lock(&adev->pm.mutex);
6306	amdgpu_pm_sysfs_fini(adev);
6307	ci_dpm_fini(adev);
6308	mutex_unlock(&adev->pm.mutex);
6309
6310	release_firmware(adev->pm.fw);
6311	adev->pm.fw = NULL;
6312
6313	return 0;
6314}
6315
6316static int ci_dpm_hw_init(void *handle)
6317{
6318	int ret;
6320	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6321
6322	if (!amdgpu_dpm)
6323		return 0;
6324
6325	mutex_lock(&adev->pm.mutex);
6326	ci_dpm_setup_asic(adev);
6327	ret = ci_dpm_enable(adev);
6328	if (ret)
6329		adev->pm.dpm_enabled = false;
6330	else
6331		adev->pm.dpm_enabled = true;
6332	mutex_unlock(&adev->pm.mutex);
6333
6334	return ret;
6335}
6336
6337static int ci_dpm_hw_fini(void *handle)
6338{
6339	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6340
6341	if (adev->pm.dpm_enabled) {
6342		mutex_lock(&adev->pm.mutex);
6343		ci_dpm_disable(adev);
6344		mutex_unlock(&adev->pm.mutex);
6345	}
6346
6347	return 0;
6348}
6349
6350static int ci_dpm_suspend(void *handle)
6351{
6352	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6353
6354	if (adev->pm.dpm_enabled) {
6355		mutex_lock(&adev->pm.mutex);
6356		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6357			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6358		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6359			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6360		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6361		adev->pm.dpm.last_state = adev->pm.dpm.state;
6362		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6363		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
6364		mutex_unlock(&adev->pm.mutex);
6365		amdgpu_pm_compute_clocks(adev);
6366
6367	}
6368
6369	return 0;
6370}
6371
6372static int ci_dpm_resume(void *handle)
6373{
6374	int ret;
6375	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6376
6377	if (adev->pm.dpm_enabled) {
6378		/* asic init will reset to the boot state */
6379		mutex_lock(&adev->pm.mutex);
6380		ci_dpm_setup_asic(adev);
6381		ret = ci_dpm_enable(adev);
6382		if (ret)
6383			adev->pm.dpm_enabled = false;
6384		else
6385			adev->pm.dpm_enabled = true;
6386		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
6387		adev->pm.dpm.state = adev->pm.dpm.last_state;
6388		mutex_unlock(&adev->pm.mutex);
6389		if (adev->pm.dpm_enabled)
6390			amdgpu_pm_compute_clocks(adev);
6391	}
6392	return 0;
6393}
6394
6395static bool ci_dpm_is_idle(void *handle)
6396{
6397	/* XXX */
6398	return true;
6399}
6400
6401static int ci_dpm_wait_for_idle(void *handle)
6402{
6403	/* XXX */
6404	return 0;
6405}
6406
6407static int ci_dpm_soft_reset(void *handle)
6408{
6409	return 0;
6410}
6411
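/*
 * Mask or unmask the two SMC thermal interrupts (temperature rising
 * past the high trip point, or falling past the low one) in
 * CG_THERMAL_INT.
 */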
6412static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6413				      struct amdgpu_irq_src *source,
6414				      unsigned type,
6415				      enum amdgpu_interrupt_state state)
6416{
6417	u32 cg_thermal_int;
6418
6419	switch (type) {
6420	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6421		switch (state) {
6422		case AMDGPU_IRQ_STATE_DISABLE:
6423			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6424			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6425			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6426			break;
6427		case AMDGPU_IRQ_STATE_ENABLE:
6428			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6429			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6430			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6431			break;
6432		default:
6433			break;
6434		}
6435		break;
6436
6437	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6438		switch (state) {
6439		case AMDGPU_IRQ_STATE_DISABLE:
6440			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6441			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6442			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6443			break;
6444		case AMDGPU_IRQ_STATE_ENABLE:
6445			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6446			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6447			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6448			break;
6449		default:
6450			break;
6451		}
6452		break;
6453
6454	default:
6455		break;
6456	}
6457	return 0;
6458}
6459
6460static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6461				    struct amdgpu_irq_src *source,
6462				    struct amdgpu_iv_entry *entry)
6463{
6464	bool queue_thermal = false;
6465
6466	if (entry == NULL)
6467		return -EINVAL;
6468
6469	switch (entry->src_id) {
6470	case 230: /* thermal low to high */
6471		DRM_DEBUG("IH: thermal low to high\n");
6472		adev->pm.dpm.thermal.high_to_low = false;
6473		queue_thermal = true;
6474		break;
6475	case 231: /* thermal high to low */
6476		DRM_DEBUG("IH: thermal high to low\n");
6477		adev->pm.dpm.thermal.high_to_low = true;
6478		queue_thermal = true;
6479		break;
6480	default:
6481		break;
6482	}
6483
6484	if (queue_thermal)
6485		schedule_work(&adev->pm.dpm.thermal.work);
6486
6487	return 0;
6488}
6489
6490static int ci_dpm_set_clockgating_state(void *handle,
6491					  enum amd_clockgating_state state)
6492{
6493	return 0;
6494}
6495
6496static int ci_dpm_set_powergating_state(void *handle,
6497					  enum amd_powergating_state state)
6498{
6499	return 0;
6500}
6501
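/*
 * Backs the pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie sysfs files: query the
 * SMC (or the PCIe registers) for the current value, then print one
 * line per DPM level, marking the active level with a '*'.
 */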
6502static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
6503		enum pp_clock_type type, char *buf)
6504{
6505	struct ci_power_info *pi = ci_get_pi(adev);
6506	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6507	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6508	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6509
6510	int i, now, size = 0;
6511	uint32_t clock, pcie_speed;
6512
6513	switch (type) {
6514	case PP_SCLK:
6515		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6516		clock = RREG32(mmSMC_MSG_ARG_0);
6517
6518		for (i = 0; i < sclk_table->count; i++) {
6519			if (clock > sclk_table->dpm_levels[i].value)
6520				continue;
6521			break;
6522		}
6523		now = i;
6524
6525		for (i = 0; i < sclk_table->count; i++)
6526			size += sprintf(buf + size, "%d: %uMhz %s\n",
6527					i, sclk_table->dpm_levels[i].value / 100,
6528					(i == now) ? "*" : "");
6529		break;
6530	case PP_MCLK:
6531		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6532		clock = RREG32(mmSMC_MSG_ARG_0);
6533
6534		for (i = 0; i < mclk_table->count; i++) {
6535			if (clock > mclk_table->dpm_levels[i].value)
6536				continue;
6537			break;
6538		}
6539		now = i;
6540
6541		for (i = 0; i < mclk_table->count; i++)
6542			size += sprintf(buf + size, "%d: %uMhz %s\n",
6543					i, mclk_table->dpm_levels[i].value / 100,
6544					(i == now) ? "*" : "");
6545		break;
6546	case PP_PCIE:
6547		pcie_speed = ci_get_current_pcie_speed(adev);
6548		for (i = 0; i < pcie_table->count; i++) {
6549			if (pcie_speed != pcie_table->dpm_levels[i].value)
6550				continue;
6551			break;
6552		}
6553		now = i;
6554
6555		for (i = 0; i < pcie_table->count; i++)
6556			size += sprintf(buf + size, "%d: %s %s\n", i,
6557					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
6558					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
6559					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
6560					(i == now) ? "*" : "");
6561		break;
6562	default:
6563		break;
6564	}
6565
6566	return size;
6567}
6568
6569static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
6570		enum pp_clock_type type, uint32_t mask)
6571{
6572	struct ci_power_info *pi = ci_get_pi(adev);
6573
6574	if (adev->pm.dpm.forced_level != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
6576		return -EINVAL;
6577
6578	switch (type) {
6579	case PP_SCLK:
6580		if (!pi->sclk_dpm_key_disabled)
6581			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6582					PPSMC_MSG_SCLKDPM_SetEnabledMask,
6583					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6584		break;
6585
6586	case PP_MCLK:
6587		if (!pi->mclk_dpm_key_disabled)
6588			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6589					PPSMC_MSG_MCLKDPM_SetEnabledMask,
6590					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6591		break;
6592
6593	case PP_PCIE:
6594	{
6595		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6596		uint32_t level = 0;
6597
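		/* forcing takes a single level: use the highest set bit in the mask */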
6598		while (tmp >>= 1)
6599			level++;
6600
6601		if (!pi->pcie_dpm_key_disabled)
6602			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6603					PPSMC_MSG_PCIeDPM_ForceLevel,
6604					level);
6605		break;
6606	}
6607	default:
6608		break;
6609	}
6610
6611	return 0;
6612}
6613
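/*
 * Overdrive level, as an integer percentage: how far the top sclk level
 * in the current table sits above the "golden" (default) table.
 */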
6614static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
6615{
6616	struct ci_power_info *pi = ci_get_pi(adev);
6617	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6618	struct ci_single_dpm_table *golden_sclk_table =
6619			&(pi->golden_dpm_table.sclk_table);
6620	int value;
6621
6622	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6623			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6624			100 /
6625			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6626
6627	return value;
6628}
6629
6630static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
6631{
6632	struct ci_power_info *pi = ci_get_pi(adev);
6633	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6634	struct ci_single_dpm_table *golden_sclk_table =
6635			&(pi->golden_dpm_table.sclk_table);
6636
6637	if (value > 20)
6638		value = 20;
6639
6640	ps->performance_levels[ps->performance_level_count - 1].sclk =
6641			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6642			value / 100 +
6643			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6644
6645	return 0;
6646}
6647
6648static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
6649{
6650	struct ci_power_info *pi = ci_get_pi(adev);
6651	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6652	struct ci_single_dpm_table *golden_mclk_table =
6653			&(pi->golden_dpm_table.mclk_table);
6654	int value;
6655
6656	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6657			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6658			100 /
6659			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6660
6661	return value;
6662}
6663
6664static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
6665{
6666	struct ci_power_info *pi = ci_get_pi(adev);
6667	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6668	struct ci_single_dpm_table *golden_mclk_table =
6669			&(pi->golden_dpm_table.mclk_table);
6670
6671	if (value > 20)
6672		value = 20;
6673
6674	ps->performance_levels[ps->performance_level_count - 1].mclk =
6675			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6676			value / 100 +
6677			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6678
6679	return 0;
6680}
6681
6682const struct amd_ip_funcs ci_dpm_ip_funcs = {
6683	.name = "ci_dpm",
6684	.early_init = ci_dpm_early_init,
6685	.late_init = ci_dpm_late_init,
6686	.sw_init = ci_dpm_sw_init,
6687	.sw_fini = ci_dpm_sw_fini,
6688	.hw_init = ci_dpm_hw_init,
6689	.hw_fini = ci_dpm_hw_fini,
6690	.suspend = ci_dpm_suspend,
6691	.resume = ci_dpm_resume,
6692	.is_idle = ci_dpm_is_idle,
6693	.wait_for_idle = ci_dpm_wait_for_idle,
6694	.soft_reset = ci_dpm_soft_reset,
6695	.set_clockgating_state = ci_dpm_set_clockgating_state,
6696	.set_powergating_state = ci_dpm_set_powergating_state,
6697};
6698
6699static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
6700	.get_temperature = &ci_dpm_get_temp,
6701	.pre_set_power_state = &ci_dpm_pre_set_power_state,
6702	.set_power_state = &ci_dpm_set_power_state,
6703	.post_set_power_state = &ci_dpm_post_set_power_state,
6704	.display_configuration_changed = &ci_dpm_display_configuration_changed,
6705	.get_sclk = &ci_dpm_get_sclk,
6706	.get_mclk = &ci_dpm_get_mclk,
6707	.print_power_state = &ci_dpm_print_power_state,
6708	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
6709	.force_performance_level = &ci_dpm_force_performance_level,
6710	.vblank_too_short = &ci_dpm_vblank_too_short,
6711	.powergate_uvd = &ci_dpm_powergate_uvd,
6712	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
6713	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
6714	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
6715	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
6716	.print_clock_levels = ci_dpm_print_clock_levels,
6717	.force_clock_level = ci_dpm_force_clock_level,
6718	.get_sclk_od = ci_dpm_get_sclk_od,
6719	.set_sclk_od = ci_dpm_set_sclk_od,
6720	.get_mclk_od = ci_dpm_get_mclk_od,
6721	.set_mclk_od = ci_dpm_set_mclk_od,
6722	.check_state_equal = ci_check_state_equal,
6723	.get_vce_clock_state = amdgpu_get_vce_clock_state,
6724};
6725
6726static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
6727{
6728	if (adev->pm.funcs == NULL)
6729		adev->pm.funcs = &ci_dpm_funcs;
6730}
6731
6732static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
6733	.set = ci_dpm_set_interrupt_state,
6734	.process = ci_dpm_process_interrupt,
6735};
6736
6737static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
6738{
6739	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
6740	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
6741}
6742
6743const struct amdgpu_ip_block_version ci_dpm_ip_block =
6744{
6745	.type = AMD_IP_BLOCK_TYPE_SMC,
6746	.major = 7,
6747	.minor = 0,
6748	.rev = 0,
6749	.funcs = &ci_dpm_ip_funcs,
6750};