   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_i2c.h"
  28#include "amdgpu_dpm.h"
  29#include "atom.h"
  30#include "amd_pcie.h"
  31#include "amdgpu_display.h"
  32#include "hwmgr.h"
  33#include <linux/power_supply.h>
 
  34
#define WIDTH_4K 3840 /* 4K/UHD horizontal resolution in pixels */
 
  36
  37void amdgpu_dpm_print_class_info(u32 class, u32 class2)
  38{
  39	const char *s;
  40
  41	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
  42	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
  43	default:
  44		s = "none";
  45		break;
  46	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
  47		s = "battery";
  48		break;
  49	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
  50		s = "balanced";
  51		break;
  52	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
  53		s = "performance";
  54		break;
  55	}
  56	printk("\tui class: %s\n", s);
  57	printk("\tinternal class:");
  58	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
  59	    (class2 == 0))
  60		pr_cont(" none");
  61	else {
  62		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
  63			pr_cont(" boot");
  64		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
  65			pr_cont(" thermal");
  66		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
  67			pr_cont(" limited_pwr");
  68		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
  69			pr_cont(" rest");
  70		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
  71			pr_cont(" forced");
  72		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
  73			pr_cont(" 3d_perf");
  74		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
  75			pr_cont(" ovrdrv");
  76		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
  77			pr_cont(" uvd");
  78		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
  79			pr_cont(" 3d_low");
  80		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
  81			pr_cont(" acpi");
  82		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
  83			pr_cont(" uvd_hd2");
  84		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
  85			pr_cont(" uvd_hd");
  86		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
  87			pr_cont(" uvd_sd");
  88		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
  89			pr_cont(" limited_pwr2");
  90		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
  91			pr_cont(" ulv");
  92		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
  93			pr_cont(" uvd_mvc");
  94	}
  95	pr_cont("\n");
  96}
  97
  98void amdgpu_dpm_print_cap_info(u32 caps)
  99{
 100	printk("\tcaps:");
 101	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
 102		pr_cont(" single_disp");
 103	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
 104		pr_cont(" video");
 105	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
 106		pr_cont(" no_dc");
 107	pr_cont("\n");
 108}
 109
 110void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 111				struct amdgpu_ps *rps)
 112{
 113	printk("\tstatus:");
 114	if (rps == adev->pm.dpm.current_ps)
 115		pr_cont(" c");
 116	if (rps == adev->pm.dpm.requested_ps)
 117		pr_cont(" r");
 118	if (rps == adev->pm.dpm.boot_ps)
 119		pr_cont(" b");
 120	pr_cont("\n");
 121}
 122
 123void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 124{
 125	struct drm_device *ddev = adev_to_drm(adev);
 126	struct drm_crtc *crtc;
 127	struct amdgpu_crtc *amdgpu_crtc;
 128
 129	adev->pm.dpm.new_active_crtcs = 0;
 130	adev->pm.dpm.new_active_crtc_count = 0;
 131	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 132		list_for_each_entry(crtc,
 133				    &ddev->mode_config.crtc_list, head) {
 134			amdgpu_crtc = to_amdgpu_crtc(crtc);
 135			if (amdgpu_crtc->enabled) {
 136				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
 137				adev->pm.dpm.new_active_crtc_count++;
 138			}
 139		}
 140	}
 141}
 142
 143
 144u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 145{
 146	struct drm_device *dev = adev_to_drm(adev);
 147	struct drm_crtc *crtc;
 148	struct amdgpu_crtc *amdgpu_crtc;
 149	u32 vblank_in_pixels;
 150	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 151
 152	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 153		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 154			amdgpu_crtc = to_amdgpu_crtc(crtc);
 155			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 156				vblank_in_pixels =
 157					amdgpu_crtc->hw_mode.crtc_htotal *
 158					(amdgpu_crtc->hw_mode.crtc_vblank_end -
 159					amdgpu_crtc->hw_mode.crtc_vdisplay +
 160					(amdgpu_crtc->v_border * 2));
 161
 162				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 163				break;
 164			}
 165		}
 166	}
 167
 168	return vblank_time_us;
 169}
 170
 171u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 172{
 173	struct drm_device *dev = adev_to_drm(adev);
 174	struct drm_crtc *crtc;
 175	struct amdgpu_crtc *amdgpu_crtc;
 176	u32 vrefresh = 0;
 177
 178	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 179		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 180			amdgpu_crtc = to_amdgpu_crtc(crtc);
 181			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 182				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 183				break;
 184			}
 185		}
 186	}
 187
 188	return vrefresh;
 189}
 190
 191bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
 192{
 193	switch (sensor) {
 194	case THERMAL_TYPE_RV6XX:
 195	case THERMAL_TYPE_RV770:
 196	case THERMAL_TYPE_EVERGREEN:
 197	case THERMAL_TYPE_SUMO:
 198	case THERMAL_TYPE_NI:
 199	case THERMAL_TYPE_SI:
 200	case THERMAL_TYPE_CI:
 201	case THERMAL_TYPE_KV:
 202		return true;
 203	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
 204	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
 205		return false; /* need special handling */
 206	case THERMAL_TYPE_NONE:
 207	case THERMAL_TYPE_EXTERNAL:
 208	case THERMAL_TYPE_EXTERNAL_GPIO:
 209	default:
 210		return false;
 211	}
 212}
 213
/* Overlay of every PowerPlay table revision the VBIOS may carry; the
 * parser picks the view matching the table's actual size/revision.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
 224
/* Overlay of the PPLib fan table revisions; selected at runtime via
 * ucFanTableFormat.
 */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
 230
 231static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
 232					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
 233{
 234	u32 size = atom_table->ucNumEntries *
 235		sizeof(struct amdgpu_clock_voltage_dependency_entry);
 236	int i;
 237	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
 238
 239	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
 240	if (!amdgpu_table->entries)
 241		return -ENOMEM;
 242
 243	entry = &atom_table->entries[0];
 244	for (i = 0; i < atom_table->ucNumEntries; i++) {
 245		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
 246			(entry->ucClockHigh << 16);
 247		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
 248		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
 249			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
 250	}
 251	amdgpu_table->count = atom_table->ucNumEntries;
 252
 253	return 0;
 254}
 255
 256int amdgpu_get_platform_caps(struct amdgpu_device *adev)
 257{
 258	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 259	union power_info *power_info;
 260	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 261	u16 data_offset;
 262	u8 frev, crev;
 263
 264	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 265				   &frev, &crev, &data_offset))
 266		return -EINVAL;
 267	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 268
 269	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
 270	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
 271	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 272
 273	return 0;
 274}
 275
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) at each header revision; compared
 * against ext_hdr->usSize to decide which optional ext tables exist.
 */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
 285
 286int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 287{
 288	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 289	union power_info *power_info;
 290	union fan_info *fan_info;
 291	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
 292	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 293	u16 data_offset;
 294	u8 frev, crev;
 295	int ret, i;
 296
 297	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 298				   &frev, &crev, &data_offset))
 299		return -EINVAL;
 300	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 301
 302	/* fan table */
 303	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 304	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 305		if (power_info->pplib3.usFanTableOffset) {
 306			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
 307						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
 308			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
 309			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
 310			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
 311			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
 312			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
 313			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
 314			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
 315			if (fan_info->fan.ucFanTableFormat >= 2)
 316				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
 317			else
 318				adev->pm.dpm.fan.t_max = 10900;
 319			adev->pm.dpm.fan.cycle_delay = 100000;
 320			if (fan_info->fan.ucFanTableFormat >= 3) {
 321				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
 322				adev->pm.dpm.fan.default_max_fan_pwm =
 323					le16_to_cpu(fan_info->fan3.usFanPWMMax);
 324				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
 325				adev->pm.dpm.fan.fan_output_sensitivity =
 326					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
 327			}
 328			adev->pm.dpm.fan.ucode_fan_control = true;
 329		}
 330	}
 331
 332	/* clock dependancy tables, shedding tables */
 333	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 334	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
 335		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
 336			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 337				(mode_info->atom_context->bios + data_offset +
 338				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
 339			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
 340								 dep_table);
 341			if (ret) {
 342				amdgpu_free_extended_power_table(adev);
 343				return ret;
 344			}
 345		}
 346		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
 347			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 348				(mode_info->atom_context->bios + data_offset +
 349				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
 350			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
 351								 dep_table);
 352			if (ret) {
 353				amdgpu_free_extended_power_table(adev);
 354				return ret;
 355			}
 356		}
 357		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
 358			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 359				(mode_info->atom_context->bios + data_offset +
 360				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
 361			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
 362								 dep_table);
 363			if (ret) {
 364				amdgpu_free_extended_power_table(adev);
 365				return ret;
 366			}
 367		}
 368		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
 369			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 370				(mode_info->atom_context->bios + data_offset +
 371				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
 372			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
 373								 dep_table);
 374			if (ret) {
 375				amdgpu_free_extended_power_table(adev);
 376				return ret;
 377			}
 378		}
 379		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 380			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
 381				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
 382				(mode_info->atom_context->bios + data_offset +
 383				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
 384			if (clk_v->ucNumEntries) {
 385				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
 386					le16_to_cpu(clk_v->entries[0].usSclkLow) |
 387					(clk_v->entries[0].ucSclkHigh << 16);
 388				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
 389					le16_to_cpu(clk_v->entries[0].usMclkLow) |
 390					(clk_v->entries[0].ucMclkHigh << 16);
 391				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
 392					le16_to_cpu(clk_v->entries[0].usVddc);
 393				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
 394					le16_to_cpu(clk_v->entries[0].usVddci);
 395			}
 396		}
 397		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
 398			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
 399				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
 400				(mode_info->atom_context->bios + data_offset +
 401				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
 402			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 403
 404			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
 405				kcalloc(psl->ucNumEntries,
 406					sizeof(struct amdgpu_phase_shedding_limits_entry),
 407					GFP_KERNEL);
 408			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
 409				amdgpu_free_extended_power_table(adev);
 410				return -ENOMEM;
 411			}
 412
 413			entry = &psl->entries[0];
 414			for (i = 0; i < psl->ucNumEntries; i++) {
 415				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
 416					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
 417				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
 418					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
 419				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
 420					le16_to_cpu(entry->usVoltage);
 421				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
 422					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
 423			}
 424			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
 425				psl->ucNumEntries;
 426		}
 427	}
 428
 429	/* cac data */
 430	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 431	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
 432		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
 433		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
 434		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
 435		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
 436		if (adev->pm.dpm.tdp_od_limit)
 437			adev->pm.dpm.power_control = true;
 438		else
 439			adev->pm.dpm.power_control = false;
 440		adev->pm.dpm.tdp_adjustment = 0;
 441		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
 442		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
 443		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
 444		if (power_info->pplib5.usCACLeakageTableOffset) {
 445			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
 446				(ATOM_PPLIB_CAC_Leakage_Table *)
 447				(mode_info->atom_context->bios + data_offset +
 448				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
 449			ATOM_PPLIB_CAC_Leakage_Record *entry;
 450			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
 451			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
 452			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
 453				amdgpu_free_extended_power_table(adev);
 454				return -ENOMEM;
 455			}
 456			entry = &cac_table->entries[0];
 457			for (i = 0; i < cac_table->ucNumEntries; i++) {
 458				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
 459					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
 460						le16_to_cpu(entry->usVddc1);
 461					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
 462						le16_to_cpu(entry->usVddc2);
 463					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
 464						le16_to_cpu(entry->usVddc3);
 465				} else {
 466					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
 467						le16_to_cpu(entry->usVddc);
 468					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
 469						le32_to_cpu(entry->ulLeakageValue);
 470				}
 471				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
 472					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
 473			}
 474			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
 475		}
 476	}
 477
 478	/* ext tables */
 479	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 480	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 481		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
 482			(mode_info->atom_context->bios + data_offset +
 483			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
 484		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
 485			ext_hdr->usVCETableOffset) {
 486			VCEClockInfoArray *array = (VCEClockInfoArray *)
 487				(mode_info->atom_context->bios + data_offset +
 488				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
 489			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
 490				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
 491				(mode_info->atom_context->bios + data_offset +
 492				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 493				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
 494			ATOM_PPLIB_VCE_State_Table *states =
 495				(ATOM_PPLIB_VCE_State_Table *)
 496				(mode_info->atom_context->bios + data_offset +
 497				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 498				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
 499				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
 500			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
 501			ATOM_PPLIB_VCE_State_Record *state_entry;
 502			VCEClockInfo *vce_clk;
 503			u32 size = limits->numEntries *
 504				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
 505			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
 506				kzalloc(size, GFP_KERNEL);
 507			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
 508				amdgpu_free_extended_power_table(adev);
 509				return -ENOMEM;
 510			}
 511			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 512				limits->numEntries;
 513			entry = &limits->entries[0];
 514			state_entry = &states->entries[0];
 515			for (i = 0; i < limits->numEntries; i++) {
 516				vce_clk = (VCEClockInfo *)
 517					((u8 *)&array->entries[0] +
 518					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 519				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
 520					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 521				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
 522					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 523				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
 524					le16_to_cpu(entry->usVoltage);
 525				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 526					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 527			}
 528			adev->pm.dpm.num_of_vce_states =
 529					states->numEntries > AMD_MAX_VCE_LEVELS ?
 530					AMD_MAX_VCE_LEVELS : states->numEntries;
 531			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 532				vce_clk = (VCEClockInfo *)
 533					((u8 *)&array->entries[0] +
 534					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 535				adev->pm.dpm.vce_states[i].evclk =
 536					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 537				adev->pm.dpm.vce_states[i].ecclk =
 538					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 539				adev->pm.dpm.vce_states[i].clk_idx =
 540					state_entry->ucClockInfoIndex & 0x3f;
 541				adev->pm.dpm.vce_states[i].pstate =
 542					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
 543				state_entry = (ATOM_PPLIB_VCE_State_Record *)
 544					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
 545			}
 546		}
 547		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
 548			ext_hdr->usUVDTableOffset) {
 549			UVDClockInfoArray *array = (UVDClockInfoArray *)
 550				(mode_info->atom_context->bios + data_offset +
 551				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
 552			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
 553				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
 554				(mode_info->atom_context->bios + data_offset +
 555				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
 556				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
 557			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
 558			u32 size = limits->numEntries *
 559				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
 560			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
 561				kzalloc(size, GFP_KERNEL);
 562			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
 563				amdgpu_free_extended_power_table(adev);
 564				return -ENOMEM;
 565			}
 566			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
 567				limits->numEntries;
 568			entry = &limits->entries[0];
 569			for (i = 0; i < limits->numEntries; i++) {
 570				UVDClockInfo *uvd_clk = (UVDClockInfo *)
 571					((u8 *)&array->entries[0] +
 572					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
 573				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
 574					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
 575				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
 576					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
 577				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
 578					le16_to_cpu(entry->usVoltage);
 579				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
 580					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
 581			}
 582		}
 583		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
 584			ext_hdr->usSAMUTableOffset) {
 585			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
 586				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
 587				(mode_info->atom_context->bios + data_offset +
 588				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
 589			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
 590			u32 size = limits->numEntries *
 591				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 592			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
 593				kzalloc(size, GFP_KERNEL);
 594			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
 595				amdgpu_free_extended_power_table(adev);
 596				return -ENOMEM;
 597			}
 598			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
 599				limits->numEntries;
 600			entry = &limits->entries[0];
 601			for (i = 0; i < limits->numEntries; i++) {
 602				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
 603					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
 604				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
 605					le16_to_cpu(entry->usVoltage);
 606				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
 607					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
 608			}
 609		}
 610		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
 611		    ext_hdr->usPPMTableOffset) {
 612			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
 613				(mode_info->atom_context->bios + data_offset +
 614				 le16_to_cpu(ext_hdr->usPPMTableOffset));
 615			adev->pm.dpm.dyn_state.ppm_table =
 616				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
 617			if (!adev->pm.dpm.dyn_state.ppm_table) {
 618				amdgpu_free_extended_power_table(adev);
 619				return -ENOMEM;
 620			}
 621			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
 622			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
 623				le16_to_cpu(ppm->usCpuCoreNumber);
 624			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
 625				le32_to_cpu(ppm->ulPlatformTDP);
 626			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
 627				le32_to_cpu(ppm->ulSmallACPlatformTDP);
 628			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
 629				le32_to_cpu(ppm->ulPlatformTDC);
 630			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
 631				le32_to_cpu(ppm->ulSmallACPlatformTDC);
 632			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
 633				le32_to_cpu(ppm->ulApuTDP);
 634			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
 635				le32_to_cpu(ppm->ulDGpuTDP);
 636			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
 637				le32_to_cpu(ppm->ulDGpuUlvPower);
 638			adev->pm.dpm.dyn_state.ppm_table->tj_max =
 639				le32_to_cpu(ppm->ulTjmax);
 640		}
 641		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
 642			ext_hdr->usACPTableOffset) {
 643			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
 644				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
 645				(mode_info->atom_context->bios + data_offset +
 646				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
 647			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
 648			u32 size = limits->numEntries *
 649				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 650			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
 651				kzalloc(size, GFP_KERNEL);
 652			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
 653				amdgpu_free_extended_power_table(adev);
 654				return -ENOMEM;
 655			}
 656			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
 657				limits->numEntries;
 658			entry = &limits->entries[0];
 659			for (i = 0; i < limits->numEntries; i++) {
 660				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
 661					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
 662				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
 663					le16_to_cpu(entry->usVoltage);
 664				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
 665					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
 666			}
 667		}
 668		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
 669			ext_hdr->usPowerTuneTableOffset) {
 670			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
 671					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 672			ATOM_PowerTune_Table *pt;
 673			adev->pm.dpm.dyn_state.cac_tdp_table =
 674				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
 675			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
 676				amdgpu_free_extended_power_table(adev);
 677				return -ENOMEM;
 678			}
 679			if (rev > 0) {
 680				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
 681					(mode_info->atom_context->bios + data_offset +
 682					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 683				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
 684					ppt->usMaximumPowerDeliveryLimit;
 685				pt = &ppt->power_tune_table;
 686			} else {
 687				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
 688					(mode_info->atom_context->bios + data_offset +
 689					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 690				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
 691				pt = &ppt->power_tune_table;
 692			}
 693			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
 694			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
 695				le16_to_cpu(pt->usConfigurableTDP);
 696			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
 697			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
 698				le16_to_cpu(pt->usBatteryPowerLimit);
 699			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
 700				le16_to_cpu(pt->usSmallPowerLimit);
 701			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
 702				le16_to_cpu(pt->usLowCACLeakage);
 703			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
 704				le16_to_cpu(pt->usHighCACLeakage);
 705		}
 706		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
 707				ext_hdr->usSclkVddgfxTableOffset) {
 708			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 709				(mode_info->atom_context->bios + data_offset +
 710				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
 711			ret = amdgpu_parse_clk_voltage_dep_table(
 712					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
 713					dep_table);
 714			if (ret) {
 715				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
 716				return ret;
 717			}
 718		}
 719	}
 720
 721	return 0;
 722}
 723
 724void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
 725{
 726	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
 727
 728	kfree(dyn_state->vddc_dependency_on_sclk.entries);
 729	kfree(dyn_state->vddci_dependency_on_mclk.entries);
 730	kfree(dyn_state->vddc_dependency_on_mclk.entries);
 731	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
 732	kfree(dyn_state->cac_leakage_table.entries);
 733	kfree(dyn_state->phase_shedding_limits_table.entries);
 734	kfree(dyn_state->ppm_table);
 735	kfree(dyn_state->cac_tdp_table);
 736	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
 737	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
 738	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
 739	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 740	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
 741}
 742
/*
 * Human-readable names for thermal controller chips, indexed by the
 * ATOM_PP_THERMALCONTROLLER_* id (controller->ucType) read from the
 * vbios powerplay table.  Ids outside this array are reported as
 * "Unknown" by amdgpu_add_thermal_controller(); "NONE" entries mark
 * ids with no attached controller chip.
 */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
 765
 766void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
 767{
 768	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 769	ATOM_PPLIB_POWERPLAYTABLE *power_table;
 770	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 771	ATOM_PPLIB_THERMALCONTROLLER *controller;
 772	struct amdgpu_i2c_bus_rec i2c_bus;
 773	u16 data_offset;
 774	u8 frev, crev;
 775
 776	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 777				   &frev, &crev, &data_offset))
 778		return;
 779	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
 780		(mode_info->atom_context->bios + data_offset);
 781	controller = &power_table->sThermalController;
 782
 783	/* add the i2c bus for thermal/fan chip */
 784	if (controller->ucType > 0) {
 785		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
 786			adev->pm.no_fan = true;
 787		adev->pm.fan_pulses_per_revolution =
 788			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
 789		if (adev->pm.fan_pulses_per_revolution) {
 790			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
 791			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
 792		}
 793		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
 794			DRM_INFO("Internal thermal controller %s fan control\n",
 795				 (controller->ucFanParameters &
 796				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 797			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
 798		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
 799			DRM_INFO("Internal thermal controller %s fan control\n",
 800				 (controller->ucFanParameters &
 801				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 802			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
 803		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
 804			DRM_INFO("Internal thermal controller %s fan control\n",
 805				 (controller->ucFanParameters &
 806				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 807			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
 808		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
 809			DRM_INFO("Internal thermal controller %s fan control\n",
 810				 (controller->ucFanParameters &
 811				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 812			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
 813		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
 814			DRM_INFO("Internal thermal controller %s fan control\n",
 815				 (controller->ucFanParameters &
 816				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 817			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
 818		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
 819			DRM_INFO("Internal thermal controller %s fan control\n",
 820				 (controller->ucFanParameters &
 821				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 822			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
 823		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
 824			DRM_INFO("Internal thermal controller %s fan control\n",
 825				 (controller->ucFanParameters &
 826				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 827			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
 828		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
 829			DRM_INFO("Internal thermal controller %s fan control\n",
 830				 (controller->ucFanParameters &
 831				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 832			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
 833		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
 834			DRM_INFO("External GPIO thermal controller %s fan control\n",
 835				 (controller->ucFanParameters &
 836				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 837			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
 838		} else if (controller->ucType ==
 839			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
 840			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
 841				 (controller->ucFanParameters &
 842				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 843			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
 844		} else if (controller->ucType ==
 845			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
 846			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
 847				 (controller->ucFanParameters &
 848				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 849			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 850		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 851			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 852				 pp_lib_thermal_controller_names[controller->ucType],
 853				 controller->ucI2cAddress >> 1,
 854				 (controller->ucFanParameters &
 855				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 856			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
 857			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
 858			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
 859			if (adev->pm.i2c_bus) {
 860				struct i2c_board_info info = { };
 861				const char *name = pp_lib_thermal_controller_names[controller->ucType];
 862				info.addr = controller->ucI2cAddress >> 1;
 863				strlcpy(info.type, name, sizeof(info.type));
 864				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
 865			}
 866		} else {
 867			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
 868				 controller->ucType,
 869				 controller->ucI2cAddress >> 1,
 870				 (controller->ucFanParameters &
 871				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 872		}
 873	}
 874}
 875
 876enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
 877						 u32 sys_mask,
 878						 enum amdgpu_pcie_gen asic_gen,
 879						 enum amdgpu_pcie_gen default_gen)
 880{
 881	switch (asic_gen) {
 882	case AMDGPU_PCIE_GEN1:
 883		return AMDGPU_PCIE_GEN1;
 884	case AMDGPU_PCIE_GEN2:
 885		return AMDGPU_PCIE_GEN2;
 886	case AMDGPU_PCIE_GEN3:
 887		return AMDGPU_PCIE_GEN3;
 888	default:
 889		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
 890		    (default_gen == AMDGPU_PCIE_GEN3))
 891			return AMDGPU_PCIE_GEN3;
 892		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
 893			 (default_gen == AMDGPU_PCIE_GEN2))
 894			return AMDGPU_PCIE_GEN2;
 895		else
 896			return AMDGPU_PCIE_GEN1;
 897	}
 898	return AMDGPU_PCIE_GEN1;
 899}
 900
 901struct amd_vce_state*
 902amdgpu_get_vce_clock_state(void *handle, u32 idx)
 903{
 904	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 905
 906	if (idx < adev->pm.dpm.num_of_vce_states)
 907		return &adev->pm.dpm.vce_states[idx];
 
 
 908
 909	return NULL;
 910}
 911
 912int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 913{
 914	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
 915
 916	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 917}
 918
 919int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 920{
 921	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
 922
 923	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 924}
 925
/*
 * amdgpu_dpm_set_powergating_by_smu - power gate/ungate an IP block via the SMU.
 * @adev: amdgpu device
 * @block_type: AMD_IP_BLOCK_TYPE_* id of the block to gate
 * @gate: true to power the block down, false to power it up
 *
 * Dispatches to the powerplay set_powergating_by_smu hook.  UVD/VCE are
 * serialized under adev->pm.mutex (see the deadlock note below); the other
 * supported blocks call the hook without taking the lock.  Unlisted block
 * types are ignored and 0 is returned.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 *     NFO: task ocltst:2028 blocked for more than 120 seconds.
			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 *     echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 *     cltst          D    0  2028   2026 0x00000000
			 *     all Trace:
			 *     __schedule+0x2c0/0x870
			 *     schedule+0x2c/0x70
			 *     schedule_preempt_disabled+0xe/0x10
			 *     __mutex_lock.isra.9+0x26d/0x4e0
			 *     __mutex_lock_slowpath+0x13/0x20
			 *     ? __mutex_lock_slowpath+0x13/0x20
			 *     mutex_lock+0x2f/0x40
			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		/* no pm.mutex here: callers of these paths may already hold it */
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		}
		break;
	default:
		/* unsupported block types are silently ignored; ret stays 0 */
		break;
	}

	return ret;
}
 984
 985int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 986{
 987	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 988	void *pp_handle = adev->powerplay.pp_handle;
 989	int ret = 0;
 990
 991	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 992		return -ENOENT;
 993
 
 
 994	/* enter BACO state */
 995	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 996
 
 
 997	return ret;
 998}
 999
1000int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1001{
1002	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1003	void *pp_handle = adev->powerplay.pp_handle;
1004	int ret = 0;
1005
1006	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1007		return -ENOENT;
1008
 
 
1009	/* exit BACO state */
1010	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1011
 
 
1012	return ret;
1013}
1014
1015int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1016			     enum pp_mp1_state mp1_state)
1017{
1018	int ret = 0;
1019	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1020
1021	if (pp_funcs && pp_funcs->set_mp1_state) {
 
 
1022		ret = pp_funcs->set_mp1_state(
1023				adev->powerplay.pp_handle,
1024				mp1_state);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1025	}
1026
1027	return ret;
1028}
1029
1030bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1031{
1032	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1033	void *pp_handle = adev->powerplay.pp_handle;
1034	bool baco_cap;
1035
1036	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1037		return false;
1038
1039	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
 
 
 
 
 
 
 
 
1040		return false;
1041
1042	return baco_cap;
 
 
 
 
 
 
1043}
1044
1045int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1046{
1047	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1048	void *pp_handle = adev->powerplay.pp_handle;
 
1049
1050	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1051		return -ENOENT;
1052
1053	return pp_funcs->asic_reset_mode_2(pp_handle);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1054}
1055
1056int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1057{
1058	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1059	void *pp_handle = adev->powerplay.pp_handle;
1060	int ret = 0;
1061
1062	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1063		return -ENOENT;
1064
 
 
1065	/* enter BACO state */
1066	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1067	if (ret)
1068		return ret;
1069
1070	/* exit BACO state */
1071	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1072	if (ret)
1073		return ret;
1074
1075	return 0;
 
 
1076}
1077
1078bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1079{
1080	struct smu_context *smu = &adev->smu;
 
1081
1082	if (is_support_sw_smu(adev))
1083		return smu_mode1_reset_is_support(smu);
 
 
 
1084
1085	return false;
1086}
1087
1088int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1089{
1090	struct smu_context *smu = &adev->smu;
 
1091
1092	if (is_support_sw_smu(adev))
1093		return smu_mode1_reset(smu);
 
 
 
1094
1095	return -EOPNOTSUPP;
1096}
1097
1098int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1099				    enum PP_SMC_POWER_PROFILE type,
1100				    bool en)
1101{
1102	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1103	int ret = 0;
1104
1105	if (amdgpu_sriov_vf(adev))
1106		return 0;
1107
1108	if (pp_funcs && pp_funcs->switch_power_profile)
 
1109		ret = pp_funcs->switch_power_profile(
1110			adev->powerplay.pp_handle, type, en);
 
 
1111
1112	return ret;
1113}
1114
1115int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1116			       uint32_t pstate)
1117{
1118	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1119	int ret = 0;
1120
1121	if (pp_funcs && pp_funcs->set_xgmi_pstate)
 
1122		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1123								pstate);
 
 
1124
1125	return ret;
1126}
1127
1128int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1129			     uint32_t cstate)
1130{
1131	int ret = 0;
1132	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1133	void *pp_handle = adev->powerplay.pp_handle;
1134
1135	if (pp_funcs && pp_funcs->set_df_cstate)
 
1136		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 
 
1137
1138	return ret;
1139}
1140
1141int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1142{
1143	struct smu_context *smu = &adev->smu;
 
1144
1145	if (is_support_sw_smu(adev))
1146		return smu_allow_xgmi_power_down(smu, en);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1147
1148	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1149}
1150
1151int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1152{
1153	void *pp_handle = adev->powerplay.pp_handle;
1154	const struct amd_pm_funcs *pp_funcs =
1155			adev->powerplay.pp_funcs;
1156	int ret = 0;
1157
1158	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
 
1159		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 
 
1160
1161	return ret;
1162}
1163
1164int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1165				      uint32_t msg_id)
1166{
1167	void *pp_handle = adev->powerplay.pp_handle;
1168	const struct amd_pm_funcs *pp_funcs =
1169			adev->powerplay.pp_funcs;
1170	int ret = 0;
1171
1172	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
 
1173		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1174						       msg_id);
 
 
1175
1176	return ret;
1177}
1178
1179int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1180				  bool acquire)
1181{
1182	void *pp_handle = adev->powerplay.pp_handle;
1183	const struct amd_pm_funcs *pp_funcs =
1184			adev->powerplay.pp_funcs;
1185	int ret = -EOPNOTSUPP;
1186
1187	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
 
1188		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1189						   acquire);
 
 
1190
1191	return ret;
1192}
1193
1194void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1195{
1196	if (adev->pm.dpm_enabled) {
1197		mutex_lock(&adev->pm.mutex);
1198		if (power_supply_is_system_supplied() > 0)
1199			adev->pm.ac_power = true;
1200		else
1201			adev->pm.ac_power = false;
 
1202		if (adev->powerplay.pp_funcs &&
1203		    adev->powerplay.pp_funcs->enable_bapm)
1204			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1205		mutex_unlock(&adev->pm.mutex);
1206
1207		if (is_support_sw_smu(adev))
1208			smu_set_ac_dc(&adev->smu);
 
 
1209	}
1210}
1211
1212int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1213			   void *data, uint32_t *size)
1214{
1215	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1216	int ret = 0;
1217
1218	if (!data || !size)
1219		return -EINVAL;
1220
1221	if (pp_funcs && pp_funcs->read_sensor)
1222		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1223								    sensor, data, size);
1224	else
1225		ret = -EINVAL;
 
 
 
1226
1227	return ret;
1228}
1229
/*
 * amdgpu_dpm_thermal_work_handler - deferred thermal-interrupt work.
 * @work: embedded work_struct inside adev->pm.dpm.thermal
 *
 * Decides between the internal-thermal power state and the user's state
 * based on the current GPU temperature (or the interrupt edge direction
 * when the sensor cannot be read), records the choice under pm.mutex,
 * and kicks a clock/power-state re-evaluation.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		/* sensor read failed: fall back to the interrupt direction */
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
1262
/*
 * amdgpu_dpm_pick_power_state - select the best vbios power state.
 * @adev: amdgpu device
 * @dpm_state: requested state class (user or internal)
 *
 * Scans adev->pm.dpm.ps for a state matching @dpm_state, honoring
 * single-display-only states only when at most one crtc is active (and
 * the vblank period is long enough for mclk switching).  If no state
 * matches, @dpm_state is progressively downgraded via the fallback
 * switch at the bottom and the scan restarts.  Returns NULL only when
 * even the fallbacks find nothing.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* prefer the dedicated UVD state when one was found at parse time */
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
1396
/*
 * amdgpu_dpm_change_power_state_locked - apply the requested dpm state.
 *
 * Caller must hold adev->pm.mutex (see amdgpu_pm_compute_clocks).
 * Resolves the effective state (user state unless thermal/uvd overrides
 * are active), picks a matching vbios power state, and runs the legacy
 * pre/set/post power-state sequence, skipping the switch entirely when
 * the backend reports the new state equals the current one.  Finally
 * re-asserts the forced performance level (forcing LOW while thermally
 * throttled).
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* amdgpu_dpm == 1: verbose state-switch logging requested on cmdline */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* backend may report the requested state as equivalent; then skip */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
1465
/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks/power state for the
 * current display configuration and GPU load.
 * @adev: amdgpu device
 *
 * Updates display bandwidth, drains all ring fences (so the state switch
 * happens with idle hardware), then either dispatches a display-config
 * task to the powerplay backend or, on the legacy path, re-picks the dpm
 * power state under pm.mutex.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* wait for outstanding work on every ready ring before switching */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		/* powerplay/SMU path */
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* we have issues with mclk switching with
			 * refresh rates over 120 hz on the non-DC code.
			 */
			if (adev->pm.pm_display_cfg.vrefresh > 120)
				adev->pm.pm_display_cfg.min_vblank_time = 0;
			if (adev->powerplay.pp_funcs->display_configuration_change)
				adev->powerplay.pp_funcs->display_configuration_change(
							adev->powerplay.pp_handle,
							&adev->pm.pm_display_cfg);
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
	} else {
		/* legacy dpm path */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		amdgpu_dpm_change_power_state_locked(adev);
		mutex_unlock(&adev->pm.mutex);
	}
}
1508
/*
 * amdgpu_dpm_enable_uvd - power the UVD (video decode) block up or down.
 * @adev: amdgpu device
 * @enable: true to power up, false to power down
 *
 * SI parts route this through the legacy dpm state machine; everything
 * else uses SMU powergating (note gate == !enable).  On Stoney the NB
 * low-memory pstate is additionally toggled for >= 4K decode widths.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		/* gating the block is the inverse of enabling it */
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
			adev->uvd.decode_image_width >= WIDTH_4K) {
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}
1543
1544void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1545{
1546	int ret = 0;
1547
1548	if (adev->family == AMDGPU_FAMILY_SI) {
1549		mutex_lock(&adev->pm.mutex);
1550		if (enable) {
1551			adev->pm.dpm.vce_active = true;
1552			/* XXX select vce level based on ring/task */
1553			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1554		} else {
1555			adev->pm.dpm.vce_active = false;
1556		}
1557		mutex_unlock(&adev->pm.mutex);
1558
1559		amdgpu_pm_compute_clocks(adev);
1560	} else {
1561		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1562		if (ret)
1563			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
1564				  enable ? "enable" : "disable", ret);
1565	}
1566}
1567
1568void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1569{
1570	int i;
1571
1572	if (adev->powerplay.pp_funcs->print_power_state == NULL)
1573		return;
 
1574
1575	for (i = 0; i < adev->pm.dpm.num_ps; i++)
1576		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1577
 
1578}
1579
1580void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
1581{
1582	int ret = 0;
1583
1584	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1585	if (ret)
1586		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
1587			  enable ? "enable" : "disable", ret);
1588}
1589
 
 
 
 
 
 
 
 
 
 
1590int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
1591{
1592	int r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1593
1594	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1595		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1596		if (r) {
1597			pr_err("smu firmware loading failed\n");
1598			return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1599		}
 
 
 
 
 
1600
1601		if (smu_version)
1602			*smu_version = adev->pm.fw_version;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1603	}
1604
 
 
 
 
 
 
 
 
 
 
 
 
1605	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1606}
v6.8
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_i2c.h"
  28#include "amdgpu_dpm.h"
  29#include "atom.h"
  30#include "amd_pcie.h"
  31#include "amdgpu_display.h"
  32#include "hwmgr.h"
  33#include <linux/power_supply.h>
  34#include "amdgpu_smu.h"
  35
/* Invoke the backend's enable_bapm hook. The caller must have verified the
 * hook is non-NULL first (see amdgpu_pm_acpi_event_handler below). */
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

/* Legacy dpm paths appear to use the adev pointer itself as pp_handle —
 * NOTE(review): confirm against the legacy dpm setup code. */
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  40
  41int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 
 
 
  42{
  43	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  44	int ret = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  45
  46	if (!pp_funcs->get_sclk)
  47		return 0;
 
 
  48
  49	mutex_lock(&adev->pm.mutex);
  50	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
  51				 low);
  52	mutex_unlock(&adev->pm.mutex);
  53
  54	return ret;
  55}
  56
  57int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
  58{
  59	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  60	int ret = 0;
  61
  62	if (!pp_funcs->get_mclk)
  63		return 0;
  64
  65	mutex_lock(&adev->pm.mutex);
  66	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
  67				 low);
  68	mutex_unlock(&adev->pm.mutex);
  69
  70	return ret;
  71}
  72
/**
 * amdgpu_dpm_set_powergating_by_smu - gate/ungate an IP block via the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block to power gate
 * @gate: true to gate (power off), false to ungate (power on)
 *
 * The last successfully-applied state per block is cached in
 * adev->pm.pwr_state[] so redundant requests become no-ops.
 *
 * Returns 0 on success (or when already in the target state), negative
 * error code from the backend otherwise.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	/* Fast path: nothing to do if the cached state already matches. */
	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	/* Only the block types listed below are routed to the SMU; anything
	 * else silently succeeds (default case leaves ret == 0). */
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	/* Update the cache only when the backend accepted the request. */
	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
 112
/**
 * amdgpu_dpm_set_gfx_power_up_by_imu - ask the SMU to power up GFX via IMU
 * @adev: amdgpu device pointer
 *
 * Returns the result of smu_set_gfx_power_up_by_imu().
 */
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	/* NOTE(review): fixed settle delay after the request; the reason for
	 * 10 ms is not visible here — confirm against the SMU documentation. */
	msleep(10);

	return ret;
}
 126
 127int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 128{
 129	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 130	void *pp_handle = adev->powerplay.pp_handle;
 131	int ret = 0;
 132
 133	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 134		return -ENOENT;
 135
 136	mutex_lock(&adev->pm.mutex);
 137
 138	/* enter BACO state */
 139	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 140
 141	mutex_unlock(&adev->pm.mutex);
 142
 143	return ret;
 144}
 145
 146int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 147{
 148	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 149	void *pp_handle = adev->powerplay.pp_handle;
 150	int ret = 0;
 151
 152	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 153		return -ENOENT;
 154
 155	mutex_lock(&adev->pm.mutex);
 156
 157	/* exit BACO state */
 158	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 159
 160	mutex_unlock(&adev->pm.mutex);
 161
 162	return ret;
 163}
 164
 165int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 166			     enum pp_mp1_state mp1_state)
 167{
 168	int ret = 0;
 169	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 170
 171	if (pp_funcs && pp_funcs->set_mp1_state) {
 172		mutex_lock(&adev->pm.mutex);
 173
 174		ret = pp_funcs->set_mp1_state(
 175				adev->powerplay.pp_handle,
 176				mp1_state);
 177
 178		mutex_unlock(&adev->pm.mutex);
 179	}
 180
 181	return ret;
 182}
 183
 184int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
 185{
 186	int ret = 0;
 187	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 188
 189	if (pp_funcs && pp_funcs->notify_rlc_state) {
 190		mutex_lock(&adev->pm.mutex);
 191
 192		ret = pp_funcs->notify_rlc_state(
 193				adev->powerplay.pp_handle,
 194				en);
 195
 196		mutex_unlock(&adev->pm.mutex);
 197	}
 198
 199	return ret;
 200}
 201
/**
 * amdgpu_dpm_is_baco_supported - check whether the ASIC can use BACO
 * @adev: amdgpu device pointer
 *
 * Returns false when the backend lacks the capability query, or while the
 * device is in S3 (see the workaround comment below); otherwise returns
 * the backend's answer.
 */
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
 230
 231int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 232{
 233	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 234	void *pp_handle = adev->powerplay.pp_handle;
 235	int ret = 0;
 236
 237	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
 238		return -ENOENT;
 239
 240	mutex_lock(&adev->pm.mutex);
 241
 242	ret = pp_funcs->asic_reset_mode_2(pp_handle);
 243
 244	mutex_unlock(&adev->pm.mutex);
 245
 246	return ret;
 247}
 248
 249int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
 250{
 251	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 252	void *pp_handle = adev->powerplay.pp_handle;
 253	int ret = 0;
 254
 255	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
 256		return -ENOENT;
 257
 258	mutex_lock(&adev->pm.mutex);
 259
 260	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
 261
 262	mutex_unlock(&adev->pm.mutex);
 263
 264	return ret;
 265}
 266
/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu device pointer
 *
 * Enters and then exits the BACO state under a single hold of
 * adev->pm.mutex. If entry fails, exit is not attempted.
 *
 * Returns 0 on success, -ENOENT when the backend lacks BACO support,
 * or the backend's error code.
 */
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}
 290
 291bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
 292{
 293	struct smu_context *smu = adev->powerplay.pp_handle;
 294	bool support_mode1_reset = false;
 295
 296	if (is_support_sw_smu(adev)) {
 297		mutex_lock(&adev->pm.mutex);
 298		support_mode1_reset = smu_mode1_reset_is_support(smu);
 299		mutex_unlock(&adev->pm.mutex);
 300	}
 301
 302	return support_mode1_reset;
 303}
 304
 305int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
 306{
 307	struct smu_context *smu = adev->powerplay.pp_handle;
 308	int ret = -EOPNOTSUPP;
 309
 310	if (is_support_sw_smu(adev)) {
 311		mutex_lock(&adev->pm.mutex);
 312		ret = smu_mode1_reset(smu);
 313		mutex_unlock(&adev->pm.mutex);
 314	}
 315
 316	return ret;
 317}
 318
 319int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 320				    enum PP_SMC_POWER_PROFILE type,
 321				    bool en)
 322{
 323	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 324	int ret = 0;
 325
 326	if (amdgpu_sriov_vf(adev))
 327		return 0;
 328
 329	if (pp_funcs && pp_funcs->switch_power_profile) {
 330		mutex_lock(&adev->pm.mutex);
 331		ret = pp_funcs->switch_power_profile(
 332			adev->powerplay.pp_handle, type, en);
 333		mutex_unlock(&adev->pm.mutex);
 334	}
 335
 336	return ret;
 337}
 338
 339int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 340			       uint32_t pstate)
 341{
 342	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 343	int ret = 0;
 344
 345	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
 346		mutex_lock(&adev->pm.mutex);
 347		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
 348								pstate);
 349		mutex_unlock(&adev->pm.mutex);
 350	}
 351
 352	return ret;
 353}
 354
 355int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 356			     uint32_t cstate)
 357{
 358	int ret = 0;
 359	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 360	void *pp_handle = adev->powerplay.pp_handle;
 361
 362	if (pp_funcs && pp_funcs->set_df_cstate) {
 363		mutex_lock(&adev->pm.mutex);
 364		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 365		mutex_unlock(&adev->pm.mutex);
 366	}
 367
 368	return ret;
 369}
 370
/**
 * amdgpu_dpm_get_xgmi_plpd_mode - read the current XGMI per-link power down mode
 * @adev: amdgpu device pointer
 * @mode_desc: optional out pointer receiving a static string naming the mode
 *
 * Returns the XGMI_PLPD_* mode; XGMI_PLPD_NONE for non-SW-SMU backends.
 *
 * NOTE(review): smu->plpd_mode is read here without taking adev->pm.mutex,
 * unlike the setter below — confirm this lockless read is intentional.
 */
int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int mode = XGMI_PLPD_NONE;

	if (is_support_sw_smu(adev)) {
		mode = smu->plpd_mode;
		/* Callers that only want the numeric mode pass NULL. */
		if (mode_desc == NULL)
			return mode;
		switch (smu->plpd_mode) {
		case XGMI_PLPD_DISALLOW:
			*mode_desc = "disallow";
			break;
		case XGMI_PLPD_DEFAULT:
			*mode_desc = "default";
			break;
		case XGMI_PLPD_OPTIMIZED:
			*mode_desc = "optimized";
			break;
		case XGMI_PLPD_NONE:
		default:
			*mode_desc = "none";
			break;
		}
	}

	return mode;
}
 399
 400int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
 401{
 402	struct smu_context *smu = adev->powerplay.pp_handle;
 403	int ret = -EOPNOTSUPP;
 404
 405	if (is_support_sw_smu(adev)) {
 406		mutex_lock(&adev->pm.mutex);
 407		ret = smu_set_xgmi_plpd_mode(smu, mode);
 408		mutex_unlock(&adev->pm.mutex);
 409	}
 410
 411	return ret;
 412}
 413
 414int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 415{
 416	void *pp_handle = adev->powerplay.pp_handle;
 417	const struct amd_pm_funcs *pp_funcs =
 418			adev->powerplay.pp_funcs;
 419	int ret = 0;
 420
 421	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
 422		mutex_lock(&adev->pm.mutex);
 423		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 424		mutex_unlock(&adev->pm.mutex);
 425	}
 426
 427	return ret;
 428}
 429
 430int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
 431				      uint32_t msg_id)
 432{
 433	void *pp_handle = adev->powerplay.pp_handle;
 434	const struct amd_pm_funcs *pp_funcs =
 435			adev->powerplay.pp_funcs;
 436	int ret = 0;
 437
 438	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
 439		mutex_lock(&adev->pm.mutex);
 440		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
 441						       msg_id);
 442		mutex_unlock(&adev->pm.mutex);
 443	}
 444
 445	return ret;
 446}
 447
 448int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 449				  bool acquire)
 450{
 451	void *pp_handle = adev->powerplay.pp_handle;
 452	const struct amd_pm_funcs *pp_funcs =
 453			adev->powerplay.pp_funcs;
 454	int ret = -EOPNOTSUPP;
 455
 456	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
 457		mutex_lock(&adev->pm.mutex);
 458		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
 459						   acquire);
 460		mutex_unlock(&adev->pm.mutex);
 461	}
 462
 463	return ret;
 464}
 465
/**
 * amdgpu_pm_acpi_event_handler - react to an ACPI power-source event
 * @adev: amdgpu device pointer
 *
 * Re-samples whether the system is on AC power, caches the result in
 * adev->pm.ac_power, and notifies whichever PM backend is active
 * (enable_bapm hook for powerplay, smu_set_ac_dc for SW SMU).
 * No-op while dpm is disabled.
 */
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}
 485
 486int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
 487			   void *data, uint32_t *size)
 488{
 489	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 490	int ret = -EINVAL;
 491
 492	if (!data || !size)
 493		return -EINVAL;
 494
 495	if (pp_funcs && pp_funcs->read_sensor) {
 496		mutex_lock(&adev->pm.mutex);
 497		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
 498					    sensor,
 499					    data,
 500					    size);
 501		mutex_unlock(&adev->pm.mutex);
 502	}
 503
 504	return ret;
 505}
 506
 507int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
 508{
 509	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 510	int ret = -EOPNOTSUPP;
 
 
 
 
 
 
 
 511
 512	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
 513		mutex_lock(&adev->pm.mutex);
 514		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
 515		mutex_unlock(&adev->pm.mutex);
 
 
 
 
 
 516	}
 
 
 
 
 
 
 
 517
 518	return ret;
 519}
 520
 521int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
 
 522{
 523	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 524	int ret = -EOPNOTSUPP;
 
 
 
 
 
 
 
 
 
 525
 526	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
 527		mutex_lock(&adev->pm.mutex);
 528		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
 529		mutex_unlock(&adev->pm.mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 530	}
 531
 532	return ret;
 533}
 534
 535void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 536{
 537	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 538	int i;
 
 
 539
 
 540	if (!adev->pm.dpm_enabled)
 541		return;
 542
 543	if (!pp_funcs->pm_compute_clocks)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 544		return;
 545
 546	if (adev->mode_info.num_crtc)
 547		amdgpu_display_bandwidth_update(adev);
 548
 549	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 550		struct amdgpu_ring *ring = adev->rings[i];
 551		if (ring && ring->sched.ready)
 552			amdgpu_fence_wait_empty(ring);
 553	}
 554
 555	mutex_lock(&adev->pm.mutex);
 556	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
 557	mutex_unlock(&adev->pm.mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 558}
 559
/**
 * amdgpu_dpm_enable_uvd - power UVD up or down
 * @adev: amdgpu device pointer
 * @enable: true to power the block up
 *
 * SI parts use the legacy dpm state machine (uvd_active + recompute);
 * everything else goes through SMU power gating, where "gate" is the
 * inverse of @enable.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		/* Recompute outside the lock; it takes pm.mutex itself. */
		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}
 583
/**
 * amdgpu_dpm_enable_vce - power VCE up or down
 * @adev: amdgpu device pointer
 * @enable: true to power the block up
 *
 * SI parts use the legacy dpm state machine (vce_active + recompute);
 * everything else goes through SMU power gating, where "gate" is the
 * inverse of @enable.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		/* Recompute outside the lock; it takes pm.mutex itself. */
		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}
 608
 609void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
 610{
 611	int ret = 0;
 612
 613	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
 614	if (ret)
 615		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
 616			  enable ? "enable" : "disable", ret);
 617}
 618
 619void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
 620{
 621	int ret = 0;
 622
 623	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
 624	if (ret)
 625		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
 626			  enable ? "enable" : "disable", ret);
 627}
 628
/**
 * amdgpu_pm_load_smu_firmware - load SMU firmware via the PM backend
 * @adev: amdgpu device pointer
 * @smu_version: optional out pointer receiving adev->pm.fw_version on success
 *
 * Returns 0 on success (or when the backend has no load_firmware hook),
 * otherwise the backend's error code.
 */
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	/* fw_version is only reported after a successful load. */
	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}
 651
 652int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
 653{
 654	int ret = 0;
 655
 656	if (is_support_sw_smu(adev)) {
 657		mutex_lock(&adev->pm.mutex);
 658		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
 659						 enable);
 660		mutex_unlock(&adev->pm.mutex);
 661	}
 662
 663	return ret;
 664}
 665
 666int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
 667{
 668	struct smu_context *smu = adev->powerplay.pp_handle;
 669	int ret = 0;
 670
 671	if (!is_support_sw_smu(adev))
 672		return -EOPNOTSUPP;
 673
 674	mutex_lock(&adev->pm.mutex);
 675	ret = smu_send_hbm_bad_pages_num(smu, size);
 676	mutex_unlock(&adev->pm.mutex);
 677
 678	return ret;
 679}
 680
 681int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
 682{
 683	struct smu_context *smu = adev->powerplay.pp_handle;
 684	int ret = 0;
 685
 686	if (!is_support_sw_smu(adev))
 687		return -EOPNOTSUPP;
 688
 689	mutex_lock(&adev->pm.mutex);
 690	ret = smu_send_hbm_bad_channel_flag(smu, size);
 691	mutex_unlock(&adev->pm.mutex);
 692
 693	return ret;
 694}
 695
 696int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 697				  enum pp_clock_type type,
 698				  uint32_t *min,
 699				  uint32_t *max)
 700{
 701	int ret = 0;
 702
 703	if (type != PP_SCLK)
 704		return -EINVAL;
 705
 706	if (!is_support_sw_smu(adev))
 707		return -EOPNOTSUPP;
 708
 709	mutex_lock(&adev->pm.mutex);
 710	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
 711				     SMU_SCLK,
 712				     min,
 713				     max);
 714	mutex_unlock(&adev->pm.mutex);
 715
 716	return ret;
 717}
 718
 719int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 720				   enum pp_clock_type type,
 721				   uint32_t min,
 722				   uint32_t max)
 723{
 724	struct smu_context *smu = adev->powerplay.pp_handle;
 725	int ret = 0;
 726
 727	if (type != PP_SCLK)
 728		return -EINVAL;
 729
 730	if (!is_support_sw_smu(adev))
 731		return -EOPNOTSUPP;
 732
 733	mutex_lock(&adev->pm.mutex);
 734	ret = smu_set_soft_freq_range(smu,
 735				      SMU_SCLK,
 736				      min,
 737				      max);
 738	mutex_unlock(&adev->pm.mutex);
 739
 740	return ret;
 741}
 742
 743int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
 744{
 745	struct smu_context *smu = adev->powerplay.pp_handle;
 746	int ret = 0;
 747
 748	if (!is_support_sw_smu(adev))
 749		return 0;
 750
 751	mutex_lock(&adev->pm.mutex);
 752	ret = smu_write_watermarks_table(smu);
 753	mutex_unlock(&adev->pm.mutex);
 754
 755	return ret;
 756}
 757
 758int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
 759			      enum smu_event_type event,
 760			      uint64_t event_arg)
 761{
 762	struct smu_context *smu = adev->powerplay.pp_handle;
 763	int ret = 0;
 764
 765	if (!is_support_sw_smu(adev))
 766		return -EOPNOTSUPP;
 767
 768	mutex_lock(&adev->pm.mutex);
 769	ret = smu_wait_for_event(smu, event, event_arg);
 770	mutex_unlock(&adev->pm.mutex);
 771
 772	return ret;
 773}
 774
 775int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
 776{
 777	struct smu_context *smu = adev->powerplay.pp_handle;
 778	int ret = 0;
 779
 780	if (!is_support_sw_smu(adev))
 781		return -EOPNOTSUPP;
 782
 783	mutex_lock(&adev->pm.mutex);
 784	ret = smu_set_residency_gfxoff(smu, value);
 785	mutex_unlock(&adev->pm.mutex);
 786
 787	return ret;
 788}
 789
 790int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
 791{
 792	struct smu_context *smu = adev->powerplay.pp_handle;
 793	int ret = 0;
 794
 795	if (!is_support_sw_smu(adev))
 796		return -EOPNOTSUPP;
 797
 798	mutex_lock(&adev->pm.mutex);
 799	ret = smu_get_residency_gfxoff(smu, value);
 800	mutex_unlock(&adev->pm.mutex);
 801
 802	return ret;
 803}
 804
 805int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
 806{
 807	struct smu_context *smu = adev->powerplay.pp_handle;
 808	int ret = 0;
 809
 810	if (!is_support_sw_smu(adev))
 811		return -EOPNOTSUPP;
 812
 813	mutex_lock(&adev->pm.mutex);
 814	ret = smu_get_entrycount_gfxoff(smu, value);
 815	mutex_unlock(&adev->pm.mutex);
 816
 817	return ret;
 818}
 819
 820int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
 821{
 822	struct smu_context *smu = adev->powerplay.pp_handle;
 823	int ret = 0;
 824
 825	if (!is_support_sw_smu(adev))
 826		return -EOPNOTSUPP;
 827
 828	mutex_lock(&adev->pm.mutex);
 829	ret = smu_get_status_gfxoff(smu, value);
 830	mutex_unlock(&adev->pm.mutex);
 831
 832	return ret;
 833}
 834
 835uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 836{
 837	struct smu_context *smu = adev->powerplay.pp_handle;
 838
 839	if (!is_support_sw_smu(adev))
 840		return 0;
 841
 842	return atomic64_read(&smu->throttle_int_counter);
 843}
 844
/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
 *
 * Forwards the state to the backend's gfx_state_change_set hook when
 * present; otherwise does nothing.
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}
 860
 861int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 862			    void *umc_ecc)
 863{
 864	struct smu_context *smu = adev->powerplay.pp_handle;
 865	int ret = 0;
 866
 867	if (!is_support_sw_smu(adev))
 868		return -EOPNOTSUPP;
 869
 870	mutex_lock(&adev->pm.mutex);
 871	ret = smu_get_ecc_info(smu, umc_ecc);
 872	mutex_unlock(&adev->pm.mutex);
 873
 874	return ret;
 875}
 876
 877struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
 878						     uint32_t idx)
 879{
 880	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 881	struct amd_vce_state *vstate = NULL;
 882
 883	if (!pp_funcs->get_vce_clock_state)
 884		return NULL;
 885
 886	mutex_lock(&adev->pm.mutex);
 887	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
 888					       idx);
 889	mutex_unlock(&adev->pm.mutex);
 890
 891	return vstate;
 892}
 893
 894void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 895					enum amd_pm_state_type *state)
 896{
 897	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 898
 899	mutex_lock(&adev->pm.mutex);
 900
 901	if (!pp_funcs->get_current_power_state) {
 902		*state = adev->pm.dpm.user_state;
 903		goto out;
 904	}
 905
 906	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
 907	if (*state < POWER_STATE_TYPE_DEFAULT ||
 908	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
 909		*state = adev->pm.dpm.user_state;
 910
 911out:
 912	mutex_unlock(&adev->pm.mutex);
 913}
 914
/**
 * amdgpu_dpm_set_power_state - record and apply a user power state request
 * @adev: amdgpu device pointer
 * @state: requested power state
 *
 * Stores the request in adev->pm.dpm.user_state. SW SMU backends pick it
 * up elsewhere; other backends are poked via the dispatch-task path, with
 * a direct clock recompute as fallback when dispatch is unsupported.
 */
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}
 930
 931enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
 932{
 933	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 934	enum amd_dpm_forced_level level;
 935
 936	if (!pp_funcs)
 937		return AMD_DPM_FORCED_LEVEL_AUTO;
 938
 939	mutex_lock(&adev->pm.mutex);
 940	if (pp_funcs->get_performance_level)
 941		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
 942	else
 943		level = adev->pm.dpm.forced_level;
 944	mutex_unlock(&adev->pm.mutex);
 945
 946	return level;
 947}
 948
/**
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu device pointer
 * @level: the level to force
 *
 * No-op when the backend lacks the hook or the level is already current;
 * rejected (-EINVAL) while thermal throttling is active. Handles the
 * gfxoff workaround for first-generation Raven and the clock/power
 * gating transitions around entering/leaving UMD pstate profiles.
 */
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	/* The set of profiling ("UMD pstate") levels. */
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	/* First-generation Raven: keep gfxoff off while in manual mode. */
	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* PROFILE_EXIT only makes sense when currently in a profile level. */
	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	/* Cache the new level only after the backend accepted it. */
	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
1018
1019int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
1020				 struct pp_states_info *states)
1021{
1022	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1023	int ret = 0;
1024
1025	if (!pp_funcs->get_pp_num_states)
1026		return -EOPNOTSUPP;
1027
1028	mutex_lock(&adev->pm.mutex);
1029	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
1030					  states);
1031	mutex_unlock(&adev->pm.mutex);
1032
1033	return ret;
1034}
1035
1036int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
1037			      enum amd_pp_task task_id,
1038			      enum amd_pm_state_type *user_state)
1039{
1040	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1041	int ret = 0;
1042
1043	if (!pp_funcs->dispatch_tasks)
1044		return -EOPNOTSUPP;
1045
1046	mutex_lock(&adev->pm.mutex);
1047	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
1048				       task_id,
1049				       user_state);
1050	mutex_unlock(&adev->pm.mutex);
1051
1052	return ret;
1053}
1054
1055int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1056{
1057	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1058	int ret = 0;
1059
1060	if (!pp_funcs->get_pp_table)
1061		return 0;
1062
1063	mutex_lock(&adev->pm.mutex);
1064	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
1065				     table);
1066	mutex_unlock(&adev->pm.mutex);
1067
1068	return ret;
1069}
1070
1071int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
1072				      uint32_t type,
1073				      long *input,
1074				      uint32_t size)
1075{
1076	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1077	int ret = 0;
1078
1079	if (!pp_funcs->set_fine_grain_clk_vol)
1080		return 0;
1081
1082	mutex_lock(&adev->pm.mutex);
1083	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1084					       type,
1085					       input,
1086					       size);
1087	mutex_unlock(&adev->pm.mutex);
1088
1089	return ret;
1090}
1091
1092int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
1093				  uint32_t type,
1094				  long *input,
1095				  uint32_t size)
1096{
1097	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1098	int ret = 0;
1099
1100	if (!pp_funcs->odn_edit_dpm_table)
1101		return 0;
1102
1103	mutex_lock(&adev->pm.mutex);
1104	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1105					   type,
1106					   input,
1107					   size);
1108	mutex_unlock(&adev->pm.mutex);
1109
1110	return ret;
1111}
1112
1113int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1114				  enum pp_clock_type type,
1115				  char *buf)
1116{
1117	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1118	int ret = 0;
1119
1120	if (!pp_funcs->print_clock_levels)
1121		return 0;
1122
1123	mutex_lock(&adev->pm.mutex);
1124	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1125					   type,
1126					   buf);
1127	mutex_unlock(&adev->pm.mutex);
1128
1129	return ret;
1130}
1131
1132int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1133				  enum pp_clock_type type,
1134				  char *buf,
1135				  int *offset)
1136{
1137	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1138	int ret = 0;
1139
1140	if (!pp_funcs->emit_clock_levels)
1141		return -ENOENT;
1142
1143	mutex_lock(&adev->pm.mutex);
1144	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1145					   type,
1146					   buf,
1147					   offset);
1148	mutex_unlock(&adev->pm.mutex);
1149
1150	return ret;
1151}
1152
1153int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1154				    uint64_t ppfeature_masks)
1155{
1156	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1157	int ret = 0;
1158
1159	if (!pp_funcs->set_ppfeature_status)
1160		return 0;
1161
1162	mutex_lock(&adev->pm.mutex);
1163	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1164					     ppfeature_masks);
1165	mutex_unlock(&adev->pm.mutex);
1166
1167	return ret;
1168}
1169
1170int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1171{
1172	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1173	int ret = 0;
1174
1175	if (!pp_funcs->get_ppfeature_status)
1176		return 0;
1177
1178	mutex_lock(&adev->pm.mutex);
1179	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1180					     buf);
1181	mutex_unlock(&adev->pm.mutex);
1182
1183	return ret;
1184}
1185
1186int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1187				 enum pp_clock_type type,
1188				 uint32_t mask)
1189{
1190	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1191	int ret = 0;
1192
1193	if (!pp_funcs->force_clock_level)
1194		return 0;
1195
1196	mutex_lock(&adev->pm.mutex);
1197	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1198					  type,
1199					  mask);
1200	mutex_unlock(&adev->pm.mutex);
1201
1202	return ret;
1203}
1204
1205int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1206{
1207	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1208	int ret = 0;
1209
1210	if (!pp_funcs->get_sclk_od)
1211		return -EOPNOTSUPP;
1212
1213	mutex_lock(&adev->pm.mutex);
1214	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1215	mutex_unlock(&adev->pm.mutex);
1216
1217	return ret;
1218}
1219
1220int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1221{
1222	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1223
1224	if (is_support_sw_smu(adev))
1225		return -EOPNOTSUPP;
1226
1227	mutex_lock(&adev->pm.mutex);
1228	if (pp_funcs->set_sclk_od)
1229		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1230	mutex_unlock(&adev->pm.mutex);
1231
1232	if (amdgpu_dpm_dispatch_task(adev,
1233				     AMD_PP_TASK_READJUST_POWER_STATE,
1234				     NULL) == -EOPNOTSUPP) {
1235		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1236		amdgpu_dpm_compute_clocks(adev);
1237	}
1238
1239	return 0;
1240}
1241
1242int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1243{
1244	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1245	int ret = 0;
1246
1247	if (!pp_funcs->get_mclk_od)
1248		return -EOPNOTSUPP;
1249
1250	mutex_lock(&adev->pm.mutex);
1251	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1252	mutex_unlock(&adev->pm.mutex);
1253
1254	return ret;
1255}
1256
1257int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1258{
1259	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1260
1261	if (is_support_sw_smu(adev))
1262		return -EOPNOTSUPP;
1263
1264	mutex_lock(&adev->pm.mutex);
1265	if (pp_funcs->set_mclk_od)
1266		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1267	mutex_unlock(&adev->pm.mutex);
1268
1269	if (amdgpu_dpm_dispatch_task(adev,
1270				     AMD_PP_TASK_READJUST_POWER_STATE,
1271				     NULL) == -EOPNOTSUPP) {
1272		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1273		amdgpu_dpm_compute_clocks(adev);
1274	}
1275
1276	return 0;
1277}
1278
1279int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1280				      char *buf)
1281{
1282	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1283	int ret = 0;
1284
1285	if (!pp_funcs->get_power_profile_mode)
1286		return -EOPNOTSUPP;
1287
1288	mutex_lock(&adev->pm.mutex);
1289	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1290					       buf);
1291	mutex_unlock(&adev->pm.mutex);
1292
1293	return ret;
1294}
1295
1296int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1297				      long *input, uint32_t size)
1298{
1299	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1300	int ret = 0;
1301
1302	if (!pp_funcs->set_power_profile_mode)
1303		return 0;
1304
1305	mutex_lock(&adev->pm.mutex);
1306	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1307					       input,
1308					       size);
1309	mutex_unlock(&adev->pm.mutex);
1310
1311	return ret;
1312}
1313
1314int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1315{
1316	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1317	int ret = 0;
1318
1319	if (!pp_funcs->get_gpu_metrics)
1320		return 0;
1321
1322	mutex_lock(&adev->pm.mutex);
1323	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1324					table);
1325	mutex_unlock(&adev->pm.mutex);
1326
1327	return ret;
1328}
1329
1330ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
1331				  size_t size)
1332{
1333	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1334	int ret = 0;
1335
1336	if (!pp_funcs->get_pm_metrics)
1337		return -EOPNOTSUPP;
1338
1339	mutex_lock(&adev->pm.mutex);
1340	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
1341				       size);
1342	mutex_unlock(&adev->pm.mutex);
1343
1344	return ret;
1345}
1346
1347int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1348				    uint32_t *fan_mode)
1349{
1350	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1351	int ret = 0;
1352
1353	if (!pp_funcs->get_fan_control_mode)
1354		return -EOPNOTSUPP;
1355
1356	mutex_lock(&adev->pm.mutex);
1357	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1358					     fan_mode);
1359	mutex_unlock(&adev->pm.mutex);
1360
1361	return ret;
1362}
1363
1364int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
1365				 uint32_t speed)
1366{
1367	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1368	int ret = 0;
1369
1370	if (!pp_funcs->set_fan_speed_pwm)
1371		return -EOPNOTSUPP;
1372
1373	mutex_lock(&adev->pm.mutex);
1374	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1375					  speed);
1376	mutex_unlock(&adev->pm.mutex);
1377
1378	return ret;
1379}
1380
1381int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
1382				 uint32_t *speed)
1383{
1384	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1385	int ret = 0;
1386
1387	if (!pp_funcs->get_fan_speed_pwm)
1388		return -EOPNOTSUPP;
1389
1390	mutex_lock(&adev->pm.mutex);
1391	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1392					  speed);
1393	mutex_unlock(&adev->pm.mutex);
1394
1395	return ret;
1396}
1397
1398int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
1399				 uint32_t *speed)
1400{
1401	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1402	int ret = 0;
1403
1404	if (!pp_funcs->get_fan_speed_rpm)
1405		return -EOPNOTSUPP;
1406
1407	mutex_lock(&adev->pm.mutex);
1408	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1409					  speed);
1410	mutex_unlock(&adev->pm.mutex);
1411
1412	return ret;
1413}
1414
1415int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
1416				 uint32_t speed)
1417{
1418	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1419	int ret = 0;
1420
1421	if (!pp_funcs->set_fan_speed_rpm)
1422		return -EOPNOTSUPP;
1423
1424	mutex_lock(&adev->pm.mutex);
1425	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1426					  speed);
1427	mutex_unlock(&adev->pm.mutex);
1428
1429	return ret;
1430}
1431
1432int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
1433				    uint32_t mode)
1434{
1435	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1436	int ret = 0;
1437
1438	if (!pp_funcs->set_fan_control_mode)
1439		return -EOPNOTSUPP;
1440
1441	mutex_lock(&adev->pm.mutex);
1442	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1443					     mode);
1444	mutex_unlock(&adev->pm.mutex);
1445
1446	return ret;
1447}
1448
1449int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
1450			       uint32_t *limit,
1451			       enum pp_power_limit_level pp_limit_level,
1452			       enum pp_power_type power_type)
1453{
1454	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1455	int ret = 0;
1456
1457	if (!pp_funcs->get_power_limit)
1458		return -ENODATA;
1459
1460	mutex_lock(&adev->pm.mutex);
1461	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1462					limit,
1463					pp_limit_level,
1464					power_type);
1465	mutex_unlock(&adev->pm.mutex);
1466
1467	return ret;
1468}
1469
1470int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
1471			       uint32_t limit)
1472{
1473	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1474	int ret = 0;
1475
1476	if (!pp_funcs->set_power_limit)
1477		return -EINVAL;
1478
1479	mutex_lock(&adev->pm.mutex);
1480	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1481					limit);
1482	mutex_unlock(&adev->pm.mutex);
1483
1484	return ret;
1485}
1486
1487int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
1488{
1489	bool cclk_dpm_supported = false;
1490
1491	if (!is_support_sw_smu(adev))
1492		return false;
1493
1494	mutex_lock(&adev->pm.mutex);
1495	cclk_dpm_supported = is_support_cclk_dpm(adev);
1496	mutex_unlock(&adev->pm.mutex);
1497
1498	return (int)cclk_dpm_supported;
1499}
1500
1501int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
1502						       struct seq_file *m)
1503{
1504	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1505
1506	if (!pp_funcs->debugfs_print_current_performance_level)
1507		return -EOPNOTSUPP;
1508
1509	mutex_lock(&adev->pm.mutex);
1510	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1511							  m);
1512	mutex_unlock(&adev->pm.mutex);
1513
1514	return 0;
1515}
1516
1517int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
1518				       void **addr,
1519				       size_t *size)
1520{
1521	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1522	int ret = 0;
1523
1524	if (!pp_funcs->get_smu_prv_buf_details)
1525		return -ENOSYS;
1526
1527	mutex_lock(&adev->pm.mutex);
1528	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1529						addr,
1530						size);
1531	mutex_unlock(&adev->pm.mutex);
1532
1533	return ret;
1534}
1535
1536int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
1537{
1538	if (is_support_sw_smu(adev)) {
1539		struct smu_context *smu = adev->powerplay.pp_handle;
1540
1541		return (smu->od_enabled || smu->is_apu);
1542	} else {
1543		struct pp_hwmgr *hwmgr;
1544
1545		/*
1546		 * dpm on some legacy asics don't carry od_enabled member
1547		 * as its pp_handle is casted directly from adev.
1548		 */
1549		if (amdgpu_dpm_is_legacy_dpm(adev))
1550			return false;
1551
1552		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1553
1554		return hwmgr->od_enabled;
1555	}
1556}
1557
1558int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
1559			    const char *buf,
1560			    size_t size)
1561{
1562	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1563	int ret = 0;
1564
1565	if (!pp_funcs->set_pp_table)
1566		return -EOPNOTSUPP;
1567
1568	mutex_lock(&adev->pm.mutex);
1569	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1570				     buf,
1571				     size);
1572	mutex_unlock(&adev->pm.mutex);
1573
1574	return ret;
1575}
1576
1577int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
1578{
1579	struct smu_context *smu = adev->powerplay.pp_handle;
1580
1581	if (!is_support_sw_smu(adev))
1582		return INT_MAX;
1583
1584	return smu->cpu_core_num;
1585}
1586
/* Register the SMU STB (smart trace buffer) debugfs entries, if applicable. */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev))
		amdgpu_smu_stb_debug_fs_init(adev);
}
1594
1595int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1596					    const struct amd_pp_display_configuration *input)
1597{
1598	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1599	int ret = 0;
1600
1601	if (!pp_funcs->display_configuration_change)
1602		return 0;
1603
1604	mutex_lock(&adev->pm.mutex);
1605	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1606						     input);
1607	mutex_unlock(&adev->pm.mutex);
1608
1609	return ret;
1610}
1611
1612int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1613				 enum amd_pp_clock_type type,
1614				 struct amd_pp_clocks *clocks)
1615{
1616	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1617	int ret = 0;
1618
1619	if (!pp_funcs->get_clock_by_type)
1620		return 0;
1621
1622	mutex_lock(&adev->pm.mutex);
1623	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1624					  type,
1625					  clocks);
1626	mutex_unlock(&adev->pm.mutex);
1627
1628	return ret;
1629}
1630
1631int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1632						struct amd_pp_simple_clock_info *clocks)
1633{
1634	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1635	int ret = 0;
1636
1637	if (!pp_funcs->get_display_mode_validation_clocks)
1638		return 0;
1639
1640	mutex_lock(&adev->pm.mutex);
1641	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1642							   clocks);
1643	mutex_unlock(&adev->pm.mutex);
1644
1645	return ret;
1646}
1647
1648int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1649					      enum amd_pp_clock_type type,
1650					      struct pp_clock_levels_with_latency *clocks)
1651{
1652	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1653	int ret = 0;
1654
1655	if (!pp_funcs->get_clock_by_type_with_latency)
1656		return 0;
1657
1658	mutex_lock(&adev->pm.mutex);
1659	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1660						       type,
1661						       clocks);
1662	mutex_unlock(&adev->pm.mutex);
1663
1664	return ret;
1665}
1666
1667int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1668					      enum amd_pp_clock_type type,
1669					      struct pp_clock_levels_with_voltage *clocks)
1670{
1671	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1672	int ret = 0;
1673
1674	if (!pp_funcs->get_clock_by_type_with_voltage)
1675		return 0;
1676
1677	mutex_lock(&adev->pm.mutex);
1678	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1679						       type,
1680						       clocks);
1681	mutex_unlock(&adev->pm.mutex);
1682
1683	return ret;
1684}
1685
1686int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1687					       void *clock_ranges)
1688{
1689	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1690	int ret = 0;
1691
1692	if (!pp_funcs->set_watermarks_for_clocks_ranges)
1693		return -EOPNOTSUPP;
1694
1695	mutex_lock(&adev->pm.mutex);
1696	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1697							 clock_ranges);
1698	mutex_unlock(&adev->pm.mutex);
1699
1700	return ret;
1701}
1702
1703int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1704					     struct pp_display_clock_request *clock)
1705{
1706	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1707	int ret = 0;
1708
1709	if (!pp_funcs->display_clock_voltage_request)
1710		return -EOPNOTSUPP;
1711
1712	mutex_lock(&adev->pm.mutex);
1713	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1714						      clock);
1715	mutex_unlock(&adev->pm.mutex);
1716
1717	return ret;
1718}
1719
1720int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1721				  struct amd_pp_clock_info *clocks)
1722{
1723	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1724	int ret = 0;
1725
1726	if (!pp_funcs->get_current_clocks)
1727		return -EOPNOTSUPP;
1728
1729	mutex_lock(&adev->pm.mutex);
1730	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1731					   clocks);
1732	mutex_unlock(&adev->pm.mutex);
1733
1734	return ret;
1735}
1736
1737void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1738{
1739	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1740
1741	if (!pp_funcs->notify_smu_enable_pwe)
1742		return;
1743
1744	mutex_lock(&adev->pm.mutex);
1745	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1746	mutex_unlock(&adev->pm.mutex);
1747}
1748
1749int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1750					uint32_t count)
1751{
1752	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1753	int ret = 0;
1754
1755	if (!pp_funcs->set_active_display_count)
1756		return -EOPNOTSUPP;
1757
1758	mutex_lock(&adev->pm.mutex);
1759	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1760						 count);
1761	mutex_unlock(&adev->pm.mutex);
1762
1763	return ret;
1764}
1765
1766int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1767					  uint32_t clock)
1768{
1769	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1770	int ret = 0;
1771
1772	if (!pp_funcs->set_min_deep_sleep_dcefclk)
1773		return -EOPNOTSUPP;
1774
1775	mutex_lock(&adev->pm.mutex);
1776	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1777						   clock);
1778	mutex_unlock(&adev->pm.mutex);
1779
1780	return ret;
1781}
1782
1783void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1784					     uint32_t clock)
1785{
1786	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1787
1788	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1789		return;
1790
1791	mutex_lock(&adev->pm.mutex);
1792	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1793					       clock);
1794	mutex_unlock(&adev->pm.mutex);
1795}
1796
1797void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1798					  uint32_t clock)
1799{
1800	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1801
1802	if (!pp_funcs->set_hard_min_fclk_by_freq)
1803		return;
1804
1805	mutex_lock(&adev->pm.mutex);
1806	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1807					    clock);
1808	mutex_unlock(&adev->pm.mutex);
1809}
1810
1811int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1812						   bool disable_memory_clock_switch)
1813{
1814	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1815	int ret = 0;
1816
1817	if (!pp_funcs->display_disable_memory_clock_switch)
1818		return 0;
1819
1820	mutex_lock(&adev->pm.mutex);
1821	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1822							    disable_memory_clock_switch);
1823	mutex_unlock(&adev->pm.mutex);
1824
1825	return ret;
1826}
1827
1828int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1829						struct pp_smu_nv_clock_table *max_clocks)
1830{
1831	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1832	int ret = 0;
1833
1834	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1835		return -EOPNOTSUPP;
1836
1837	mutex_lock(&adev->pm.mutex);
1838	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1839							 max_clocks);
1840	mutex_unlock(&adev->pm.mutex);
1841
1842	return ret;
1843}
1844
1845enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
1846						  unsigned int *clock_values_in_khz,
1847						  unsigned int *num_states)
1848{
1849	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1850	int ret = 0;
1851
1852	if (!pp_funcs->get_uclk_dpm_states)
1853		return -EOPNOTSUPP;
1854
1855	mutex_lock(&adev->pm.mutex);
1856	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
1857					    clock_values_in_khz,
1858					    num_states);
1859	mutex_unlock(&adev->pm.mutex);
1860
1861	return ret;
1862}
1863
1864int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
1865				   struct dpm_clocks *clock_table)
1866{
1867	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1868	int ret = 0;
1869
1870	if (!pp_funcs->get_dpm_clock_table)
1871		return -EOPNOTSUPP;
1872
1873	mutex_lock(&adev->pm.mutex);
1874	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
1875					    clock_table);
1876	mutex_unlock(&adev->pm.mutex);
1877
1878	return ret;
1879}