v6.9.4
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
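
/*
 * Note: every wrapper below follows the same shape -- check for the relevant
 * amd_pm_funcs callback (or is_support_sw_smu()) first, then take
 * adev->pm.mutex around the powerplay/SMU call. The is_legacy_dpm check
 * works because the legacy dpm paths register adev itself as pp_handle,
 * while powerplay and the SMU stack install their own context there.
 */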

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
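
/*
 * Illustrative sketch (not part of this file): a hypothetical caller would
 * query engine/memory clocks like this; the low flag selects the lowest
 * rather than the highest dpm level, and 0 is returned when the backend
 * provides no get_sclk/get_mclk callback.
 *
 *	uint32_t sclk_min = amdgpu_dpm_get_sclk(adev, true);
 *	uint32_t sclk_max = amdgpu_dpm_get_sclk(adev, false);
 *	uint32_t mclk_max = amdgpu_dpm_get_mclk(adev, false);
 */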

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
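
/*
 * Illustrative note (not part of this file): the per-block power state is
 * cached in adev->pm.pwr_state[], so repeated requests for the same state
 * return early; a hypothetical idle handler can therefore call this
 * unconditionally, e.g.:
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, true);
 */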

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use BACO for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}
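
/*
 * Illustrative sketch (not part of this file): a BACO ("Bus Active, Chip
 * Off") reset is just the enter/exit cycle above, so a hypothetical reset
 * handler only needs the combined helper after checking for support:
 *
 *	if (amdgpu_dpm_is_baco_supported(adev))
 *		r = amdgpu_dpm_baco_reset(adev);
 *
 * amdgpu_dpm_baco_enter()/amdgpu_dpm_baco_exit() remain available where the
 * two halves must bracket other work.
 */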

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int mode = XGMI_PLPD_NONE;

	if (is_support_sw_smu(adev)) {
		mode = smu->plpd_mode;
		if (mode_desc == NULL)
			return mode;
		switch (smu->plpd_mode) {
		case XGMI_PLPD_DISALLOW:
			*mode_desc = "disallow";
			break;
		case XGMI_PLPD_DEFAULT:
			*mode_desc = "default";
			break;
		case XGMI_PLPD_OPTIMIZED:
			*mode_desc = "optimized";
			break;
		case XGMI_PLPD_NONE:
		default:
			*mode_desc = "none";
			break;
		}
	}

	return mode;
}

int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_xgmi_plpd_mode(smu, mode);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
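
/*
 * Illustrative sketch (not part of this file): a hypothetical caller passes
 * a buffer plus its size; the backend may update *size to the number of
 * bytes actually written. AMDGPU_PP_SENSOR_GPU_TEMP is assumed here as one
 * of the amd_pp_sensors ids:
 *
 *	uint32_t temp, size = sizeof(temp);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 *				    (void *)&temp, &size))
 *		dev_info(adev->dev, "GPU temperature: %u\n", temp);
 */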

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}
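
/*
 * Note on amdgpu_dpm_compute_clocks() above: display bandwidth is updated
 * and all ready rings are drained of fences before pm.mutex is taken, so
 * the backend recomputes clocks against an idle GPU and the current
 * display configuration.
 */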

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("DPM %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("DPM %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
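
/*
 * Illustrative sketch (not part of this file): both the range query above
 * and the soft-range setter below accept only PP_SCLK, so a hypothetical
 * user clamps the gfx clock by querying the supported range first:
 *
 *	uint32_t min, max;
 *
 *	if (!amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min, &max))
 *		amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, min, max);
 */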

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
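
/*
 * Illustrative sketch (not part of this file): entering any UMD pstate
 * profile level ungates GFX clock/power gating first (see above), so a
 * hypothetical tool wanting deterministic clocks would simply request:
 *
 *	if (amdgpu_dpm_force_performance_level(adev,
 *					       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
 *		dev_warn(adev->dev, "failed to force profile_peak\n");
 */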

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			      enum amd_pp_task task_id,
			      enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf,
				  int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}
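
/*
 * Note on the sclk_od/mclk_od setters above: when the backend lacks a
 * dispatch_tasks callback, the READJUST_POWER_STATE request falls back to
 * re-selecting the boot power state and recomputing clocks locally.
 */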

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy ASICs doesn't carry an od_enabled member
		 * as its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
					       void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
v5.14.15
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_i2c.h"
  28#include "amdgpu_dpm.h"
  29#include "atom.h"
  30#include "amd_pcie.h"
  31#include "amdgpu_display.h"
  32#include "hwmgr.h"
  33#include <linux/power_supply.h>
 
  34
  35#define WIDTH_4K 3840
 
  36
  37void amdgpu_dpm_print_class_info(u32 class, u32 class2)
  38{
  39	const char *s;
  40
  41	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
  42	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
  43	default:
  44		s = "none";
  45		break;
  46	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
  47		s = "battery";
  48		break;
  49	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
  50		s = "balanced";
  51		break;
  52	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
  53		s = "performance";
  54		break;
  55	}
  56	printk("\tui class: %s\n", s);
  57	printk("\tinternal class:");
  58	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
  59	    (class2 == 0))
  60		pr_cont(" none");
  61	else {
  62		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
  63			pr_cont(" boot");
  64		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
  65			pr_cont(" thermal");
  66		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
  67			pr_cont(" limited_pwr");
  68		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
  69			pr_cont(" rest");
  70		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
  71			pr_cont(" forced");
  72		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
  73			pr_cont(" 3d_perf");
  74		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
  75			pr_cont(" ovrdrv");
  76		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
  77			pr_cont(" uvd");
  78		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
  79			pr_cont(" 3d_low");
  80		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
  81			pr_cont(" acpi");
  82		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
  83			pr_cont(" uvd_hd2");
  84		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
  85			pr_cont(" uvd_hd");
  86		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
  87			pr_cont(" uvd_sd");
  88		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
  89			pr_cont(" limited_pwr2");
  90		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
  91			pr_cont(" ulv");
  92		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
  93			pr_cont(" uvd_mvc");
  94	}
  95	pr_cont("\n");
  96}
  97
  98void amdgpu_dpm_print_cap_info(u32 caps)
  99{
 100	printk("\tcaps:");
 101	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
 102		pr_cont(" single_disp");
 103	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
 104		pr_cont(" video");
 105	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
 106		pr_cont(" no_dc");
 107	pr_cont("\n");
 108}
 109
 110void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 111				struct amdgpu_ps *rps)
 112{
 113	printk("\tstatus:");
 114	if (rps == adev->pm.dpm.current_ps)
 115		pr_cont(" c");
 116	if (rps == adev->pm.dpm.requested_ps)
 117		pr_cont(" r");
 118	if (rps == adev->pm.dpm.boot_ps)
 119		pr_cont(" b");
 120	pr_cont("\n");
 121}
 122
 123void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 124{
 125	struct drm_device *ddev = adev_to_drm(adev);
 126	struct drm_crtc *crtc;
 127	struct amdgpu_crtc *amdgpu_crtc;
 128
 129	adev->pm.dpm.new_active_crtcs = 0;
 130	adev->pm.dpm.new_active_crtc_count = 0;
 131	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 132		list_for_each_entry(crtc,
 133				    &ddev->mode_config.crtc_list, head) {
 134			amdgpu_crtc = to_amdgpu_crtc(crtc);
 135			if (amdgpu_crtc->enabled) {
 136				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
 137				adev->pm.dpm.new_active_crtc_count++;
 138			}
 139		}
 140	}
 141}
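
Each lit CRTC contributes the bit (1 << crtc_id) to new_active_crtcs, so the mask and the count travel together into the power-state selection later on. A tiny standalone sketch with assumed values (CRTCs 0 and 2 enabled):

/* Sketch: building the active-CRTC mask; values are assumptions. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int enabled_ids[] = { 0, 2 };	/* assume CRTCs 0 and 2 are lit */
	uint32_t active_crtcs = 0;
	int active_count = 0;

	for (int i = 0; i < 2; i++) {
		active_crtcs |= 1u << enabled_ids[i];
		active_count++;
	}
	printf("mask=0x%x count=%d\n", active_crtcs, active_count);	/* mask=0x5 count=2 */
	return 0;
}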
 142
 143
 144u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 145{
 146	struct drm_device *dev = adev_to_drm(adev);
 147	struct drm_crtc *crtc;
 148	struct amdgpu_crtc *amdgpu_crtc;
 149	u32 vblank_in_pixels;
 150	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 151
 152	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 153		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 154			amdgpu_crtc = to_amdgpu_crtc(crtc);
 155			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 156				vblank_in_pixels =
 157					amdgpu_crtc->hw_mode.crtc_htotal *
 158					(amdgpu_crtc->hw_mode.crtc_vblank_end -
 159					amdgpu_crtc->hw_mode.crtc_vdisplay +
 160					(amdgpu_crtc->v_border * 2));
 161
 162				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 163				break;
 164			}
 165		}
 166	}
 167
 168	return vblank_time_us;
 169}
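
hw_mode.clock is the pixel clock in kHz, so vblank_in_pixels * 1000 / clock yields microseconds. Worked through with standard 1080p60 CTA timings (assumed here: htotal 2200, crtc_vblank_end 1125, vdisplay 1080, no borders, 148500 kHz), the blanking interval is 2200 * 45 = 99000 pixels, about 666 us:

/* Sketch: the vblank-time arithmetic with assumed 1080p60 timings. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t htotal = 2200, vblank_end = 1125, vdisplay = 1080, v_border = 0;
	uint32_t clock_khz = 148500;	/* pixel clock in kHz, as in hw_mode.clock */
	uint32_t vblank_in_pixels = htotal * (vblank_end - vdisplay + 2 * v_border);
	uint32_t vblank_time_us = vblank_in_pixels * 1000 / clock_khz;

	printf("%u pixels -> %u us\n", vblank_in_pixels, vblank_time_us);	/* 99000 -> 666 */
	return 0;
}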
 170
 171u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 172{
 173	struct drm_device *dev = adev_to_drm(adev);
 174	struct drm_crtc *crtc;
 175	struct amdgpu_crtc *amdgpu_crtc;
 176	u32 vrefresh = 0;
 177
 178	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 179		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 180			amdgpu_crtc = to_amdgpu_crtc(crtc);
 181			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 182				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 183				break;
 184			}
 185		}
 186	}
 187
 188	return vrefresh;
 189}
 190
 191bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
 192{
 193	switch (sensor) {
 194	case THERMAL_TYPE_RV6XX:
 195	case THERMAL_TYPE_RV770:
 196	case THERMAL_TYPE_EVERGREEN:
 197	case THERMAL_TYPE_SUMO:
 198	case THERMAL_TYPE_NI:
 199	case THERMAL_TYPE_SI:
 200	case THERMAL_TYPE_CI:
 201	case THERMAL_TYPE_KV:
 202		return true;
 203	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
 204	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
 205		return false; /* need special handling */
 206	case THERMAL_TYPE_NONE:
 207	case THERMAL_TYPE_EXTERNAL:
 208	case THERMAL_TYPE_EXTERNAL_GPIO:
 209	default:
 210		return false;
 211	}
 212}
 213
 214union power_info {
 215	struct _ATOM_POWERPLAY_INFO info;
 216	struct _ATOM_POWERPLAY_INFO_V2 info_2;
 217	struct _ATOM_POWERPLAY_INFO_V3 info_3;
 218	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
 219	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
 220	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
 221	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
 222	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
 223};
 224
 225union fan_info {
 226	struct _ATOM_PPLIB_FANTABLE fan;
 227	struct _ATOM_PPLIB_FANTABLE2 fan2;
 228	struct _ATOM_PPLIB_FANTABLE3 fan3;
 229};
 230
 231static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
 232					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
 233{
 234	u32 size = atom_table->ucNumEntries *
 235		sizeof(struct amdgpu_clock_voltage_dependency_entry);
 236	int i;
 237	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
 238
 239	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
 240	if (!amdgpu_table->entries)
 241		return -ENOMEM;
 242
 243	entry = &atom_table->entries[0];
 244	for (i = 0; i < atom_table->ucNumEntries; i++) {
 245		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
 246			(entry->ucClockHigh << 16);
 247		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
 248		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
 249			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
 250	}
 251	amdgpu_table->count = atom_table->ucNumEntries;
 252
 253	return 0;
 254}
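
ATOM records split 24-bit clock values across a little-endian 16-bit low word and an 8-bit high byte; the loop above stitches them back together with le16_to_cpu(low) | (high << 16). (These PPLIB tables typically carry clocks in 10 kHz units.) A standalone sketch of the reassembly:

/* Sketch: reassembling an ATOM 24-bit clock from its split fields. */
#include <stdint.h>
#include <stdio.h>

static uint32_t atom_clk(uint16_t clock_low, uint8_t clock_high)
{
	/* clock_low is assumed already byte-swapped; in the kernel,
	 * le16_to_cpu() does that. The high byte supplies bits 16-23. */
	return (uint32_t)clock_low | ((uint32_t)clock_high << 16);
}

int main(void)
{
	/* 0x01:0x86A0 -> 0x186A0 = 100000, i.e. 1 GHz in 10 kHz units */
	printf("%u\n", atom_clk(0x86A0, 0x01));
	return 0;
}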
 255
 256int amdgpu_get_platform_caps(struct amdgpu_device *adev)
 257{
 258	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 259	union power_info *power_info;
 260	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 261	u16 data_offset;
 262	u8 frev, crev;
 263
 264	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 265				   &frev, &crev, &data_offset))
 266		return -EINVAL;
 267	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 268
 269	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
 270	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
 271	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 272
 273	return 0;
 274}
 275
 276/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
 277#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
 278#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
 279#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
 280#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
 281#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
 282#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
 283#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
 284#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
 285
 286int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 287{
 288	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 289	union power_info *power_info;
 290	union fan_info *fan_info;
 291	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
 292	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 293	u16 data_offset;
 294	u8 frev, crev;
 295	int ret, i;
 296
 297	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 298				   &frev, &crev, &data_offset))
 299		return -EINVAL;
 300	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 301
 302	/* fan table */
 303	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 304	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 305		if (power_info->pplib3.usFanTableOffset) {
 306			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
 307						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
 308			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
 309			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
 310			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
 311			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
 312			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
 313			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
 314			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
 315			if (fan_info->fan.ucFanTableFormat >= 2)
 316				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
 317			else
 318				adev->pm.dpm.fan.t_max = 10900;
 319			adev->pm.dpm.fan.cycle_delay = 100000;
 320			if (fan_info->fan.ucFanTableFormat >= 3) {
 321				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
 322				adev->pm.dpm.fan.default_max_fan_pwm =
 323					le16_to_cpu(fan_info->fan3.usFanPWMMax);
 324				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
 325				adev->pm.dpm.fan.fan_output_sensitivity =
 326					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
 327			}
 328			adev->pm.dpm.fan.ucode_fan_control = true;
 329		}
 330	}
 331
332	/* clock dependency tables, shedding tables */
 333	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 334	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
 335		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
 336			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 337				(mode_info->atom_context->bios + data_offset +
 338				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
 339			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
 340								 dep_table);
 341			if (ret) {
 342				amdgpu_free_extended_power_table(adev);
 343				return ret;
 344			}
 345		}
 346		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
 347			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 348				(mode_info->atom_context->bios + data_offset +
 349				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
 350			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
 351								 dep_table);
 352			if (ret) {
 353				amdgpu_free_extended_power_table(adev);
 354				return ret;
 355			}
 356		}
 357		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
 358			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 359				(mode_info->atom_context->bios + data_offset +
 360				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
 361			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
 362								 dep_table);
 363			if (ret) {
 364				amdgpu_free_extended_power_table(adev);
 365				return ret;
 366			}
 367		}
 368		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
 369			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 370				(mode_info->atom_context->bios + data_offset +
 371				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
 372			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
 373								 dep_table);
 374			if (ret) {
 375				amdgpu_free_extended_power_table(adev);
 376				return ret;
 377			}
 378		}
 379		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 380			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
 381				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
 382				(mode_info->atom_context->bios + data_offset +
 383				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
 384			if (clk_v->ucNumEntries) {
 385				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
 386					le16_to_cpu(clk_v->entries[0].usSclkLow) |
 387					(clk_v->entries[0].ucSclkHigh << 16);
 388				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
 389					le16_to_cpu(clk_v->entries[0].usMclkLow) |
 390					(clk_v->entries[0].ucMclkHigh << 16);
 391				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
 392					le16_to_cpu(clk_v->entries[0].usVddc);
 393				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
 394					le16_to_cpu(clk_v->entries[0].usVddci);
 395			}
 396		}
 397		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
 398			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
 399				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
 400				(mode_info->atom_context->bios + data_offset +
 401				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
 402			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 403
 404			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
 405				kcalloc(psl->ucNumEntries,
 406					sizeof(struct amdgpu_phase_shedding_limits_entry),
 407					GFP_KERNEL);
 408			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
 409				amdgpu_free_extended_power_table(adev);
 410				return -ENOMEM;
 411			}
 412
 413			entry = &psl->entries[0];
 414			for (i = 0; i < psl->ucNumEntries; i++) {
 415				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
 416					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
 417				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
 418					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
 419				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
 420					le16_to_cpu(entry->usVoltage);
 421				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
 422					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
 423			}
 424			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
 425				psl->ucNumEntries;
 426		}
 427	}
 428
 429	/* cac data */
 430	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 431	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
 432		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
 433		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
 434		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
 435		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
 436		if (adev->pm.dpm.tdp_od_limit)
 437			adev->pm.dpm.power_control = true;
 438		else
 439			adev->pm.dpm.power_control = false;
 440		adev->pm.dpm.tdp_adjustment = 0;
 441		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
 442		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
 443		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
 444		if (power_info->pplib5.usCACLeakageTableOffset) {
 445			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
 446				(ATOM_PPLIB_CAC_Leakage_Table *)
 447				(mode_info->atom_context->bios + data_offset +
 448				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
 449			ATOM_PPLIB_CAC_Leakage_Record *entry;
 450			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
 451			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
 452			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
 453				amdgpu_free_extended_power_table(adev);
 454				return -ENOMEM;
 455			}
 456			entry = &cac_table->entries[0];
 457			for (i = 0; i < cac_table->ucNumEntries; i++) {
 458				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
 459					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
 460						le16_to_cpu(entry->usVddc1);
 461					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
 462						le16_to_cpu(entry->usVddc2);
 463					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
 464						le16_to_cpu(entry->usVddc3);
 465				} else {
 466					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
 467						le16_to_cpu(entry->usVddc);
 468					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
 469						le32_to_cpu(entry->ulLeakageValue);
 470				}
 471				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
 472					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
 473			}
 474			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
 475		}
 476	}
 477
 478	/* ext tables */
 479	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 480	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 481		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
 482			(mode_info->atom_context->bios + data_offset +
 483			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
 484		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
 485			ext_hdr->usVCETableOffset) {
 486			VCEClockInfoArray *array = (VCEClockInfoArray *)
 487				(mode_info->atom_context->bios + data_offset +
 488				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
 489			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
 490				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
 491				(mode_info->atom_context->bios + data_offset +
 492				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 493				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
 494			ATOM_PPLIB_VCE_State_Table *states =
 495				(ATOM_PPLIB_VCE_State_Table *)
 496				(mode_info->atom_context->bios + data_offset +
 497				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 498				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
 499				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
 500			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
 501			ATOM_PPLIB_VCE_State_Record *state_entry;
 502			VCEClockInfo *vce_clk;
 503			u32 size = limits->numEntries *
 504				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
 505			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
 506				kzalloc(size, GFP_KERNEL);
 507			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
 508				amdgpu_free_extended_power_table(adev);
 509				return -ENOMEM;
 510			}
 511			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 512				limits->numEntries;
 513			entry = &limits->entries[0];
 514			state_entry = &states->entries[0];
 515			for (i = 0; i < limits->numEntries; i++) {
 516				vce_clk = (VCEClockInfo *)
 517					((u8 *)&array->entries[0] +
 518					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 519				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
 520					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 521				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
 522					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 523				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
 524					le16_to_cpu(entry->usVoltage);
 525				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 526					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 527			}
 528			adev->pm.dpm.num_of_vce_states =
 529					states->numEntries > AMD_MAX_VCE_LEVELS ?
 530					AMD_MAX_VCE_LEVELS : states->numEntries;
 531			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 532				vce_clk = (VCEClockInfo *)
 533					((u8 *)&array->entries[0] +
 534					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 535				adev->pm.dpm.vce_states[i].evclk =
 536					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 537				adev->pm.dpm.vce_states[i].ecclk =
 538					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 539				adev->pm.dpm.vce_states[i].clk_idx =
 540					state_entry->ucClockInfoIndex & 0x3f;
 541				adev->pm.dpm.vce_states[i].pstate =
 542					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
 543				state_entry = (ATOM_PPLIB_VCE_State_Record *)
 544					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
 545			}
 546		}
 547		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
 548			ext_hdr->usUVDTableOffset) {
 549			UVDClockInfoArray *array = (UVDClockInfoArray *)
 550				(mode_info->atom_context->bios + data_offset +
 551				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
 552			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
 553				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
 554				(mode_info->atom_context->bios + data_offset +
 555				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
 556				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
 557			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
 558			u32 size = limits->numEntries *
 559				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
 560			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
 561				kzalloc(size, GFP_KERNEL);
 562			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
 563				amdgpu_free_extended_power_table(adev);
 564				return -ENOMEM;
 565			}
 566			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
 567				limits->numEntries;
 568			entry = &limits->entries[0];
 569			for (i = 0; i < limits->numEntries; i++) {
 570				UVDClockInfo *uvd_clk = (UVDClockInfo *)
 571					((u8 *)&array->entries[0] +
 572					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
 573				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
 574					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
 575				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
 576					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
 577				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
 578					le16_to_cpu(entry->usVoltage);
 579				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
 580					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
 581			}
 582		}
 583		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
 584			ext_hdr->usSAMUTableOffset) {
 585			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
 586				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
 587				(mode_info->atom_context->bios + data_offset +
 588				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
 589			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
 590			u32 size = limits->numEntries *
 591				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 592			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
 593				kzalloc(size, GFP_KERNEL);
 594			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
 595				amdgpu_free_extended_power_table(adev);
 596				return -ENOMEM;
 597			}
 598			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
 599				limits->numEntries;
 600			entry = &limits->entries[0];
 601			for (i = 0; i < limits->numEntries; i++) {
 602				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
 603					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
 604				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
 605					le16_to_cpu(entry->usVoltage);
 606				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
 607					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
 608			}
 609		}
 610		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
 611		    ext_hdr->usPPMTableOffset) {
 612			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
 613				(mode_info->atom_context->bios + data_offset +
 614				 le16_to_cpu(ext_hdr->usPPMTableOffset));
 615			adev->pm.dpm.dyn_state.ppm_table =
 616				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
 617			if (!adev->pm.dpm.dyn_state.ppm_table) {
 618				amdgpu_free_extended_power_table(adev);
 619				return -ENOMEM;
 620			}
 621			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
 622			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
 623				le16_to_cpu(ppm->usCpuCoreNumber);
 624			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
 625				le32_to_cpu(ppm->ulPlatformTDP);
 626			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
 627				le32_to_cpu(ppm->ulSmallACPlatformTDP);
 628			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
 629				le32_to_cpu(ppm->ulPlatformTDC);
 630			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
 631				le32_to_cpu(ppm->ulSmallACPlatformTDC);
 632			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
 633				le32_to_cpu(ppm->ulApuTDP);
 634			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
 635				le32_to_cpu(ppm->ulDGpuTDP);
 636			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
 637				le32_to_cpu(ppm->ulDGpuUlvPower);
 638			adev->pm.dpm.dyn_state.ppm_table->tj_max =
 639				le32_to_cpu(ppm->ulTjmax);
 640		}
 641		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
 642			ext_hdr->usACPTableOffset) {
 643			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
 644				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
 645				(mode_info->atom_context->bios + data_offset +
 646				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
 647			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
 648			u32 size = limits->numEntries *
 649				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 650			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
 651				kzalloc(size, GFP_KERNEL);
 652			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
 653				amdgpu_free_extended_power_table(adev);
 654				return -ENOMEM;
 655			}
 656			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
 657				limits->numEntries;
 658			entry = &limits->entries[0];
 659			for (i = 0; i < limits->numEntries; i++) {
 660				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
 661					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
 662				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
 663					le16_to_cpu(entry->usVoltage);
 664				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
 665					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
 666			}
 667		}
 668		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
 669			ext_hdr->usPowerTuneTableOffset) {
 670			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
 671					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 672			ATOM_PowerTune_Table *pt;
 673			adev->pm.dpm.dyn_state.cac_tdp_table =
 674				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
 675			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
 676				amdgpu_free_extended_power_table(adev);
 677				return -ENOMEM;
 678			}
 679			if (rev > 0) {
 680				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
 681					(mode_info->atom_context->bios + data_offset +
 682					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 683				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
 684					ppt->usMaximumPowerDeliveryLimit;
 685				pt = &ppt->power_tune_table;
 686			} else {
 687				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
 688					(mode_info->atom_context->bios + data_offset +
 689					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 690				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
 691				pt = &ppt->power_tune_table;
 692			}
 693			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
 694			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
 695				le16_to_cpu(pt->usConfigurableTDP);
 696			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
 697			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
 698				le16_to_cpu(pt->usBatteryPowerLimit);
 699			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
 700				le16_to_cpu(pt->usSmallPowerLimit);
 701			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
 702				le16_to_cpu(pt->usLowCACLeakage);
 703			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
 704				le16_to_cpu(pt->usHighCACLeakage);
 705		}
 706		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
 707				ext_hdr->usSclkVddgfxTableOffset) {
 708			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 709				(mode_info->atom_context->bios + data_offset +
 710				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
 711			ret = amdgpu_parse_clk_voltage_dep_table(
 712					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
 713					dep_table);
 714			if (ret) {
 715				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
 716				return ret;
 717			}
 718		}
 719	}
 720
 721	return 0;
 722}
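
Every optional table above is located the same way: first a revision gate (usTableSize, or the extended header's usSize, must be large enough for the field to exist at all), then a nonzero 16-bit offset that is added to data_offset inside the BIOS image. A compact sketch of that two-step guard, with hypothetical struct and field names:

/* Sketch of the guard pattern; the struct and field names are invented. */
#include <stddef.h>
#include <stdint.h>

struct ext_header {
	uint16_t size;			/* actual header size in this VBIOS */
	uint16_t fan_table_offset;	/* 0 when the table is omitted */
};

static const void *locate_table(const uint8_t *bios, uint16_t data_offset,
				const struct ext_header *hdr,
				uint16_t min_size_for_field)
{
	if (hdr->size < min_size_for_field)
		return NULL;	/* this revision predates the field */
	if (!hdr->fan_table_offset)
		return NULL;	/* this VBIOS left the table out */
	return bios + data_offset + hdr->fan_table_offset;
}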
 723
 724void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
 725{
 726	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
 727
 728	kfree(dyn_state->vddc_dependency_on_sclk.entries);
 729	kfree(dyn_state->vddci_dependency_on_mclk.entries);
 730	kfree(dyn_state->vddc_dependency_on_mclk.entries);
 731	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
 732	kfree(dyn_state->cac_leakage_table.entries);
 733	kfree(dyn_state->phase_shedding_limits_table.entries);
 734	kfree(dyn_state->ppm_table);
 735	kfree(dyn_state->cac_tdp_table);
 736	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
 737	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
 738	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
 739	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 740	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
 741}
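
The frees above are unconditional because kfree(NULL) is a no-op, which is what lets amdgpu_parse_extended_power_table() invoke this teardown from any mid-parse error path without tracking which tables were actually allocated. The userspace analogue, with pointer-NULLing added here purely as illustration:

/* Sketch: free(NULL) is a no-op, so unconditional teardown is safe. */
#include <stdlib.h>

struct dyn_tables {
	int *vddc_on_sclk;
	int *vddci_on_mclk;
};

static void free_dyn_tables(struct dyn_tables *t)
{
	free(t->vddc_on_sclk);	/* fine even if never allocated */
	free(t->vddci_on_mclk);
	/* extra hardening, not in the kernel version */
	t->vddc_on_sclk = t->vddci_on_mclk = NULL;
}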
 742
 743static const char *pp_lib_thermal_controller_names[] = {
 744	"NONE",
 745	"lm63",
 746	"adm1032",
 747	"adm1030",
 748	"max6649",
 749	"lm64",
 750	"f75375",
 751	"RV6xx",
 752	"RV770",
 753	"adt7473",
 754	"NONE",
 755	"External GPIO",
 756	"Evergreen",
 757	"emc2103",
 758	"Sumo",
 759	"Northern Islands",
 760	"Southern Islands",
 761	"lm96163",
 762	"Sea Islands",
 763	"Kaveri/Kabini",
 764};
 765
 766void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
 767{
 768	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 769	ATOM_PPLIB_POWERPLAYTABLE *power_table;
 770	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 771	ATOM_PPLIB_THERMALCONTROLLER *controller;
 772	struct amdgpu_i2c_bus_rec i2c_bus;
 773	u16 data_offset;
 774	u8 frev, crev;
 775
 776	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 777				   &frev, &crev, &data_offset))
 778		return;
 779	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
 780		(mode_info->atom_context->bios + data_offset);
 781	controller = &power_table->sThermalController;
 782
 783	/* add the i2c bus for thermal/fan chip */
 784	if (controller->ucType > 0) {
 785		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
 786			adev->pm.no_fan = true;
 787		adev->pm.fan_pulses_per_revolution =
 788			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
 789		if (adev->pm.fan_pulses_per_revolution) {
 790			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
 791			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
 792		}
 793		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
 794			DRM_INFO("Internal thermal controller %s fan control\n",
 795				 (controller->ucFanParameters &
 796				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 797			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
 798		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
 799			DRM_INFO("Internal thermal controller %s fan control\n",
 800				 (controller->ucFanParameters &
 801				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 802			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
 803		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
 804			DRM_INFO("Internal thermal controller %s fan control\n",
 805				 (controller->ucFanParameters &
 806				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 807			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
 808		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
 809			DRM_INFO("Internal thermal controller %s fan control\n",
 810				 (controller->ucFanParameters &
 811				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 812			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
 813		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
 814			DRM_INFO("Internal thermal controller %s fan control\n",
 815				 (controller->ucFanParameters &
 816				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 817			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
 818		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
 819			DRM_INFO("Internal thermal controller %s fan control\n",
 820				 (controller->ucFanParameters &
 821				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 822			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
 823		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
 824			DRM_INFO("Internal thermal controller %s fan control\n",
 825				 (controller->ucFanParameters &
 826				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 827			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
 828		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
 829			DRM_INFO("Internal thermal controller %s fan control\n",
 830				 (controller->ucFanParameters &
 831				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 832			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
 833		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
 834			DRM_INFO("External GPIO thermal controller %s fan control\n",
 835				 (controller->ucFanParameters &
 836				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 837			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
 838		} else if (controller->ucType ==
 839			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
 840			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
 841				 (controller->ucFanParameters &
 842				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 843			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
 844		} else if (controller->ucType ==
 845			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
 846			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
 847				 (controller->ucFanParameters &
 848				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 849			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 850		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 851			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 852				 pp_lib_thermal_controller_names[controller->ucType],
 853				 controller->ucI2cAddress >> 1,
 854				 (controller->ucFanParameters &
 855				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 856			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
 857			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
 858			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
 859			if (adev->pm.i2c_bus) {
 860				struct i2c_board_info info = { };
 861				const char *name = pp_lib_thermal_controller_names[controller->ucType];
 862				info.addr = controller->ucI2cAddress >> 1;
 863				strlcpy(info.type, name, sizeof(info.type));
 864				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
 865			}
 866		} else {
 867			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
 868				 controller->ucType,
 869				 controller->ucI2cAddress >> 1,
 870				 (controller->ucFanParameters &
 871				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 872		}
 873	}
 874}
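
ucI2cAddress is stored by ATOM in 8-bit (read/write) form, while the Linux I2C core identifies clients by their 7-bit address; that is the reason for the >> 1 in every message and in the i2c_board_info setup above. For example, 0x98 shifts to 0x4c, a typical address for lm63-class sensors:

/* Sketch: 8-bit ATOM I2C address to the 7-bit form Linux uses. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t uc_i2c_address = 0x98;	/* 8-bit form, as stored by ATOM */
	printf("7-bit addr: 0x%02x\n", uc_i2c_address >> 1);	/* 0x4c */
	return 0;
}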
 875
 876enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
 877						 u32 sys_mask,
 878						 enum amdgpu_pcie_gen asic_gen,
 879						 enum amdgpu_pcie_gen default_gen)
 880{
 881	switch (asic_gen) {
 882	case AMDGPU_PCIE_GEN1:
 883		return AMDGPU_PCIE_GEN1;
 884	case AMDGPU_PCIE_GEN2:
 885		return AMDGPU_PCIE_GEN2;
 886	case AMDGPU_PCIE_GEN3:
 887		return AMDGPU_PCIE_GEN3;
 888	default:
 889		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
 890		    (default_gen == AMDGPU_PCIE_GEN3))
 891			return AMDGPU_PCIE_GEN3;
 892		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
 893			 (default_gen == AMDGPU_PCIE_GEN2))
 894			return AMDGPU_PCIE_GEN2;
 895		else
 896			return AMDGPU_PCIE_GEN1;
 897	}
 898	return AMDGPU_PCIE_GEN1;
 899}
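
The logic above: an explicit asic_gen wins outright; otherwise the requested default generation is honored only when the system link-speed mask also advertises it, degrading toward Gen1. A trimmed sketch (the mask bit positions are assumptions; the real CAIL_* constants live elsewhere):

/* Sketch: PCIe-gen fallback; mask bit values are invented for the demo. */
enum pcie_gen { GEN1 = 1, GEN2 = 2, GEN3 = 3 };
#define MASK_GEN2 (1u << 1)
#define MASK_GEN3 (1u << 2)

static enum pcie_gen pick_gen(unsigned int sys_mask, enum pcie_gen def)
{
	if (def == GEN3 && (sys_mask & MASK_GEN3))
		return GEN3;
	if (def == GEN2 && (sys_mask & MASK_GEN2))
		return GEN2;
	return GEN1;	/* Gen1 always works */
}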
 900
 901struct amd_vce_state*
 902amdgpu_get_vce_clock_state(void *handle, u32 idx)
 903{
 904	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 905
 906	if (idx < adev->pm.dpm.num_of_vce_states)
 907		return &adev->pm.dpm.vce_states[idx];
 908
 909	return NULL;
 910}
 911
 912int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 913{
 914	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 915
 916	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 917}
 918
 919int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 920{
 921	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 922
 923	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 924}
 925
 926int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
 927{
 928	int ret = 0;
 929	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 930
 931	switch (block_type) {
 932	case AMD_IP_BLOCK_TYPE_UVD:
 933	case AMD_IP_BLOCK_TYPE_VCE:
 934		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
 935			/*
 936			 * TODO: need a better lock mechanism
 937			 *
 938			 * Here adev->pm.mutex lock protection is enforced on
 939			 * UVD and VCE cases only. Since for other cases, there
 940			 * may be already lock protection in amdgpu_pm.c.
 941			 * This is a quick fix for the deadlock issue below.
942			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
943			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
944			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
945			 *     ocltst          D    0  2028   2026 0x00000000
946			 *     Call Trace:
 947			 *     __schedule+0x2c0/0x870
 948			 *     schedule+0x2c/0x70
 949			 *     schedule_preempt_disabled+0xe/0x10
 950			 *     __mutex_lock.isra.9+0x26d/0x4e0
 951			 *     __mutex_lock_slowpath+0x13/0x20
 952			 *     ? __mutex_lock_slowpath+0x13/0x20
 953			 *     mutex_lock+0x2f/0x40
 954			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
 955			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
 956			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
 957			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
 958			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
 959			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
 960			 */
 961			mutex_lock(&adev->pm.mutex);
 962			ret = (pp_funcs->set_powergating_by_smu(
 963				(adev)->powerplay.pp_handle, block_type, gate));
 964			mutex_unlock(&adev->pm.mutex);
 965		}
 966		break;
 967	case AMD_IP_BLOCK_TYPE_GFX:
 968	case AMD_IP_BLOCK_TYPE_VCN:
 969	case AMD_IP_BLOCK_TYPE_SDMA:
 970	case AMD_IP_BLOCK_TYPE_JPEG:
 971	case AMD_IP_BLOCK_TYPE_GMC:
 972	case AMD_IP_BLOCK_TYPE_ACP:
 973		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
 974			ret = (pp_funcs->set_powergating_by_smu(
 975				(adev)->powerplay.pp_handle, block_type, gate));
 976		}
 977		break;
 978	default:
 979		break;
 980	}
 981
 982	return ret;
 983}
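
As the long comment explains, only the UVD/VCE cases take adev->pm.mutex here: the other block types reach this function from paths that may already hold locks in amdgpu_pm.c, and locking them again produced the recorded hang. A bare sketch of that caller-aware locking compromise, with invented names:

/* Sketch: lock only for callers known not to hold the PM lock already. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

static int set_powergating(int block, bool gate,
			   int (*cb)(int block, bool gate),
			   bool caller_may_hold_lock)
{
	int ret;

	if (caller_may_hold_lock)
		return cb(block, gate);	/* e.g. the GFX/VCN/SDMA paths */

	pthread_mutex_lock(&pm_mutex);	/* e.g. the UVD/VCE paths */
	ret = cb(block, gate);
	pthread_mutex_unlock(&pm_mutex);
	return ret;
}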
 984
 985int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 986{
 987	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 988	void *pp_handle = adev->powerplay.pp_handle;
 989	int ret = 0;
 990
 991	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 992		return -ENOENT;
 993
 994	/* enter BACO state */
 995	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 996
 997	return ret;
 998}
 999
1000int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1001{
1002	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1003	void *pp_handle = adev->powerplay.pp_handle;
1004	int ret = 0;
1005
1006	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1007		return -ENOENT;
1008
1009	/* exit BACO state */
1010	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1011
1012	return ret;
1013}
1014
1015int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1016			     enum pp_mp1_state mp1_state)
1017{
1018	int ret = 0;
1019	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1020
1021	if (pp_funcs && pp_funcs->set_mp1_state) {
1022		ret = pp_funcs->set_mp1_state(
1023				adev->powerplay.pp_handle,
1024				mp1_state);
1025	}
1026
1027	return ret;
1028}
1029
1030bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1031{
1032	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1033	void *pp_handle = adev->powerplay.pp_handle;
1034	bool baco_cap;
1035
1036	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1037		return false;
1038
1039	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1040		return false;
1041
1042	return baco_cap;
1043}
1044
1045int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1046{
1047	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1048	void *pp_handle = adev->powerplay.pp_handle;
1049
1050	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1051		return -ENOENT;
1052
1053	return pp_funcs->asic_reset_mode_2(pp_handle);
1054}
1055
1056int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1057{
1058	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1059	void *pp_handle = adev->powerplay.pp_handle;
1060	int ret = 0;
1061
1062	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1063		return -ENOENT;
1064
1065	/* enter BACO state */
1066	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1067	if (ret)
1068		return ret;
1069
1070	/* exit BACO state */
1071	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1072	if (ret)
1073		return ret;
1074
1075	return 0;
1076}
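
A BACO ("Bus Active, Chip Off") reset is just an enter/exit pair through the same set_asic_baco_state callback: 1 powers the chip down while the PCIe bus stays alive, 0 brings it back. Sketched:

/* Sketch: BACO reset = enter (state 1), then exit (state 0). */
static int baco_reset(int (*set_state)(void *handle, int state), void *handle)
{
	int ret = set_state(handle, 1);	/* enter BACO: bus alive, chip off */

	if (ret)
		return ret;
	return set_state(handle, 0);	/* exit BACO: chip powered again */
}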
1077
1078bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1079{
1080	struct smu_context *smu = &adev->smu;
1081
1082	if (is_support_sw_smu(adev))
1083		return smu_mode1_reset_is_support(smu);
1084
1085	return false;
1086}
1087
1088int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1089{
1090	struct smu_context *smu = &adev->smu;
1091
1092	if (is_support_sw_smu(adev))
1093		return smu_mode1_reset(smu);
1094
1095	return -EOPNOTSUPP;
1096}
1097
1098int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1099				    enum PP_SMC_POWER_PROFILE type,
1100				    bool en)
1101{
1102	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1103	int ret = 0;
1104
1105	if (amdgpu_sriov_vf(adev))
1106		return 0;
1107
1108	if (pp_funcs && pp_funcs->switch_power_profile)
1109		ret = pp_funcs->switch_power_profile(
1110			adev->powerplay.pp_handle, type, en);
1111
1112	return ret;
1113}
1114
1115int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1116			       uint32_t pstate)
1117{
1118	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1119	int ret = 0;
1120
1121	if (pp_funcs && pp_funcs->set_xgmi_pstate)
1122		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1123								pstate);
1124
1125	return ret;
1126}
1127
1128int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1129			     uint32_t cstate)
1130{
1131	int ret = 0;
1132	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1133	void *pp_handle = adev->powerplay.pp_handle;
1134
1135	if (pp_funcs && pp_funcs->set_df_cstate)
1136		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1137
1138	return ret;
1139}
1140
1141int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1142{
1143	struct smu_context *smu = &adev->smu;
1144
1145	if (is_support_sw_smu(adev))
1146		return smu_allow_xgmi_power_down(smu, en);
1147
1148	return 0;
1149}
1150
1151int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1152{
1153	void *pp_handle = adev->powerplay.pp_handle;
1154	const struct amd_pm_funcs *pp_funcs =
1155			adev->powerplay.pp_funcs;
1156	int ret = 0;
1157
1158	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1159		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1160
1161	return ret;
1162}
1163
1164int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1165				      uint32_t msg_id)
1166{
1167	void *pp_handle = adev->powerplay.pp_handle;
1168	const struct amd_pm_funcs *pp_funcs =
1169			adev->powerplay.pp_funcs;
1170	int ret = 0;
1171
1172	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1173		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1174						       msg_id);
1175
1176	return ret;
1177}
1178
1179int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1180				  bool acquire)
1181{
1182	void *pp_handle = adev->powerplay.pp_handle;
1183	const struct amd_pm_funcs *pp_funcs =
1184			adev->powerplay.pp_funcs;
1185	int ret = -EOPNOTSUPP;
1186
1187	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1188		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1189						   acquire);
1190
1191	return ret;
1192}
1193
1194void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1195{
1196	if (adev->pm.dpm_enabled) {
1197		mutex_lock(&adev->pm.mutex);
1198		if (power_supply_is_system_supplied() > 0)
1199			adev->pm.ac_power = true;
1200		else
1201			adev->pm.ac_power = false;
1202		if (adev->powerplay.pp_funcs &&
1203		    adev->powerplay.pp_funcs->enable_bapm)
1204			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1205		mutex_unlock(&adev->pm.mutex);
1206
1207		if (is_support_sw_smu(adev))
1208			smu_set_ac_dc(&adev->smu);
1209	}
1210}
1211
1212int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1213			   void *data, uint32_t *size)
1214{
1215	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1216	int ret = 0;
1217
1218	if (!data || !size)
1219		return -EINVAL;
1220
1221	if (pp_funcs && pp_funcs->read_sensor)
1222		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1223								    sensor, data, size);
1224	else
1225		ret = -EINVAL;
1226
1227	return ret;
1228}
1229
1230void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1231{
1232	struct amdgpu_device *adev =
1233		container_of(work, struct amdgpu_device,
1234			     pm.dpm.thermal.work);
1235	/* switch to the thermal state */
1236	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1237	int temp, size = sizeof(temp);
1238
1239	if (!adev->pm.dpm_enabled)
1240		return;
1241
1242	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1243				    (void *)&temp, &size)) {
1244		if (temp < adev->pm.dpm.thermal.min_temp)
1245			/* switch back the user state */
1246			dpm_state = adev->pm.dpm.user_state;
1247	} else {
1248		if (adev->pm.dpm.thermal.high_to_low)
1249			/* switch back the user state */
1250			dpm_state = adev->pm.dpm.user_state;
1251	}
1252	mutex_lock(&adev->pm.mutex);
1253	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1254		adev->pm.dpm.thermal_active = true;
1255	else
1256		adev->pm.dpm.thermal_active = false;
1257	adev->pm.dpm.state = dpm_state;
1258	mutex_unlock(&adev->pm.mutex);
1259
1260	amdgpu_pm_compute_clocks(adev);
1261}
1262
1263static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1264						     enum amd_pm_state_type dpm_state)
1265{
1266	int i;
1267	struct amdgpu_ps *ps;
1268	u32 ui_class;
1269	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
1270		true : false;
1271
1272	/* check if the vblank period is too short to adjust the mclk */
1273	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1274		if (amdgpu_dpm_vblank_too_short(adev))
1275			single_display = false;
1276	}
1277
1278	/* certain older asics have a separate 3D performance state,
1279	 * so try that first if the user selected performance
1280	 */
1281	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1282		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1283	/* balanced states don't exist at the moment */
1284	if (dpm_state == POWER_STATE_TYPE_BALANCED)
1285		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1286
1287restart_search:
1288	/* Pick the best power state based on current conditions */
1289	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1290		ps = &adev->pm.dpm.ps[i];
1291		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1292		switch (dpm_state) {
1293		/* user states */
1294		case POWER_STATE_TYPE_BATTERY:
1295			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1296				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1297					if (single_display)
1298						return ps;
1299				} else
1300					return ps;
1301			}
1302			break;
1303		case POWER_STATE_TYPE_BALANCED:
1304			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1305				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1306					if (single_display)
1307						return ps;
1308				} else
1309					return ps;
1310			}
1311			break;
1312		case POWER_STATE_TYPE_PERFORMANCE:
1313			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1314				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1315					if (single_display)
1316						return ps;
1317				} else
1318					return ps;
1319			}
1320			break;
1321		/* internal states */
1322		case POWER_STATE_TYPE_INTERNAL_UVD:
1323			if (adev->pm.dpm.uvd_ps)
1324				return adev->pm.dpm.uvd_ps;
1325			else
1326				break;
1327		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1328			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1329				return ps;
1330			break;
1331		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1332			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1333				return ps;
1334			break;
1335		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1336			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1337				return ps;
1338			break;
1339		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1340			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1341				return ps;
1342			break;
1343		case POWER_STATE_TYPE_INTERNAL_BOOT:
1344			return adev->pm.dpm.boot_ps;
1345		case POWER_STATE_TYPE_INTERNAL_THERMAL:
1346			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1347				return ps;
1348			break;
1349		case POWER_STATE_TYPE_INTERNAL_ACPI:
1350			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1351				return ps;
1352			break;
1353		case POWER_STATE_TYPE_INTERNAL_ULV:
1354			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1355				return ps;
1356			break;
1357		case POWER_STATE_TYPE_INTERNAL_3DPERF:
1358			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1359				return ps;
1360			break;
1361		default:
1362			break;
1363		}
1364	}
1365	/* use a fallback state if we didn't match */
1366	switch (dpm_state) {
1367	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1368		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1369		goto restart_search;
1370	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1371	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1372	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1373		if (adev->pm.dpm.uvd_ps) {
1374			return adev->pm.dpm.uvd_ps;
1375		} else {
1376			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1377			goto restart_search;
1378		}
1379	case POWER_STATE_TYPE_INTERNAL_THERMAL:
1380		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1381		goto restart_search;
1382	case POWER_STATE_TYPE_INTERNAL_ACPI:
1383		dpm_state = POWER_STATE_TYPE_BATTERY;
1384		goto restart_search;
1385	case POWER_STATE_TYPE_BATTERY:
1386	case POWER_STATE_TYPE_BALANCED:
1387	case POWER_STATE_TYPE_INTERNAL_3DPERF:
1388		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1389		goto restart_search;
1390	default:
1391		break;
1392	}
1393
1394	return NULL;
1395}
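
When the search misses, the fallback switch retries with a progressively weaker request: UVD SD falls back to HD; the HD-class states use the dedicated UVD state if one exists, else performance; thermal falls back to ACPI, ACPI to battery; and nearly everything else lands on performance. A condensed sketch of the chain (state names abbreviated, uvd_ps handling elided):

/* Sketch: the restart_search fallback chain, simplified. */
enum st { UVD_SD, UVD_HD, THERMAL, ACPI, BATTERY, PERF };

static enum st fallback(enum st s)
{
	switch (s) {
	case UVD_SD:  return UVD_HD;	/* SD decode -> HD decode */
	case UVD_HD:  return PERF;	/* after trying the dedicated UVD state */
	case THERMAL: return ACPI;
	case ACPI:    return BATTERY;
	default:      return PERF;	/* battery/balanced/3dperf -> performance */
	}
}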
1396
1397static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1398{
1399	struct amdgpu_ps *ps;
1400	enum amd_pm_state_type dpm_state;
1401	int ret;
1402	bool equal = false;
1403
1404	/* if dpm init failed */
1405	if (!adev->pm.dpm_enabled)
1406		return;
1407
1408	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1409		/* add other state override checks here */
1410		if ((!adev->pm.dpm.thermal_active) &&
1411		    (!adev->pm.dpm.uvd_active))
1412			adev->pm.dpm.state = adev->pm.dpm.user_state;
1413	}
1414	dpm_state = adev->pm.dpm.state;
1415
1416	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1417	if (ps)
1418		adev->pm.dpm.requested_ps = ps;
1419	else
1420		return;
1421
1422	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
1423		printk("switching from power state:\n");
1424		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1425		printk("switching to power state:\n");
1426		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1427	}
1428
1429	/* update whether vce is active */
1430	ps->vce_active = adev->pm.dpm.vce_active;
1431	if (adev->powerplay.pp_funcs->display_configuration_changed)
1432		amdgpu_dpm_display_configuration_changed(adev);
1433
1434	ret = amdgpu_dpm_pre_set_power_state(adev);
1435	if (ret)
1436		return;
1437
1438	if (adev->powerplay.pp_funcs->check_state_equal) {
1439		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
1440			equal = false;
1441	}
1442
1443	if (equal)
1444		return;
1445
1446	amdgpu_dpm_set_power_state(adev);
1447	amdgpu_dpm_post_set_power_state(adev);
1448
1449	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1450	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1451
1452	if (adev->powerplay.pp_funcs->force_performance_level) {
1453		if (adev->pm.dpm.thermal_active) {
1454			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1455			/* force low perf level for thermal */
1456			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1457			/* save the user's level */
1458			adev->pm.dpm.forced_level = level;
1459		} else {
1460			/* otherwise, user selected level */
1461			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1462		}
1463	}
1464}
1465
1466void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1467{
1468	int i = 0;
1469
1470	if (!adev->pm.dpm_enabled)
1471		return;
1472
1473	if (adev->mode_info.num_crtc)
1474		amdgpu_display_bandwidth_update(adev);
1475
1476	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1477		struct amdgpu_ring *ring = adev->rings[i];
1478		if (ring && ring->sched.ready)
1479			amdgpu_fence_wait_empty(ring);
1480	}
1481
1482	if (adev->powerplay.pp_funcs->dispatch_tasks) {
1483		if (!amdgpu_device_has_dc_support(adev)) {
1484			mutex_lock(&adev->pm.mutex);
1485			amdgpu_dpm_get_active_displays(adev);
1486			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1487			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1488			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1489			/* we have issues with mclk switching with
1490			 * refresh rates over 120 hz on the non-DC code.
1491			 */
1492			if (adev->pm.pm_display_cfg.vrefresh > 120)
1493				adev->pm.pm_display_cfg.min_vblank_time = 0;
1494			if (adev->powerplay.pp_funcs->display_configuration_change)
1495				adev->powerplay.pp_funcs->display_configuration_change(
1496							adev->powerplay.pp_handle,
1497							&adev->pm.pm_display_cfg);
1498			mutex_unlock(&adev->pm.mutex);
1499		}
1500		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1501	} else {
1502		mutex_lock(&adev->pm.mutex);
1503		amdgpu_dpm_get_active_displays(adev);
1504		amdgpu_dpm_change_power_state_locked(adev);
1505		mutex_unlock(&adev->pm.mutex);
1506	}
1507}
1508
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
		    adev->uvd.decode_image_width >= WIDTH_4K) {
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}

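/**
 * amdgpu_dpm_enable_vce - power up/down VCE
 * @adev: amdgpu device pointer
 * @enable: true to enable VCE, false to disable it
 *
 * On SI parts VCE activity is tracked in the dpm state and the
 * clocks are recomputed; everything else powergates the block via
 * the SMU.
 */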
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		if (ret)
			DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
				  enable ? "enable" : "disable", ret);
	}
}

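/**
 * amdgpu_pm_print_power_states - dump all dpm power states
 * @adev: amdgpu device pointer
 *
 * Prints every power state in the dpm table via the backend's
 * print_power_state callback, if one is provided.
 */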
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (!adev->powerplay.pp_funcs->print_power_state)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

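/**
 * amdgpu_dpm_enable_jpeg - power up/down the JPEG block
 * @adev: amdgpu device pointer
 * @enable: true to enable JPEG, false to disable it
 *
 * JPEG is always powergated through the SMU; a failure is only
 * logged.
 */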
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

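/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Asks the powerplay backend to load the SMU firmware and, on
 * success, reports the firmware version back to the caller.
 */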
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}

		if (smu_version)
			*smu_version = adev->pm.fw_version;
	}

	return 0;
}