v6.13.7
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

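/*
 * On legacy (non-powerplay, non-SMU) DPM paths, pp_handle is set to the
 * amdgpu_device itself rather than to a separate powerplay/SMU context,
 * so a simple pointer comparison identifies them.
 */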
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

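/**
 * amdgpu_dpm_get_sclk - query the gfx engine clock
 * @adev: amdgpu_device pointer
 * @low: if true, query the lowest supported level, otherwise the highest
 *
 * Return: the selected clock value, or 0 when the backend has no
 * get_sclk callback.
 */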
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

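/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to power-gate the block, false to ungate it
 *
 * The cached per-block power state is checked first so that redundant
 * requests are skipped, and it is only updated when the SMU call succeeds.
 */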
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!\n",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

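/*
 * Ask the SMU to power up the gfx core via the IMU.  The short sleep after
 * the call presumably gives the power-up request time to settle; the exact
 * reason for the 10 ms value is not documented here.
 */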
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

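/*
 * BACO ("Bus Active, Chip Off") keeps the PCIe link alive while most of the
 * GPU is powered down.  The enter/exit helpers below simply forward to the
 * backend's set_asic_baco_state callback.
 */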
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use BACO for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU, including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

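/*
 * A BACO reset cycles the ASIC through the BACO state: entering powers the
 * chip down, exiting powers it back up again, which serves as a full ASIC
 * reset.
 */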
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

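/**
 * amdgpu_pm_acpi_event_handler - react to ACPI AC/DC power source events
 * @adev: amdgpu_device pointer
 *
 * Re-samples whether the system is on AC power and propagates the result
 * to the BAPM hardware (when available) and to the SMU.
 */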
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

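/**
 * amdgpu_dpm_read_sensor - read a power-management sensor value
 * @adev: amdgpu_device pointer
 * @sensor: which sensor to query (temperature, clocks, power, ...)
 * @data: buffer to receive the value
 * @size: size of @data
 *
 * Return: 0 on success, -EINVAL on bad arguments or when the backend
 * provides no read_sensor callback.
 */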
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

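/**
 * amdgpu_dpm_compute_clocks - re-evaluate the current power state
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements and waits for all rings to drain
 * before handing control to the backend, so the power state can be changed
 * safely while the GPU is otherwise idle.
 */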
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("DPM %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("DPM %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

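/**
 * amdgpu_pm_load_smu_firmware - load the SMU microcode
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded firmware version
 *
 * A no-op for backends without a load_firmware callback and for APUs
 * driven by the software SMU, where no separate load is needed here.
 */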
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

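/*
 * Forcing a performance level may require entering or leaving the UMD
 * pstate: profiling levels ungate gfx power- and clockgating first, and
 * they are re-gated when a non-profiling level is restored.  On Raven
 * (without the Raven2 rework) gfxoff must additionally be disabled while
 * in manual mode.
 */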
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			      enum amd_pp_task task_id,
			      enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled member,
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
					       void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

v5.14.15
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

  98void amdgpu_dpm_print_cap_info(u32 caps)
  99{
 100	printk("\tcaps:");
 101	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
 102		pr_cont(" single_disp");
 103	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
 104		pr_cont(" video");
 105	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
 106		pr_cont(" no_dc");
 107	pr_cont("\n");
 108}
 109
 110void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 111				struct amdgpu_ps *rps)
 112{
 113	printk("\tstatus:");
 114	if (rps == adev->pm.dpm.current_ps)
 115		pr_cont(" c");
 116	if (rps == adev->pm.dpm.requested_ps)
 117		pr_cont(" r");
 118	if (rps == adev->pm.dpm.boot_ps)
 119		pr_cont(" b");
 120	pr_cont("\n");
 121}
 122
 123void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 124{
 125	struct drm_device *ddev = adev_to_drm(adev);
 126	struct drm_crtc *crtc;
 127	struct amdgpu_crtc *amdgpu_crtc;
 128
 129	adev->pm.dpm.new_active_crtcs = 0;
 130	adev->pm.dpm.new_active_crtc_count = 0;
 131	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 132		list_for_each_entry(crtc,
 133				    &ddev->mode_config.crtc_list, head) {
 134			amdgpu_crtc = to_amdgpu_crtc(crtc);
 135			if (amdgpu_crtc->enabled) {
 136				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
 137				adev->pm.dpm.new_active_crtc_count++;
 138			}
 139		}
 140	}
 141}
 142
 143
 144u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 145{
 146	struct drm_device *dev = adev_to_drm(adev);
 147	struct drm_crtc *crtc;
 148	struct amdgpu_crtc *amdgpu_crtc;
 149	u32 vblank_in_pixels;
 150	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 151
 152	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 153		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 154			amdgpu_crtc = to_amdgpu_crtc(crtc);
 155			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 156				vblank_in_pixels =
 157					amdgpu_crtc->hw_mode.crtc_htotal *
 158					(amdgpu_crtc->hw_mode.crtc_vblank_end -
 159					amdgpu_crtc->hw_mode.crtc_vdisplay +
 160					(amdgpu_crtc->v_border * 2));
 161
 162				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 163				break;
 164			}
 165		}
 166	}
 167
 168	return vblank_time_us;
 169}
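
As a quick sanity check of the arithmetic above, take a standard 1920x1080@60 CEA mode: pixel clock 148500 kHz, crtc_htotal 2200, and 45 lines of vertical blanking (crtc_vblank_end - crtc_vdisplay) with no borders. Since hw_mode.clock is in kHz, multiplying pixels by 1000 yields microseconds:

	vblank_in_pixels = 2200 * 45 = 99000
	vblank_time_us   = 99000 * 1000 / 148500 = 666 us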
 170
 171u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 172{
 173	struct drm_device *dev = adev_to_drm(adev);
 174	struct drm_crtc *crtc;
 175	struct amdgpu_crtc *amdgpu_crtc;
 176	u32 vrefresh = 0;
 177
 178	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 179		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 180			amdgpu_crtc = to_amdgpu_crtc(crtc);
 181			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 182				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 183				break;
 184			}
 185		}
 186	}
 187
 188	return vrefresh;
 189}
 190
 191bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
 192{
 193	switch (sensor) {
 194	case THERMAL_TYPE_RV6XX:
 195	case THERMAL_TYPE_RV770:
 196	case THERMAL_TYPE_EVERGREEN:
 197	case THERMAL_TYPE_SUMO:
 198	case THERMAL_TYPE_NI:
 199	case THERMAL_TYPE_SI:
 200	case THERMAL_TYPE_CI:
 201	case THERMAL_TYPE_KV:
 202		return true;
 203	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
 204	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
 205		return false; /* need special handling */
 206	case THERMAL_TYPE_NONE:
 207	case THERMAL_TYPE_EXTERNAL:
 208	case THERMAL_TYPE_EXTERNAL_GPIO:
 209	default:
 210		return false;
 211	}
 212}
 213
 214union power_info {
 215	struct _ATOM_POWERPLAY_INFO info;
 216	struct _ATOM_POWERPLAY_INFO_V2 info_2;
 217	struct _ATOM_POWERPLAY_INFO_V3 info_3;
 218	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
 219	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
 220	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
 221	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
 222	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
 223};
 224
 225union fan_info {
 226	struct _ATOM_PPLIB_FANTABLE fan;
 227	struct _ATOM_PPLIB_FANTABLE2 fan2;
 228	struct _ATOM_PPLIB_FANTABLE3 fan3;
 229};
 230
 231static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
 232					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
 233{
 234	u32 size = atom_table->ucNumEntries *
 235		sizeof(struct amdgpu_clock_voltage_dependency_entry);
 236	int i;
 237	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
 238
 239	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
 240	if (!amdgpu_table->entries)
 241		return -ENOMEM;
 242
 243	entry = &atom_table->entries[0];
 244	for (i = 0; i < atom_table->ucNumEntries; i++) {
 245		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
 246			(entry->ucClockHigh << 16);
 247		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
 248		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
 249			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
 250	}
 251	amdgpu_table->count = atom_table->ucNumEntries;
 252
 253	return 0;
 254}
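
The BIOS packs each 24-bit clock as a little-endian 16-bit low word plus an 8-bit high byte, which the loop above reassembles. A standalone illustration with made-up values (these tables conventionally carry clocks in 10 kHz units):

	u16 low  = le16_to_cpu(entry->usClockLow);	/* e.g. 0x86a0 */
	u8  high = entry->ucClockHigh;			/* e.g. 0x01 */
	u32 clk  = low | (high << 16);			/* 0x186a0 = 100000, i.e. 1 GHz */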
 255
 256int amdgpu_get_platform_caps(struct amdgpu_device *adev)
 257{
 258	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 259	union power_info *power_info;
 260	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 261	u16 data_offset;
 262	u8 frev, crev;
 263
 264	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 265				   &frev, &crev, &data_offset))
 266		return -EINVAL;
 267	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 268
 269	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
 270	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
 271	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 272
 273	return 0;
 274}
 275
 276/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
 277#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
 278#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
 279#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
 280#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
 281#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
 282#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
 283#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
 284#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
 285
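Each size above marks the extended-header length at which the corresponding optional table offset first exists, so the parser below gates every access on both the header size and a non-zero offset before dereferencing anything, e.g.:

	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
	    ext_hdr->usVCETableOffset) {
		/* only now is it safe to read the VCE table */
	}
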
 286int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 287{
 288	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 289	union power_info *power_info;
 290	union fan_info *fan_info;
 291	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
 292	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 293	u16 data_offset;
 294	u8 frev, crev;
 295	int ret, i;
 296
 297	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 298				   &frev, &crev, &data_offset))
 299		return -EINVAL;
 300	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 301
 302	/* fan table */
 303	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 304	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 305		if (power_info->pplib3.usFanTableOffset) {
 306			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
 307						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
 308			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
 309			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
 310			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
 311			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
 312			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
 313			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
 314			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
 315			if (fan_info->fan.ucFanTableFormat >= 2)
 316				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
 317			else
 318				adev->pm.dpm.fan.t_max = 10900;
 319			adev->pm.dpm.fan.cycle_delay = 100000;
 320			if (fan_info->fan.ucFanTableFormat >= 3) {
 321				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
 322				adev->pm.dpm.fan.default_max_fan_pwm =
 323					le16_to_cpu(fan_info->fan3.usFanPWMMax);
 324				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
 325				adev->pm.dpm.fan.fan_output_sensitivity =
 326					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
 327			}
 328			adev->pm.dpm.fan.ucode_fan_control = true;
 329		}
 330	}
 331
 332	/* clock dependency tables, shedding tables */
 333	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 334	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
 335		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
 336			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 337				(mode_info->atom_context->bios + data_offset +
 338				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
 339			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
 340								 dep_table);
 341			if (ret) {
 342				amdgpu_free_extended_power_table(adev);
 343				return ret;
 344			}
 345		}
 346		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
 347			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 348				(mode_info->atom_context->bios + data_offset +
 349				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
 350			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
 351								 dep_table);
 352			if (ret) {
 353				amdgpu_free_extended_power_table(adev);
 354				return ret;
 355			}
 356		}
 357		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
 358			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 359				(mode_info->atom_context->bios + data_offset +
 360				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
 361			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
 362								 dep_table);
 363			if (ret) {
 364				amdgpu_free_extended_power_table(adev);
 365				return ret;
 366			}
 367		}
 368		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
 369			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 370				(mode_info->atom_context->bios + data_offset +
 371				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
 372			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
 373								 dep_table);
 374			if (ret) {
 375				amdgpu_free_extended_power_table(adev);
 376				return ret;
 377			}
 378		}
 379		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 380			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
 381				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
 382				(mode_info->atom_context->bios + data_offset +
 383				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
 384			if (clk_v->ucNumEntries) {
 385				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
 386					le16_to_cpu(clk_v->entries[0].usSclkLow) |
 387					(clk_v->entries[0].ucSclkHigh << 16);
 388				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
 389					le16_to_cpu(clk_v->entries[0].usMclkLow) |
 390					(clk_v->entries[0].ucMclkHigh << 16);
 391				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
 392					le16_to_cpu(clk_v->entries[0].usVddc);
 393				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
 394					le16_to_cpu(clk_v->entries[0].usVddci);
 395			}
 396		}
 397		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
 398			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
 399				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
 400				(mode_info->atom_context->bios + data_offset +
 401				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
 402			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 403
 404			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
 405				kcalloc(psl->ucNumEntries,
 406					sizeof(struct amdgpu_phase_shedding_limits_entry),
 407					GFP_KERNEL);
 408			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
 409				amdgpu_free_extended_power_table(adev);
 410				return -ENOMEM;
 411			}
 412
 413			entry = &psl->entries[0];
 414			for (i = 0; i < psl->ucNumEntries; i++) {
 415				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
 416					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
 417				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
 418					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
 419				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
 420					le16_to_cpu(entry->usVoltage);
 421				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
 422					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
 423			}
 424			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
 425				psl->ucNumEntries;
 426		}
 427	}
 428
 429	/* cac data */
 430	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 431	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
 432		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
 433		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
 434		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
 435		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
 436		if (adev->pm.dpm.tdp_od_limit)
 437			adev->pm.dpm.power_control = true;
 438		else
 439			adev->pm.dpm.power_control = false;
 440		adev->pm.dpm.tdp_adjustment = 0;
 441		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
 442		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
 443		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
 444		if (power_info->pplib5.usCACLeakageTableOffset) {
 445			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
 446				(ATOM_PPLIB_CAC_Leakage_Table *)
 447				(mode_info->atom_context->bios + data_offset +
 448				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
 449			ATOM_PPLIB_CAC_Leakage_Record *entry;
 450			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
 451			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
 452			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
 453				amdgpu_free_extended_power_table(adev);
 454				return -ENOMEM;
 455			}
 456			entry = &cac_table->entries[0];
 457			for (i = 0; i < cac_table->ucNumEntries; i++) {
 458				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
 459					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
 460						le16_to_cpu(entry->usVddc1);
 461					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
 462						le16_to_cpu(entry->usVddc2);
 463					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
 464						le16_to_cpu(entry->usVddc3);
 465				} else {
 466					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
 467						le16_to_cpu(entry->usVddc);
 468					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
 469						le32_to_cpu(entry->ulLeakageValue);
 470				}
 471				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
 472					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
 473			}
 474			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
 475		}
 476	}
 477
 478	/* ext tables */
 479	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 480	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 481		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
 482			(mode_info->atom_context->bios + data_offset +
 483			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
 484		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
 485			ext_hdr->usVCETableOffset) {
 486			VCEClockInfoArray *array = (VCEClockInfoArray *)
 487				(mode_info->atom_context->bios + data_offset +
 488				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
 489			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
 490				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
 491				(mode_info->atom_context->bios + data_offset +
 492				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 493				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
 494			ATOM_PPLIB_VCE_State_Table *states =
 495				(ATOM_PPLIB_VCE_State_Table *)
 496				(mode_info->atom_context->bios + data_offset +
 497				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 498				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
 499				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
 500			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
 501			ATOM_PPLIB_VCE_State_Record *state_entry;
 502			VCEClockInfo *vce_clk;
 503			u32 size = limits->numEntries *
 504				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
 505			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
 506				kzalloc(size, GFP_KERNEL);
 507			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
 508				amdgpu_free_extended_power_table(adev);
 509				return -ENOMEM;
 510			}
 511			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 512				limits->numEntries;
 513			entry = &limits->entries[0];
 514			state_entry = &states->entries[0];
 515			for (i = 0; i < limits->numEntries; i++) {
 516				vce_clk = (VCEClockInfo *)
 517					((u8 *)&array->entries[0] +
 518					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 519				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
 520					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 521				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
 522					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 523				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
 524					le16_to_cpu(entry->usVoltage);
 525				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 526					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 527			}
 528			adev->pm.dpm.num_of_vce_states =
 529					states->numEntries > AMD_MAX_VCE_LEVELS ?
 530					AMD_MAX_VCE_LEVELS : states->numEntries;
 531			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 532				vce_clk = (VCEClockInfo *)
 533					((u8 *)&array->entries[0] +
 534					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 535				adev->pm.dpm.vce_states[i].evclk =
 536					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 537				adev->pm.dpm.vce_states[i].ecclk =
 538					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 539				adev->pm.dpm.vce_states[i].clk_idx =
 540					state_entry->ucClockInfoIndex & 0x3f;
 541				adev->pm.dpm.vce_states[i].pstate =
 542					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
 543				state_entry = (ATOM_PPLIB_VCE_State_Record *)
 544					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
 545			}
 546		}
 547		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
 548			ext_hdr->usUVDTableOffset) {
 549			UVDClockInfoArray *array = (UVDClockInfoArray *)
 550				(mode_info->atom_context->bios + data_offset +
 551				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
 552			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
 553				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
 554				(mode_info->atom_context->bios + data_offset +
 555				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
 556				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
 557			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
 558			u32 size = limits->numEntries *
 559				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
 560			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
 561				kzalloc(size, GFP_KERNEL);
 562			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
 563				amdgpu_free_extended_power_table(adev);
 564				return -ENOMEM;
 565			}
 566			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
 567				limits->numEntries;
 568			entry = &limits->entries[0];
 569			for (i = 0; i < limits->numEntries; i++) {
 570				UVDClockInfo *uvd_clk = (UVDClockInfo *)
 571					((u8 *)&array->entries[0] +
 572					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
 573				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
 574					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
 575				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
 576					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
 577				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
 578					le16_to_cpu(entry->usVoltage);
 579				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
 580					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
 581			}
 582		}
 583		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
 584			ext_hdr->usSAMUTableOffset) {
 585			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
 586				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
 587				(mode_info->atom_context->bios + data_offset +
 588				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
 589			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
 590			u32 size = limits->numEntries *
 591				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 592			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
 593				kzalloc(size, GFP_KERNEL);
 594			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
 595				amdgpu_free_extended_power_table(adev);
 596				return -ENOMEM;
 597			}
 598			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
 599				limits->numEntries;
 600			entry = &limits->entries[0];
 601			for (i = 0; i < limits->numEntries; i++) {
 602				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
 603					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
 604				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
 605					le16_to_cpu(entry->usVoltage);
 606				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
 607					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
 608			}
 609		}
 610		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
 611		    ext_hdr->usPPMTableOffset) {
 612			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
 613				(mode_info->atom_context->bios + data_offset +
 614				 le16_to_cpu(ext_hdr->usPPMTableOffset));
 615			adev->pm.dpm.dyn_state.ppm_table =
 616				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
 617			if (!adev->pm.dpm.dyn_state.ppm_table) {
 618				amdgpu_free_extended_power_table(adev);
 619				return -ENOMEM;
 620			}
 621			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
 622			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
 623				le16_to_cpu(ppm->usCpuCoreNumber);
 624			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
 625				le32_to_cpu(ppm->ulPlatformTDP);
 626			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
 627				le32_to_cpu(ppm->ulSmallACPlatformTDP);
 628			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
 629				le32_to_cpu(ppm->ulPlatformTDC);
 630			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
 631				le32_to_cpu(ppm->ulSmallACPlatformTDC);
 632			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
 633				le32_to_cpu(ppm->ulApuTDP);
 634			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
 635				le32_to_cpu(ppm->ulDGpuTDP);
 636			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
 637				le32_to_cpu(ppm->ulDGpuUlvPower);
 638			adev->pm.dpm.dyn_state.ppm_table->tj_max =
 639				le32_to_cpu(ppm->ulTjmax);
 640		}
 641		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
 642			ext_hdr->usACPTableOffset) {
 643			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
 644				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
 645				(mode_info->atom_context->bios + data_offset +
 646				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
 647			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
 648			u32 size = limits->numEntries *
 649				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 650			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
 651				kzalloc(size, GFP_KERNEL);
 652			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
 653				amdgpu_free_extended_power_table(adev);
 654				return -ENOMEM;
 655			}
 656			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
 657				limits->numEntries;
 658			entry = &limits->entries[0];
 659			for (i = 0; i < limits->numEntries; i++) {
 660				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
 661					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
 662				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
 663					le16_to_cpu(entry->usVoltage);
 664				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
 665					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
 666			}
 667		}
 668		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
 669			ext_hdr->usPowerTuneTableOffset) {
 670			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
 671					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 672			ATOM_PowerTune_Table *pt;
 673			adev->pm.dpm.dyn_state.cac_tdp_table =
 674				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
 675			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
 676				amdgpu_free_extended_power_table(adev);
 677				return -ENOMEM;
 678			}
 679			if (rev > 0) {
 680				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
 681					(mode_info->atom_context->bios + data_offset +
 682					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 683				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
 684					ppt->usMaximumPowerDeliveryLimit;
 685				pt = &ppt->power_tune_table;
 686			} else {
 687				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
 688					(mode_info->atom_context->bios + data_offset +
 689					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 690				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
 691				pt = &ppt->power_tune_table;
 692			}
 693			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
 694			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
 695				le16_to_cpu(pt->usConfigurableTDP);
 696			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
 697			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
 698				le16_to_cpu(pt->usBatteryPowerLimit);
 699			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
 700				le16_to_cpu(pt->usSmallPowerLimit);
 701			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
 702				le16_to_cpu(pt->usLowCACLeakage);
 703			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
 704				le16_to_cpu(pt->usHighCACLeakage);
 705		}
 706		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
 707				ext_hdr->usSclkVddgfxTableOffset) {
 708			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 709				(mode_info->atom_context->bios + data_offset +
 710				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
 711			ret = amdgpu_parse_clk_voltage_dep_table(
 712					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
 713					dep_table);
 714			if (ret) {
 715				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
 716				return ret;
 717			}
 718		}
 719	}
 720
 721	return 0;
 722}
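
Every sub-table in the function above is walked with the same idiom: a counted array of packed BIOS records advanced by explicit byte arithmetic, so host-side struct padding can never skew the stride. Reduced to a sketch (consume() is a placeholder for copying the fields out):

	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry = &atom_table->entries[0];
	int i;

	for (i = 0; i < atom_table->ucNumEntries; i++) {
		consume(entry);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}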
 723
 724void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
 725{
 726	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
 727
 728	kfree(dyn_state->vddc_dependency_on_sclk.entries);
 729	kfree(dyn_state->vddci_dependency_on_mclk.entries);
 730	kfree(dyn_state->vddc_dependency_on_mclk.entries);
 731	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
 732	kfree(dyn_state->cac_leakage_table.entries);
 733	kfree(dyn_state->phase_shedding_limits_table.entries);
 734	kfree(dyn_state->ppm_table);
 735	kfree(dyn_state->cac_tdp_table);
 736	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
 737	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
 738	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
 739	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 740	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
 741}
 742
 743static const char *pp_lib_thermal_controller_names[] = {
 744	"NONE",
 745	"lm63",
 746	"adm1032",
 747	"adm1030",
 748	"max6649",
 749	"lm64",
 750	"f75375",
 751	"RV6xx",
 752	"RV770",
 753	"adt7473",
 754	"NONE",
 755	"External GPIO",
 756	"Evergreen",
 757	"emc2103",
 758	"Sumo",
 759	"Northern Islands",
 760	"Southern Islands",
 761	"lm96163",
 762	"Sea Islands",
 763	"Kaveri/Kabini",
 764};
 765
 766void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
 767{
 768	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 769	ATOM_PPLIB_POWERPLAYTABLE *power_table;
 770	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 771	ATOM_PPLIB_THERMALCONTROLLER *controller;
 772	struct amdgpu_i2c_bus_rec i2c_bus;
 773	u16 data_offset;
 774	u8 frev, crev;
 775
 776	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 777				   &frev, &crev, &data_offset))
 778		return;
 779	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
 780		(mode_info->atom_context->bios + data_offset);
 781	controller = &power_table->sThermalController;
 782
 783	/* add the i2c bus for thermal/fan chip */
 784	if (controller->ucType > 0) {
 785		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
 786			adev->pm.no_fan = true;
 787		adev->pm.fan_pulses_per_revolution =
 788			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
 789		if (adev->pm.fan_pulses_per_revolution) {
 790			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
 791			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
 792		}
 793		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
 794			DRM_INFO("Internal thermal controller %s fan control\n",
 795				 (controller->ucFanParameters &
 796				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 797			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
 798		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
 799			DRM_INFO("Internal thermal controller %s fan control\n",
 800				 (controller->ucFanParameters &
 801				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 802			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
 803		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
 804			DRM_INFO("Internal thermal controller %s fan control\n",
 805				 (controller->ucFanParameters &
 806				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 807			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
 808		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
 809			DRM_INFO("Internal thermal controller %s fan control\n",
 810				 (controller->ucFanParameters &
 811				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 812			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
 813		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
 814			DRM_INFO("Internal thermal controller %s fan control\n",
 815				 (controller->ucFanParameters &
 816				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 817			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
 818		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
 819			DRM_INFO("Internal thermal controller %s fan control\n",
 820				 (controller->ucFanParameters &
 821				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 822			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
 823		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
 824			DRM_INFO("Internal thermal controller %s fan control\n",
 825				 (controller->ucFanParameters &
 826				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 827			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
 828		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
 829			DRM_INFO("Internal thermal controller %s fan control\n",
 830				 (controller->ucFanParameters &
 831				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 832			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
 833		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
 834			DRM_INFO("External GPIO thermal controller %s fan control\n",
 835				 (controller->ucFanParameters &
 836				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 837			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
 838		} else if (controller->ucType ==
 839			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
 840			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
 841				 (controller->ucFanParameters &
 842				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 843			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
 844		} else if (controller->ucType ==
 845			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
 846			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
 847				 (controller->ucFanParameters &
 848				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 849			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 850		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 851			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 852				 pp_lib_thermal_controller_names[controller->ucType],
 853				 controller->ucI2cAddress >> 1,
 854				 (controller->ucFanParameters &
 855				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 856			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
 857			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
 858			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
 859			if (adev->pm.i2c_bus) {
 860				struct i2c_board_info info = { };
 861				const char *name = pp_lib_thermal_controller_names[controller->ucType];
 862				info.addr = controller->ucI2cAddress >> 1;
 863				strlcpy(info.type, name, sizeof(info.type));
 864				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
 865			}
 866		} else {
 867			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
 868				 controller->ucType,
 869				 controller->ucI2cAddress >> 1,
 870				 (controller->ucFanParameters &
 871				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 872		}
 873	}
 874}
 875
 876enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
 877						 u32 sys_mask,
 878						 enum amdgpu_pcie_gen asic_gen,
 879						 enum amdgpu_pcie_gen default_gen)
 880{
 881	switch (asic_gen) {
 882	case AMDGPU_PCIE_GEN1:
 883		return AMDGPU_PCIE_GEN1;
 884	case AMDGPU_PCIE_GEN2:
 885		return AMDGPU_PCIE_GEN2;
 886	case AMDGPU_PCIE_GEN3:
 887		return AMDGPU_PCIE_GEN3;
 888	default:
 889		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
 890		    (default_gen == AMDGPU_PCIE_GEN3))
 891			return AMDGPU_PCIE_GEN3;
 892		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
 893			 (default_gen == AMDGPU_PCIE_GEN2))
 894			return AMDGPU_PCIE_GEN2;
 895		else
 896			return AMDGPU_PCIE_GEN1;
 897	}
 898	return AMDGPU_PCIE_GEN1;
 899}
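
A caller passes the platform link-speed mask plus a preferred default, and the helper clamps to what both sides support. A hedged usage sketch (the exact constants, including AMDGPU_PCIE_GEN_INVALID for "no ASIC-specific cap", depend on the tree):

	enum amdgpu_pcie_gen gen =
		amdgpu_get_pcie_gen_support(adev,
					    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
					    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2,
					    AMDGPU_PCIE_GEN_INVALID,
					    AMDGPU_PCIE_GEN3);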
 900
 901struct amd_vce_state*
 902amdgpu_get_vce_clock_state(void *handle, u32 idx)
 903{
 904	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 905
 906	if (idx < adev->pm.dpm.num_of_vce_states)
 907		return &adev->pm.dpm.vce_states[idx];
 908
 909	return NULL;
 910}
 911
 912int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 913{
 914	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 915
 916	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 917}
 918
 919int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 920{
 921	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 922
 923	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 924}
 925
 926int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
 927{
 928	int ret = 0;
 929	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 930
 931	switch (block_type) {
 932	case AMD_IP_BLOCK_TYPE_UVD:
 933	case AMD_IP_BLOCK_TYPE_VCE:
 934		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
 935			/*
 936			 * TODO: need a better lock mechanism
 937			 *
 938			 * Here adev->pm.mutex lock protection is enforced on
 939			 * UVD and VCE cases only. Since for other cases, there
 940			 * may be already lock protection in amdgpu_pm.c.
 941			 * This is a quick fix for the deadlock issue below.
 942			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
 943			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
 944			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
 945			 *     ocltst          D    0  2028   2026 0x00000000
 946			 *     Call Trace:
 947			 *     __schedule+0x2c0/0x870
 948			 *     schedule+0x2c/0x70
 949			 *     schedule_preempt_disabled+0xe/0x10
 950			 *     __mutex_lock.isra.9+0x26d/0x4e0
 951			 *     __mutex_lock_slowpath+0x13/0x20
 952			 *     ? __mutex_lock_slowpath+0x13/0x20
 953			 *     mutex_lock+0x2f/0x40
 954			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
 955			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
 956			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
 957			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
 958			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
 959			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
 960			 */
 961			mutex_lock(&adev->pm.mutex);
 962			ret = (pp_funcs->set_powergating_by_smu(
 963				(adev)->powerplay.pp_handle, block_type, gate));
 964			mutex_unlock(&adev->pm.mutex);
 965		}
 966		break;
 967	case AMD_IP_BLOCK_TYPE_GFX:
 968	case AMD_IP_BLOCK_TYPE_VCN:
 969	case AMD_IP_BLOCK_TYPE_SDMA:
 970	case AMD_IP_BLOCK_TYPE_JPEG:
 971	case AMD_IP_BLOCK_TYPE_GMC:
 972	case AMD_IP_BLOCK_TYPE_ACP:
 973		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
 974			ret = (pp_funcs->set_powergating_by_smu(
 975				(adev)->powerplay.pp_handle, block_type, gate));
 976		}
 977		break;
 978	default:
 979		break;
 980	}
 981
 982	return ret;
 983}
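
A hedged example of how an IP block might call this wrapper to gate VCN when it goes idle (the error message is illustrative):

	int r = amdgpu_dpm_set_powergating_by_smu(adev,
						  AMD_IP_BLOCK_TYPE_VCN,
						  true /* gate */);
	if (r)
		DRM_ERROR("VCN powergating failed, r = %d\n", r);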
 984
 985int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 986{
 987	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 988	void *pp_handle = adev->powerplay.pp_handle;
 989	int ret = 0;
 990
 991	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 992		return -ENOENT;
 993
 994	/* enter BACO state */
 995	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 996
 997	return ret;
 998}
 999
1000int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1001{
1002	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1003	void *pp_handle = adev->powerplay.pp_handle;
1004	int ret = 0;
1005
1006	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1007		return -ENOENT;
1008
1009	/* exit BACO state */
1010	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1011
1012	return ret;
1013}
1014
1015int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1016			     enum pp_mp1_state mp1_state)
1017{
1018	int ret = 0;
1019	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1020
1021	if (pp_funcs && pp_funcs->set_mp1_state) {
1022		ret = pp_funcs->set_mp1_state(
1023				adev->powerplay.pp_handle,
1024				mp1_state);
1025	}
1026
1027	return ret;
1028}
1029
1030bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1031{
1032	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1033	void *pp_handle = adev->powerplay.pp_handle;
1034	bool baco_cap;
1035
1036	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1037		return false;
1038
1039	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1040		return false;
1041
1042	return baco_cap;
1043}
1044
1045int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1046{
1047	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1048	void *pp_handle = adev->powerplay.pp_handle;
1049
1050	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1051		return -ENOENT;
1052
1053	return pp_funcs->asic_reset_mode_2(pp_handle);
1054}
1055
1056int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1057{
1058	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1059	void *pp_handle = adev->powerplay.pp_handle;
1060	int ret = 0;
1061
1062	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1063		return -ENOENT;
1064
1065	/* enter BACO state */
1066	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1067	if (ret)
1068		return ret;
1069
1070	/* exit BACO state */
1071	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1072	if (ret)
1073		return ret;
1074
1075	return 0;
1076}
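
Combined with amdgpu_dpm_is_baco_supported() above, a reset path would probe before firing; a sketch, not the driver's actual reset policy:

	if (amdgpu_dpm_is_baco_supported(adev)) {
		int r = amdgpu_dpm_baco_reset(adev);

		if (r)
			DRM_ERROR("BACO reset failed, r = %d\n", r);
	}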
1077
1078bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1079{
1080	struct smu_context *smu = &adev->smu;
1081
1082	if (is_support_sw_smu(adev))
1083		return smu_mode1_reset_is_support(smu);
1084
1085	return false;
1086}
1087
1088int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1089{
1090	struct smu_context *smu = &adev->smu;
1091
1092	if (is_support_sw_smu(adev))
1093		return smu_mode1_reset(smu);
1094
1095	return -EOPNOTSUPP;
1096}
1097
1098int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1099				    enum PP_SMC_POWER_PROFILE type,
1100				    bool en)
1101{
1102	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1103	int ret = 0;
1104
1105	if (amdgpu_sriov_vf(adev))
1106		return 0;
1107
1108	if (pp_funcs && pp_funcs->switch_power_profile)
1109		ret = pp_funcs->switch_power_profile(
1110			adev->powerplay.pp_handle, type, en);
1111
1112	return ret;
1113}
1114
1115int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1116			       uint32_t pstate)
1117{
1118	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1119	int ret = 0;
1120
1121	if (pp_funcs && pp_funcs->set_xgmi_pstate)
1122		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1123								pstate);
1124
1125	return ret;
1126}
1127
1128int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1129			     uint32_t cstate)
1130{
1131	int ret = 0;
1132	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1133	void *pp_handle = adev->powerplay.pp_handle;
1134
1135	if (pp_funcs && pp_funcs->set_df_cstate)
1136		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1137
1138	return ret;
1139}
1140
1141int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1142{
1143	struct smu_context *smu = &adev->smu;
1144
1145	if (is_support_sw_smu(adev))
1146		return smu_allow_xgmi_power_down(smu, en);
1147
1148	return 0;
1149}
1150
1151int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1152{
1153	void *pp_handle = adev->powerplay.pp_handle;
1154	const struct amd_pm_funcs *pp_funcs =
1155			adev->powerplay.pp_funcs;
1156	int ret = 0;
1157
1158	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1159		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1160
1161	return ret;
1162}
1163
1164int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1165				      uint32_t msg_id)
1166{
1167	void *pp_handle = adev->powerplay.pp_handle;
1168	const struct amd_pm_funcs *pp_funcs =
1169			adev->powerplay.pp_funcs;
1170	int ret = 0;
1171
1172	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1173		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1174						       msg_id);
1175
1176	return ret;
1177}
1178
1179int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1180				  bool acquire)
1181{
1182	void *pp_handle = adev->powerplay.pp_handle;
1183	const struct amd_pm_funcs *pp_funcs =
1184			adev->powerplay.pp_funcs;
1185	int ret = -EOPNOTSUPP;
1186
1187	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1188		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1189						   acquire);
1190
1191	return ret;
1192}
1193
1194void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1195{
1196	if (adev->pm.dpm_enabled) {
1197		mutex_lock(&adev->pm.mutex);
1198		if (power_supply_is_system_supplied() > 0)
1199			adev->pm.ac_power = true;
1200		else
1201			adev->pm.ac_power = false;
1202		if (adev->powerplay.pp_funcs &&
1203		    adev->powerplay.pp_funcs->enable_bapm)
1204			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1205		mutex_unlock(&adev->pm.mutex);
1206
1207		if (is_support_sw_smu(adev))
1208			smu_set_ac_dc(&adev->smu);
1209	}
1210}
1211
1212int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1213			   void *data, uint32_t *size)
1214{
1215	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1216	int ret = 0;
1217
1218	if (!data || !size)
1219		return -EINVAL;
1220
1221	if (pp_funcs && pp_funcs->read_sensor)
1222		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1223								    sensor, data, size);
1224	else
1225		ret = -EINVAL;
1226
1227	return ret;
1228}
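
The thermal handler below is one real consumer; in isolation a read looks like this, assuming the backend implements the sensor (GPU temperature is reported in millidegrees Celsius):

	int temp, size = sizeof(temp);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size))
		DRM_INFO("GPU temperature: %d mC\n", temp);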
1229
1230void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1231{
1232	struct amdgpu_device *adev =
1233		container_of(work, struct amdgpu_device,
1234			     pm.dpm.thermal.work);
1235	/* switch to the thermal state */
1236	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1237	int temp, size = sizeof(temp);
1238
1239	if (!adev->pm.dpm_enabled)
1240		return;
1241
1242	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1243				    (void *)&temp, &size)) {
1244		if (temp < adev->pm.dpm.thermal.min_temp)
1245			/* switch back the user state */
1246			dpm_state = adev->pm.dpm.user_state;
1247	} else {
1248		if (adev->pm.dpm.thermal.high_to_low)
1249			/* switch back the user state */
1250			dpm_state = adev->pm.dpm.user_state;
1251	}
1252	mutex_lock(&adev->pm.mutex);
1253	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1254		adev->pm.dpm.thermal_active = true;
1255	else
1256		adev->pm.dpm.thermal_active = false;
1257	adev->pm.dpm.state = dpm_state;
1258	mutex_unlock(&adev->pm.mutex);
1259
1260	amdgpu_pm_compute_clocks(adev);
1261}
1262
1263static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1264						     enum amd_pm_state_type dpm_state)
1265{
1266	int i;
1267	struct amdgpu_ps *ps;
1268	u32 ui_class;
1269	bool single_display =
1270		(adev->pm.dpm.new_active_crtc_count < 2);
1271
1272	/* check if the vblank period is too short to adjust the mclk */
1273	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1274		if (amdgpu_dpm_vblank_too_short(adev))
1275			single_display = false;
1276	}
1277
1278	/* certain older asics have a separate 3D performance state,
1279	 * so try that first if the user selected performance
1280	 */
1281	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1282		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1283	/* balanced states don't exist at the moment */
1284	if (dpm_state == POWER_STATE_TYPE_BALANCED)
1285		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1286
1287restart_search:
1288	/* Pick the best power state based on current conditions */
1289	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1290		ps = &adev->pm.dpm.ps[i];
1291		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1292		switch (dpm_state) {
1293		/* user states */
1294		case POWER_STATE_TYPE_BATTERY:
1295			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1296				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1297					if (single_display)
1298						return ps;
1299				} else
1300					return ps;
1301			}
1302			break;
1303		case POWER_STATE_TYPE_BALANCED:
1304			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1305				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1306					if (single_display)
1307						return ps;
1308				} else
1309					return ps;
1310			}
1311			break;
1312		case POWER_STATE_TYPE_PERFORMANCE:
1313			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1314				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1315					if (single_display)
1316						return ps;
1317				} else
1318					return ps;
1319			}
1320			break;
1321		/* internal states */
1322		case POWER_STATE_TYPE_INTERNAL_UVD:
1323			if (adev->pm.dpm.uvd_ps)
1324				return adev->pm.dpm.uvd_ps;
1325			else
1326				break;
1327		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1328			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1329				return ps;
1330			break;
1331		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1332			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1333				return ps;
1334			break;
1335		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1336			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1337				return ps;
1338			break;
1339		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1340			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1341				return ps;
1342			break;
1343		case POWER_STATE_TYPE_INTERNAL_BOOT:
1344			return adev->pm.dpm.boot_ps;
1345		case POWER_STATE_TYPE_INTERNAL_THERMAL:
1346			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1347				return ps;
1348			break;
1349		case POWER_STATE_TYPE_INTERNAL_ACPI:
1350			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1351				return ps;
1352			break;
1353		case POWER_STATE_TYPE_INTERNAL_ULV:
1354			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1355				return ps;
1356			break;
1357		case POWER_STATE_TYPE_INTERNAL_3DPERF:
1358			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1359				return ps;
1360			break;
1361		default:
1362			break;
1363		}
1364	}
1365	/* use a fallback state if we didn't match */
1366	switch (dpm_state) {
1367	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1368		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1369		goto restart_search;
1370	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1371	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1372	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1373		if (adev->pm.dpm.uvd_ps) {
1374			return adev->pm.dpm.uvd_ps;
1375		} else {
1376			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1377			goto restart_search;
1378		}
1379	case POWER_STATE_TYPE_INTERNAL_THERMAL:
1380		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1381		goto restart_search;
1382	case POWER_STATE_TYPE_INTERNAL_ACPI:
1383		dpm_state = POWER_STATE_TYPE_BATTERY;
1384		goto restart_search;
1385	case POWER_STATE_TYPE_BATTERY:
1386	case POWER_STATE_TYPE_BALANCED:
1387	case POWER_STATE_TYPE_INTERNAL_3DPERF:
1388		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1389		goto restart_search;
1390	default:
1391		break;
1392	}
1393
1394	return NULL;
1395}
1396
1397static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1398{
1399	struct amdgpu_ps *ps;
1400	enum amd_pm_state_type dpm_state;
1401	int ret;
1402	bool equal = false;
1403
1404	/* if dpm init failed */
1405	if (!adev->pm.dpm_enabled)
1406		return;
1407
1408	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1409		/* add other state override checks here */
1410		if ((!adev->pm.dpm.thermal_active) &&
1411		    (!adev->pm.dpm.uvd_active))
1412			adev->pm.dpm.state = adev->pm.dpm.user_state;
1413	}
1414	dpm_state = adev->pm.dpm.state;
1415
1416	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1417	if (ps)
1418		adev->pm.dpm.requested_ps = ps;
1419	else
1420		return;
1421
1422	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
1423		printk("switching from power state:\n");
1424		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1425		printk("switching to power state:\n");
1426		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1427	}
1428
1429	/* update whether vce is active */
1430	ps->vce_active = adev->pm.dpm.vce_active;
1431	if (adev->powerplay.pp_funcs->display_configuration_changed)
1432		amdgpu_dpm_display_configuration_changed(adev);
1433
1434	ret = amdgpu_dpm_pre_set_power_state(adev);
1435	if (ret)
1436		return;
1437
1438	if (adev->powerplay.pp_funcs->check_state_equal) {
1439		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
1440			equal = false;
1441	}
1442
1443	if (equal)
1444		return;
1445
1446	amdgpu_dpm_set_power_state(adev);
1447	amdgpu_dpm_post_set_power_state(adev);
1448
1449	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1450	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1451
1452	if (adev->powerplay.pp_funcs->force_performance_level) {
1453		if (adev->pm.dpm.thermal_active) {
1454			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1455			/* force low perf level for thermal */
1456			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1457			/* save the user's level */
1458			adev->pm.dpm.forced_level = level;
1459		} else {
1460			/* otherwise, user selected level */
1461			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1462		}
1463	}
1464}
1465
1466void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1467{
1468	int i = 0;
1469
1470	if (!adev->pm.dpm_enabled)
1471		return;
1472
1473	if (adev->mode_info.num_crtc)
1474		amdgpu_display_bandwidth_update(adev);
1475
1476	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1477		struct amdgpu_ring *ring = adev->rings[i];
1478		if (ring && ring->sched.ready)
1479			amdgpu_fence_wait_empty(ring);
1480	}
1481
1482	if (adev->powerplay.pp_funcs->dispatch_tasks) {
1483		if (!amdgpu_device_has_dc_support(adev)) {
1484			mutex_lock(&adev->pm.mutex);
1485			amdgpu_dpm_get_active_displays(adev);
1486			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1487			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1488			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1489			/* we have issues with mclk switching with
1490			 * refresh rates over 120 Hz on the non-DC code.
1491			 */
1492			if (adev->pm.pm_display_cfg.vrefresh > 120)
1493				adev->pm.pm_display_cfg.min_vblank_time = 0;
1494			if (adev->powerplay.pp_funcs->display_configuration_change)
1495				adev->powerplay.pp_funcs->display_configuration_change(
1496							adev->powerplay.pp_handle,
1497							&adev->pm.pm_display_cfg);
1498			mutex_unlock(&adev->pm.mutex);
1499		}
1500		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1501	} else {
1502		mutex_lock(&adev->pm.mutex);
1503		amdgpu_dpm_get_active_displays(adev);
1504		amdgpu_dpm_change_power_state_locked(adev);
1505		mutex_unlock(&adev->pm.mutex);
1506	}
1507}
1508
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
		    adev->uvd.decode_image_width >= WIDTH_4K) {
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}

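/**
 * amdgpu_dpm_enable_vce - power up or down the VCE block
 * @adev: amdgpu device pointer
 * @enable: true to enable VCE, false to disable it
 *
 * Mirrors amdgpu_dpm_enable_uvd(): legacy power states on SI parts,
 * SMU powergating on everything else.
 */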
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		if (ret)
			DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
				  enable ? "enable" : "disable", ret);
	}
}

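/**
 * amdgpu_pm_print_power_states - dump all legacy DPM power states
 * @adev: amdgpu device pointer
 *
 * Prints every power state in the DPM table via the driver's
 * print_power_state callback, if one is implemented.
 */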
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (!adev->powerplay.pp_funcs->print_power_state)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

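/**
 * amdgpu_dpm_enable_jpeg - power up or down the JPEG block
 * @adev: amdgpu device pointer
 * @enable: true to enable JPEG, false to disable it
 *
 * JPEG is only handled through SMU powergating; there is no legacy
 * power state path for it.
 */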
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

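/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware, if needed
 * @adev: amdgpu device pointer
 * @smu_version: optional pointer to receive the loaded firmware version
 *
 * Invokes the powerplay load_firmware callback when one is present and,
 * on success, reports the firmware version through @smu_version. Returns
 * 0 on success (or when no callback exists) and propagates the callback's
 * error code on failure.
 *
 * Typical usage (illustrative only):
 *	uint32_t smu_version;
 *	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 */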
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}

		if (smu_version)
			*smu_version = adev->pm.fw_version;
	}

	return 0;
}