Linux Audio

Check our new training course

Loading...
v6.2
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_i2c.h"
  28#include "amdgpu_dpm.h"
  29#include "atom.h"
  30#include "amd_pcie.h"
  31#include "amdgpu_display.h"
  32#include "hwmgr.h"
  33#include <linux/power_supply.h>
  34#include "amdgpu_smu.h"
  35
  36#define amdgpu_dpm_enable_bapm(adev, e) \
  37		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
  38
  39int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
  40{
  41	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  42	int ret = 0;
  43
  44	if (!pp_funcs->get_sclk)
  45		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46
  47	mutex_lock(&adev->pm.mutex);
  48	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
  49				 low);
  50	mutex_unlock(&adev->pm.mutex);
  51
  52	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  53}
  54
  55int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  56{
  57	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  58	int ret = 0;
  59
  60	if (!pp_funcs->get_mclk)
  61		return 0;
  62
  63	mutex_lock(&adev->pm.mutex);
  64	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
  65				 low);
  66	mutex_unlock(&adev->pm.mutex);
  67
  68	return ret;
  69}
  70
/**
 * amdgpu_dpm_set_powergating_by_smu - gate/ungate an IP block via the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the IP block
 * @gate: true to power-gate the block, false to ungate it
 *
 * Skips the request when the cached state already matches, forwards the
 * request to the powerplay backend for the supported block types, and
 * caches the new state on success so repeated calls are cheap.
 *
 * Returns 0 on success (or when nothing needed doing), otherwise the
 * error from the backend's set_powergating_by_smu callback.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	/* Fast path: nothing to do if the block is already in the
	 * requested power state. */
	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		/* Unsupported block types are silently ignored (ret stays 0). */
		break;
	}

	/* Record the new state; also reached for no-op block types, which
	 * keeps the cache in sync with what callers requested. */
	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
 109
 110int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
 111{
 112	struct smu_context *smu = adev->powerplay.pp_handle;
 113	int ret = -EOPNOTSUPP;
 114
 115	mutex_lock(&adev->pm.mutex);
 116	ret = smu_set_gfx_power_up_by_imu(smu);
 117	mutex_unlock(&adev->pm.mutex);
 118
 119	msleep(10);
 120
 121	return ret;
 122}
 123
 124int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 125{
 126	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 127	void *pp_handle = adev->powerplay.pp_handle;
 128	int ret = 0;
 129
 130	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 131		return -ENOENT;
 132
 133	mutex_lock(&adev->pm.mutex);
 134
 135	/* enter BACO state */
 136	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 137
 138	mutex_unlock(&adev->pm.mutex);
 139
 140	return ret;
 141}
 142
 143int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 144{
 145	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 146	void *pp_handle = adev->powerplay.pp_handle;
 147	int ret = 0;
 148
 149	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 150		return -ENOENT;
 151
 152	mutex_lock(&adev->pm.mutex);
 153
 154	/* exit BACO state */
 155	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 156
 157	mutex_unlock(&adev->pm.mutex);
 158
 159	return ret;
 160}
 161
 162int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 163			     enum pp_mp1_state mp1_state)
 164{
 165	int ret = 0;
 166	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 167
 168	if (pp_funcs && pp_funcs->set_mp1_state) {
 169		mutex_lock(&adev->pm.mutex);
 170
 171		ret = pp_funcs->set_mp1_state(
 172				adev->powerplay.pp_handle,
 173				mp1_state);
 174
 175		mutex_unlock(&adev->pm.mutex);
 176	}
 177
 178	return ret;
 179}
 180
/**
 * amdgpu_dpm_is_baco_supported - check whether BACO reset can be used
 * @adev: amdgpu device pointer
 *
 * Returns true only when the powerplay backend reports BACO capability
 * and the device is not suspending (see workaround below).
 */
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	/* On query failure report "unsupported" rather than an error. */
	return ret ? false : baco_cap;
}
 211
 212int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 213{
 214	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 215	void *pp_handle = adev->powerplay.pp_handle;
 216	int ret = 0;
 217
 218	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
 219		return -ENOENT;
 220
 221	mutex_lock(&adev->pm.mutex);
 222
 223	ret = pp_funcs->asic_reset_mode_2(pp_handle);
 224
 225	mutex_unlock(&adev->pm.mutex);
 226
 227	return ret;
 228}
 229
/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu device pointer
 *
 * Enters and then immediately exits the BACO state; the power cycle
 * resets the chip.  Returns 0 on success, -ENOENT when BACO control is
 * unavailable, or the first backend error encountered.
 */
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}
 253
 254bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
 255{
 256	struct smu_context *smu = adev->powerplay.pp_handle;
 257	bool support_mode1_reset = false;
 258
 259	if (is_support_sw_smu(adev)) {
 260		mutex_lock(&adev->pm.mutex);
 261		support_mode1_reset = smu_mode1_reset_is_support(smu);
 262		mutex_unlock(&adev->pm.mutex);
 263	}
 264
 265	return support_mode1_reset;
 266}
 267
 268int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
 269{
 270	struct smu_context *smu = adev->powerplay.pp_handle;
 271	int ret = -EOPNOTSUPP;
 272
 273	if (is_support_sw_smu(adev)) {
 274		mutex_lock(&adev->pm.mutex);
 275		ret = smu_mode1_reset(smu);
 276		mutex_unlock(&adev->pm.mutex);
 277	}
 278
 279	return ret;
 280}
 281
 282int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 283				    enum PP_SMC_POWER_PROFILE type,
 284				    bool en)
 285{
 286	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 287	int ret = 0;
 288
 289	if (amdgpu_sriov_vf(adev))
 290		return 0;
 291
 292	if (pp_funcs && pp_funcs->switch_power_profile) {
 293		mutex_lock(&adev->pm.mutex);
 294		ret = pp_funcs->switch_power_profile(
 295			adev->powerplay.pp_handle, type, en);
 296		mutex_unlock(&adev->pm.mutex);
 297	}
 298
 299	return ret;
 300}
 301
 302int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 303			       uint32_t pstate)
 304{
 305	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 306	int ret = 0;
 307
 308	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
 309		mutex_lock(&adev->pm.mutex);
 310		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
 311								pstate);
 312		mutex_unlock(&adev->pm.mutex);
 313	}
 314
 315	return ret;
 316}
 317
 318int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 319			     uint32_t cstate)
 320{
 321	int ret = 0;
 322	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 323	void *pp_handle = adev->powerplay.pp_handle;
 324
 325	if (pp_funcs && pp_funcs->set_df_cstate) {
 326		mutex_lock(&adev->pm.mutex);
 327		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 328		mutex_unlock(&adev->pm.mutex);
 329	}
 330
 331	return ret;
 332}
 333
 334int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
 335{
 336	struct smu_context *smu = adev->powerplay.pp_handle;
 337	int ret = 0;
 338
 339	if (is_support_sw_smu(adev)) {
 340		mutex_lock(&adev->pm.mutex);
 341		ret = smu_allow_xgmi_power_down(smu, en);
 342		mutex_unlock(&adev->pm.mutex);
 343	}
 344
 345	return ret;
 346}
 347
 348int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 349{
 350	void *pp_handle = adev->powerplay.pp_handle;
 351	const struct amd_pm_funcs *pp_funcs =
 352			adev->powerplay.pp_funcs;
 353	int ret = 0;
 354
 355	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
 356		mutex_lock(&adev->pm.mutex);
 357		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 358		mutex_unlock(&adev->pm.mutex);
 359	}
 360
 361	return ret;
 362}
 363
 364int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
 365				      uint32_t msg_id)
 366{
 367	void *pp_handle = adev->powerplay.pp_handle;
 368	const struct amd_pm_funcs *pp_funcs =
 369			adev->powerplay.pp_funcs;
 370	int ret = 0;
 371
 372	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
 373		mutex_lock(&adev->pm.mutex);
 374		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
 375						       msg_id);
 376		mutex_unlock(&adev->pm.mutex);
 377	}
 378
 379	return ret;
 380}
 381
 382int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 383				  bool acquire)
 384{
 385	void *pp_handle = adev->powerplay.pp_handle;
 386	const struct amd_pm_funcs *pp_funcs =
 387			adev->powerplay.pp_funcs;
 388	int ret = -EOPNOTSUPP;
 389
 390	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
 391		mutex_lock(&adev->pm.mutex);
 392		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
 393						   acquire);
 394		mutex_unlock(&adev->pm.mutex);
 395	}
 396
 397	return ret;
 398}
 399
/**
 * amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power event
 * @adev: amdgpu device pointer
 *
 * Re-samples whether the system is on AC power, then notifies the
 * powerplay backend (BAPM) and/or the SW SMU so they can adjust power
 * limits accordingly.  A no-op while dpm is disabled.
 */
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* Refresh the cached AC-power status from the kernel. */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		/* SW SMU paths get the AC/DC notification separately. */
		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}
 419
 420int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
 421			   void *data, uint32_t *size)
 422{
 423	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 424	int ret = -EINVAL;
 425
 426	if (!data || !size)
 427		return -EINVAL;
 428
 429	if (pp_funcs && pp_funcs->read_sensor) {
 430		mutex_lock(&adev->pm.mutex);
 431		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
 432					    sensor,
 433					    data,
 434					    size);
 435		mutex_unlock(&adev->pm.mutex);
 436	}
 437
 438	return ret;
 439}
 440
 441void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 442{
 443	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 444	int i;
 
 
 
 
 445
 446	if (!adev->pm.dpm_enabled)
 447		return;
 448
 449	if (!pp_funcs->pm_compute_clocks)
 450		return;
 451
 452	if (adev->mode_info.num_crtc)
 453		amdgpu_display_bandwidth_update(adev);
 454
 455	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 456		struct amdgpu_ring *ring = adev->rings[i];
 457		if (ring && ring->sched.ready)
 458			amdgpu_fence_wait_empty(ring);
 459	}
 460
 461	mutex_lock(&adev->pm.mutex);
 462	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
 
 
 
 
 463	mutex_unlock(&adev->pm.mutex);
 
 
 464}
 465
/**
 * amdgpu_dpm_enable_uvd - power the UVD block up or down
 * @adev: amdgpu device pointer
 * @enable: true when UVD is about to be used, false when it goes idle
 *
 * On SI parts (no SMU powergating for UVD) this switches the dpm power
 * state and recomputes clocks; on everything else it powergates UVD
 * through the SMU.  Failures are logged, not returned.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	/* SI path: toggle the internal UVD power state directly. */
	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	/* Non-SI path: gate when disabling, ungate when enabling. */
	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}
 489
/**
 * amdgpu_dpm_enable_vce - power the VCE block up or down
 * @adev: amdgpu device pointer
 * @enable: true when VCE is about to be used, false when it goes idle
 *
 * On SI parts this toggles the dpm VCE state and recomputes clocks; on
 * everything else it powergates VCE through the SMU.  Failures are
 * logged, not returned.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	/* SI path: toggle the internal VCE power state directly. */
	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	/* Non-SI path: gate when disabling, ungate when enabling. */
	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}
 514
 515void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
 516{
 517	int ret = 0;
 518
 519	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
 520	if (ret)
 521		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
 522			  enable ? "enable" : "disable", ret);
 523}
 524
 525int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 526{
 527	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 528	int r = 0;
 529
 530	if (!pp_funcs || !pp_funcs->load_firmware)
 531		return 0;
 
 
 
 
 
 
 
 
 
 
 
 532
 533	mutex_lock(&adev->pm.mutex);
 534	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
 535	if (r) {
 536		pr_err("smu firmware loading failed\n");
 537		goto out;
 538	}
 539
 540	if (smu_version)
 541		*smu_version = adev->pm.fw_version;
 542
 543out:
 544	mutex_unlock(&adev->pm.mutex);
 545	return r;
 546}
 547
 548int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
 549{
 550	int ret = 0;
 551
 552	if (is_support_sw_smu(adev)) {
 553		mutex_lock(&adev->pm.mutex);
 554		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
 555						 enable);
 556		mutex_unlock(&adev->pm.mutex);
 557	}
 558
 559	return ret;
 560}
 561
 562int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
 563{
 564	struct smu_context *smu = adev->powerplay.pp_handle;
 565	int ret = 0;
 566
 567	if (!is_support_sw_smu(adev))
 568		return -EOPNOTSUPP;
 569
 570	mutex_lock(&adev->pm.mutex);
 571	ret = smu_send_hbm_bad_pages_num(smu, size);
 572	mutex_unlock(&adev->pm.mutex);
 573
 574	return ret;
 575}
 576
 577int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
 578{
 579	struct smu_context *smu = adev->powerplay.pp_handle;
 580	int ret = 0;
 581
 582	if (!is_support_sw_smu(adev))
 583		return -EOPNOTSUPP;
 584
 585	mutex_lock(&adev->pm.mutex);
 586	ret = smu_send_hbm_bad_channel_flag(smu, size);
 587	mutex_unlock(&adev->pm.mutex);
 588
 589	return ret;
 590}
 591
 592int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 593				  enum pp_clock_type type,
 594				  uint32_t *min,
 595				  uint32_t *max)
 596{
 597	int ret = 0;
 598
 599	if (type != PP_SCLK)
 600		return -EINVAL;
 601
 602	if (!is_support_sw_smu(adev))
 603		return -EOPNOTSUPP;
 604
 605	mutex_lock(&adev->pm.mutex);
 606	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
 607				     SMU_SCLK,
 608				     min,
 609				     max);
 610	mutex_unlock(&adev->pm.mutex);
 611
 612	return ret;
 613}
 614
 615int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 616				   enum pp_clock_type type,
 617				   uint32_t min,
 618				   uint32_t max)
 619{
 620	struct smu_context *smu = adev->powerplay.pp_handle;
 621	int ret = 0;
 622
 623	if (type != PP_SCLK)
 624		return -EINVAL;
 625
 626	if (!is_support_sw_smu(adev))
 627		return -EOPNOTSUPP;
 628
 629	mutex_lock(&adev->pm.mutex);
 630	ret = smu_set_soft_freq_range(smu,
 631				      SMU_SCLK,
 632				      min,
 633				      max);
 634	mutex_unlock(&adev->pm.mutex);
 635
 636	return ret;
 637}
 638
 639int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
 640{
 641	struct smu_context *smu = adev->powerplay.pp_handle;
 642	int ret = 0;
 643
 644	if (!is_support_sw_smu(adev))
 645		return 0;
 646
 647	mutex_lock(&adev->pm.mutex);
 648	ret = smu_write_watermarks_table(smu);
 649	mutex_unlock(&adev->pm.mutex);
 650
 651	return ret;
 652}
 653
 654int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
 655			      enum smu_event_type event,
 656			      uint64_t event_arg)
 657{
 658	struct smu_context *smu = adev->powerplay.pp_handle;
 659	int ret = 0;
 660
 661	if (!is_support_sw_smu(adev))
 662		return -EOPNOTSUPP;
 663
 664	mutex_lock(&adev->pm.mutex);
 665	ret = smu_wait_for_event(smu, event, event_arg);
 666	mutex_unlock(&adev->pm.mutex);
 667
 668	return ret;
 669}
 670
 671int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
 672{
 673	struct smu_context *smu = adev->powerplay.pp_handle;
 674	int ret = 0;
 675
 676	if (!is_support_sw_smu(adev))
 677		return -EOPNOTSUPP;
 678
 679	mutex_lock(&adev->pm.mutex);
 680	ret = smu_set_residency_gfxoff(smu, value);
 681	mutex_unlock(&adev->pm.mutex);
 682
 683	return ret;
 684}
 685
 686int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
 687{
 688	struct smu_context *smu = adev->powerplay.pp_handle;
 689	int ret = 0;
 690
 691	if (!is_support_sw_smu(adev))
 692		return -EOPNOTSUPP;
 693
 694	mutex_lock(&adev->pm.mutex);
 695	ret = smu_get_residency_gfxoff(smu, value);
 696	mutex_unlock(&adev->pm.mutex);
 697
 698	return ret;
 699}
 700
 701int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
 702{
 703	struct smu_context *smu = adev->powerplay.pp_handle;
 704	int ret = 0;
 705
 706	if (!is_support_sw_smu(adev))
 707		return -EOPNOTSUPP;
 708
 709	mutex_lock(&adev->pm.mutex);
 710	ret = smu_get_entrycount_gfxoff(smu, value);
 711	mutex_unlock(&adev->pm.mutex);
 712
 713	return ret;
 714}
 715
 716int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
 717{
 718	struct smu_context *smu = adev->powerplay.pp_handle;
 719	int ret = 0;
 720
 721	if (!is_support_sw_smu(adev))
 722		return -EOPNOTSUPP;
 723
 724	mutex_lock(&adev->pm.mutex);
 725	ret = smu_get_status_gfxoff(smu, value);
 726	mutex_unlock(&adev->pm.mutex);
 727
 728	return ret;
 729}
 730
 731uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 732{
 733	struct smu_context *smu = adev->powerplay.pp_handle;
 734
 735	if (!is_support_sw_smu(adev))
 736		return 0;
 737
 738	return atomic64_read(&smu->throttle_int_counter);
 739}
 740
 741/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 742 * @adev: amdgpu_device pointer
 743 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
 744 *
 745 */
 746void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 747				 enum gfx_change_state state)
 748{
 749	mutex_lock(&adev->pm.mutex);
 750	if (adev->powerplay.pp_funcs &&
 751	    adev->powerplay.pp_funcs->gfx_state_change_set)
 752		((adev)->powerplay.pp_funcs->gfx_state_change_set(
 753			(adev)->powerplay.pp_handle, state));
 754	mutex_unlock(&adev->pm.mutex);
 755}
 756
 757int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 758			    void *umc_ecc)
 759{
 760	struct smu_context *smu = adev->powerplay.pp_handle;
 761	int ret = 0;
 762
 763	if (!is_support_sw_smu(adev))
 764		return -EOPNOTSUPP;
 765
 766	mutex_lock(&adev->pm.mutex);
 767	ret = smu_get_ecc_info(smu, umc_ecc);
 768	mutex_unlock(&adev->pm.mutex);
 769
 770	return ret;
 771}
 772
 773struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
 774						     uint32_t idx)
 775{
 776	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 777	struct amd_vce_state *vstate = NULL;
 778
 779	if (!pp_funcs->get_vce_clock_state)
 780		return NULL;
 781
 782	mutex_lock(&adev->pm.mutex);
 783	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
 784					       idx);
 785	mutex_unlock(&adev->pm.mutex);
 786
 787	return vstate;
 788}
 789
 790void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 791					enum amd_pm_state_type *state)
 792{
 793	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 794
 795	mutex_lock(&adev->pm.mutex);
 
 796
 797	if (!pp_funcs->get_current_power_state) {
 798		*state = adev->pm.dpm.user_state;
 799		goto out;
 
 
 
 
 
 
 
 
 800	}
 801
 802	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
 803	if (*state < POWER_STATE_TYPE_DEFAULT ||
 804	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
 805		*state = adev->pm.dpm.user_state;
 806
 807out:
 808	mutex_unlock(&adev->pm.mutex);
 809}
 810
 811void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 812				enum amd_pm_state_type state)
 813{
 814	mutex_lock(&adev->pm.mutex);
 815	adev->pm.dpm.user_state = state;
 816	mutex_unlock(&adev->pm.mutex);
 817
 818	if (is_support_sw_smu(adev))
 819		return;
 820
 821	if (amdgpu_dpm_dispatch_task(adev,
 822				     AMD_PP_TASK_ENABLE_USER_STATE,
 823				     &state) == -EOPNOTSUPP)
 824		amdgpu_dpm_compute_clocks(adev);
 825}
 826
 827enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
 828{
 829	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 830	enum amd_dpm_forced_level level;
 831
 832	if (!pp_funcs)
 833		return AMD_DPM_FORCED_LEVEL_AUTO;
 834
 835	mutex_lock(&adev->pm.mutex);
 836	if (pp_funcs->get_performance_level)
 837		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
 838	else
 839		level = adev->pm.dpm.forced_level;
 840	mutex_unlock(&adev->pm.mutex);
 841
 842	return level;
 843}
 844
/**
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu device pointer
 * @level: the amd_dpm_forced_level to apply
 *
 * Validates the transition (thermal state, UMD pstate rules), applies
 * the Raven gfxoff workaround, toggles GFX clock/power gating around
 * entering or leaving a profiling level, and finally forwards the
 * level to the backend.  Returns 0 on success or -EINVAL.
 */
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	/* Never override an active thermal-protection state. */
	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	/* Raven (non-Raven2) workaround: gfxoff must be disabled while
	 * in manual mode and re-enabled on leaving it. */
	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* PROFILE_EXIT is only valid while in a profiling level. */
	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	/* Cache the level for backends without a get_performance_level. */
	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
 914
 915int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
 916				 struct pp_states_info *states)
 917{
 918	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 919	int ret = 0;
 920
 921	if (!pp_funcs->get_pp_num_states)
 922		return -EOPNOTSUPP;
 923
 924	mutex_lock(&adev->pm.mutex);
 925	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
 926					  states);
 927	mutex_unlock(&adev->pm.mutex);
 928
 929	return ret;
 930}
 931
 932int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
 933			      enum amd_pp_task task_id,
 934			      enum amd_pm_state_type *user_state)
 935{
 936	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 937	int ret = 0;
 938
 939	if (!pp_funcs->dispatch_tasks)
 940		return -EOPNOTSUPP;
 941
 942	mutex_lock(&adev->pm.mutex);
 943	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
 944				       task_id,
 945				       user_state);
 946	mutex_unlock(&adev->pm.mutex);
 947
 948	return ret;
 949}
 950
 951int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
 952{
 953	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 954	int ret = 0;
 955
 956	if (!pp_funcs->get_pp_table)
 957		return 0;
 958
 959	mutex_lock(&adev->pm.mutex);
 960	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
 961				     table);
 962	mutex_unlock(&adev->pm.mutex);
 963
 964	return ret;
 965}
 966
 967int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
 968				      uint32_t type,
 969				      long *input,
 970				      uint32_t size)
 971{
 972	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 973	int ret = 0;
 974
 975	if (!pp_funcs->set_fine_grain_clk_vol)
 976		return 0;
 977
 978	mutex_lock(&adev->pm.mutex);
 979	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
 980					       type,
 981					       input,
 982					       size);
 983	mutex_unlock(&adev->pm.mutex);
 984
 985	return ret;
 986}
 987
 988int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
 989				  uint32_t type,
 990				  long *input,
 991				  uint32_t size)
 992{
 993	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 994	int ret = 0;
 995
 996	if (!pp_funcs->odn_edit_dpm_table)
 997		return 0;
 998
 999	mutex_lock(&adev->pm.mutex);
1000	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1001					   type,
1002					   input,
1003					   size);
1004	mutex_unlock(&adev->pm.mutex);
1005
1006	return ret;
1007}
1008
1009int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1010				  enum pp_clock_type type,
1011				  char *buf)
1012{
1013	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1014	int ret = 0;
1015
1016	if (!pp_funcs->print_clock_levels)
1017		return 0;
1018
1019	mutex_lock(&adev->pm.mutex);
1020	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1021					   type,
1022					   buf);
1023	mutex_unlock(&adev->pm.mutex);
1024
1025	return ret;
1026}
1027
1028int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1029				  enum pp_clock_type type,
1030				  char *buf,
1031				  int *offset)
1032{
1033	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1034	int ret = 0;
1035
1036	if (!pp_funcs->emit_clock_levels)
1037		return -ENOENT;
1038
1039	mutex_lock(&adev->pm.mutex);
1040	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1041					   type,
1042					   buf,
1043					   offset);
1044	mutex_unlock(&adev->pm.mutex);
1045
1046	return ret;
1047}
1048
1049int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1050				    uint64_t ppfeature_masks)
1051{
1052	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1053	int ret = 0;
1054
1055	if (!pp_funcs->set_ppfeature_status)
1056		return 0;
1057
1058	mutex_lock(&adev->pm.mutex);
1059	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1060					     ppfeature_masks);
1061	mutex_unlock(&adev->pm.mutex);
1062
1063	return ret;
1064}
1065
1066int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1067{
1068	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1069	int ret = 0;
1070
1071	if (!pp_funcs->get_ppfeature_status)
1072		return 0;
1073
1074	mutex_lock(&adev->pm.mutex);
1075	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1076					     buf);
1077	mutex_unlock(&adev->pm.mutex);
1078
1079	return ret;
1080}
1081
1082int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1083				 enum pp_clock_type type,
1084				 uint32_t mask)
1085{
1086	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1087	int ret = 0;
1088
1089	if (!pp_funcs->force_clock_level)
1090		return 0;
1091
1092	mutex_lock(&adev->pm.mutex);
1093	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1094					  type,
1095					  mask);
1096	mutex_unlock(&adev->pm.mutex);
1097
1098	return ret;
1099}
1100
1101int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1102{
1103	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1104	int ret = 0;
1105
1106	if (!pp_funcs->get_sclk_od)
1107		return 0;
1108
1109	mutex_lock(&adev->pm.mutex);
1110	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1111	mutex_unlock(&adev->pm.mutex);
1112
1113	return ret;
1114}
1115
1116int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1117{
1118	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1119
1120	if (is_support_sw_smu(adev))
1121		return 0;
1122
1123	mutex_lock(&adev->pm.mutex);
1124	if (pp_funcs->set_sclk_od)
1125		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1126	mutex_unlock(&adev->pm.mutex);
1127
1128	if (amdgpu_dpm_dispatch_task(adev,
1129				     AMD_PP_TASK_READJUST_POWER_STATE,
1130				     NULL) == -EOPNOTSUPP) {
1131		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1132		amdgpu_dpm_compute_clocks(adev);
 
 
 
 
 
 
 
 
 
 
 
 
 
1133	}
1134
1135	return 0;
1136}
1137
1138int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1139{
1140	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1141	int ret = 0;
1142
1143	if (!pp_funcs->get_mclk_od)
1144		return 0;
1145
1146	mutex_lock(&adev->pm.mutex);
1147	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1148	mutex_unlock(&adev->pm.mutex);
1149
1150	return ret;
1151}
1152
1153int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1154{
1155	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1156
1157	if (is_support_sw_smu(adev))
1158		return 0;
1159
1160	mutex_lock(&adev->pm.mutex);
1161	if (pp_funcs->set_mclk_od)
1162		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1163	mutex_unlock(&adev->pm.mutex);
1164
1165	if (amdgpu_dpm_dispatch_task(adev,
1166				     AMD_PP_TASK_READJUST_POWER_STATE,
1167				     NULL) == -EOPNOTSUPP) {
1168		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1169		amdgpu_dpm_compute_clocks(adev);
 
1170	}
1171
1172	return 0;
1173}
1174
1175int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1176				      char *buf)
1177{
1178	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1179	int ret = 0;
1180
1181	if (!pp_funcs->get_power_profile_mode)
1182		return -EOPNOTSUPP;
1183
1184	mutex_lock(&adev->pm.mutex);
1185	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1186					       buf);
1187	mutex_unlock(&adev->pm.mutex);
1188
1189	return ret;
1190}
1191
1192int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1193				      long *input, uint32_t size)
1194{
1195	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1196	int ret = 0;
1197
1198	if (!pp_funcs->set_power_profile_mode)
1199		return 0;
1200
1201	mutex_lock(&adev->pm.mutex);
1202	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1203					       input,
1204					       size);
1205	mutex_unlock(&adev->pm.mutex);
1206
1207	return ret;
1208}
1209
1210int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1211{
1212	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1213	int ret = 0;
1214
1215	if (!pp_funcs->get_gpu_metrics)
1216		return 0;
1217
1218	mutex_lock(&adev->pm.mutex);
1219	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1220					table);
1221	mutex_unlock(&adev->pm.mutex);
1222
1223	return ret;
1224}
1225
1226int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1227				    uint32_t *fan_mode)
1228{
1229	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1230	int ret = 0;
1231
1232	if (!pp_funcs->get_fan_control_mode)
1233		return -EOPNOTSUPP;
1234
1235	mutex_lock(&adev->pm.mutex);
1236	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1237					     fan_mode);
1238	mutex_unlock(&adev->pm.mutex);
1239
1240	return ret;
1241}
1242
1243int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
1244				 uint32_t speed)
1245{
1246	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1247	int ret = 0;
1248
1249	if (!pp_funcs->set_fan_speed_pwm)
1250		return -EOPNOTSUPP;
1251
1252	mutex_lock(&adev->pm.mutex);
1253	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1254					  speed);
1255	mutex_unlock(&adev->pm.mutex);
1256
1257	return ret;
1258}
1259
1260int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
1261				 uint32_t *speed)
1262{
1263	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1264	int ret = 0;
1265
1266	if (!pp_funcs->get_fan_speed_pwm)
1267		return -EOPNOTSUPP;
1268
1269	mutex_lock(&adev->pm.mutex);
1270	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1271					  speed);
1272	mutex_unlock(&adev->pm.mutex);
1273
1274	return ret;
1275}
1276
1277int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
1278				 uint32_t *speed)
1279{
1280	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1281	int ret = 0;
1282
1283	if (!pp_funcs->get_fan_speed_rpm)
1284		return -EOPNOTSUPP;
1285
1286	mutex_lock(&adev->pm.mutex);
1287	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1288					  speed);
1289	mutex_unlock(&adev->pm.mutex);
1290
1291	return ret;
1292}
1293
1294int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
1295				 uint32_t speed)
1296{
1297	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1298	int ret = 0;
1299
1300	if (!pp_funcs->set_fan_speed_rpm)
1301		return -EOPNOTSUPP;
1302
1303	mutex_lock(&adev->pm.mutex);
1304	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1305					  speed);
1306	mutex_unlock(&adev->pm.mutex);
1307
1308	return ret;
1309}
1310
1311int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
1312				    uint32_t mode)
1313{
1314	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1315	int ret = 0;
1316
1317	if (!pp_funcs->set_fan_control_mode)
1318		return -EOPNOTSUPP;
1319
1320	mutex_lock(&adev->pm.mutex);
1321	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1322					     mode);
1323	mutex_unlock(&adev->pm.mutex);
1324
1325	return ret;
1326}
1327
1328int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
1329			       uint32_t *limit,
1330			       enum pp_power_limit_level pp_limit_level,
1331			       enum pp_power_type power_type)
1332{
1333	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1334	int ret = 0;
1335
1336	if (!pp_funcs->get_power_limit)
1337		return -ENODATA;
1338
1339	mutex_lock(&adev->pm.mutex);
1340	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1341					limit,
1342					pp_limit_level,
1343					power_type);
1344	mutex_unlock(&adev->pm.mutex);
1345
1346	return ret;
1347}
1348
1349int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
1350			       uint32_t limit)
1351{
1352	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1353	int ret = 0;
1354
1355	if (!pp_funcs->set_power_limit)
1356		return -EINVAL;
1357
1358	mutex_lock(&adev->pm.mutex);
1359	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1360					limit);
1361	mutex_unlock(&adev->pm.mutex);
1362
1363	return ret;
1364}
1365
1366int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
1367{
1368	bool cclk_dpm_supported = false;
1369
1370	if (!is_support_sw_smu(adev))
1371		return false;
1372
1373	mutex_lock(&adev->pm.mutex);
1374	cclk_dpm_supported = is_support_cclk_dpm(adev);
1375	mutex_unlock(&adev->pm.mutex);
1376
1377	return (int)cclk_dpm_supported;
1378}
1379
1380int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
1381						       struct seq_file *m)
1382{
1383	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1384
1385	if (!pp_funcs->debugfs_print_current_performance_level)
1386		return -EOPNOTSUPP;
1387
1388	mutex_lock(&adev->pm.mutex);
1389	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1390							  m);
1391	mutex_unlock(&adev->pm.mutex);
1392
1393	return 0;
1394}
1395
1396int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
1397				       void **addr,
1398				       size_t *size)
1399{
1400	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1401	int ret = 0;
1402
1403	if (!pp_funcs->get_smu_prv_buf_details)
1404		return -ENOSYS;
1405
1406	mutex_lock(&adev->pm.mutex);
1407	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1408						addr,
1409						size);
1410	mutex_unlock(&adev->pm.mutex);
1411
1412	return ret;
1413}
1414
1415int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
1416{
1417	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1418	struct smu_context *smu = adev->powerplay.pp_handle;
1419
1420	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
1421	    (is_support_sw_smu(adev) && smu->is_apu) ||
1422		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
1423		return true;
1424
1425	return false;
1426}
1427
1428int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
1429			    const char *buf,
1430			    size_t size)
1431{
1432	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1433	int ret = 0;
1434
1435	if (!pp_funcs->set_pp_table)
1436		return -EOPNOTSUPP;
1437
1438	mutex_lock(&adev->pm.mutex);
1439	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1440				     buf,
1441				     size);
1442	mutex_unlock(&adev->pm.mutex);
1443
1444	return ret;
1445}
1446
1447int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
1448{
1449	struct smu_context *smu = adev->powerplay.pp_handle;
1450
1451	if (!is_support_sw_smu(adev))
1452		return INT_MAX;
1453
1454	return smu->cpu_core_num;
1455}
1456
/* Create the STB (SMU Trace Buffer) debugfs entries on sw SMU parts. */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev))
		amdgpu_smu_stb_debug_fs_init(adev);
}
1464
1465int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1466					    const struct amd_pp_display_configuration *input)
1467{
1468	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1469	int ret = 0;
1470
1471	if (!pp_funcs->display_configuration_change)
1472		return 0;
1473
1474	mutex_lock(&adev->pm.mutex);
1475	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1476						     input);
1477	mutex_unlock(&adev->pm.mutex);
1478
1479	return ret;
1480}
1481
1482int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1483				 enum amd_pp_clock_type type,
1484				 struct amd_pp_clocks *clocks)
1485{
1486	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1487	int ret = 0;
1488
1489	if (!pp_funcs->get_clock_by_type)
1490		return 0;
1491
1492	mutex_lock(&adev->pm.mutex);
1493	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1494					  type,
1495					  clocks);
1496	mutex_unlock(&adev->pm.mutex);
1497
1498	return ret;
1499}
1500
1501int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1502						struct amd_pp_simple_clock_info *clocks)
1503{
1504	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1505	int ret = 0;
1506
1507	if (!pp_funcs->get_display_mode_validation_clocks)
1508		return 0;
1509
1510	mutex_lock(&adev->pm.mutex);
1511	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1512							   clocks);
1513	mutex_unlock(&adev->pm.mutex);
1514
1515	return ret;
1516}
1517
1518int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1519					      enum amd_pp_clock_type type,
1520					      struct pp_clock_levels_with_latency *clocks)
1521{
1522	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1523	int ret = 0;
1524
1525	if (!pp_funcs->get_clock_by_type_with_latency)
1526		return 0;
1527
1528	mutex_lock(&adev->pm.mutex);
1529	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1530						       type,
1531						       clocks);
1532	mutex_unlock(&adev->pm.mutex);
1533
1534	return ret;
1535}
1536
1537int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1538					      enum amd_pp_clock_type type,
1539					      struct pp_clock_levels_with_voltage *clocks)
1540{
1541	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1542	int ret = 0;
1543
1544	if (!pp_funcs->get_clock_by_type_with_voltage)
1545		return 0;
1546
1547	mutex_lock(&adev->pm.mutex);
1548	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1549						       type,
1550						       clocks);
1551	mutex_unlock(&adev->pm.mutex);
1552
1553	return ret;
1554}
1555
1556int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1557					       void *clock_ranges)
1558{
1559	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1560	int ret = 0;
1561
1562	if (!pp_funcs->set_watermarks_for_clocks_ranges)
1563		return -EOPNOTSUPP;
1564
1565	mutex_lock(&adev->pm.mutex);
1566	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1567							 clock_ranges);
1568	mutex_unlock(&adev->pm.mutex);
1569
1570	return ret;
1571}
1572
1573int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1574					     struct pp_display_clock_request *clock)
1575{
1576	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1577	int ret = 0;
1578
1579	if (!pp_funcs->display_clock_voltage_request)
1580		return -EOPNOTSUPP;
1581
1582	mutex_lock(&adev->pm.mutex);
1583	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1584						      clock);
1585	mutex_unlock(&adev->pm.mutex);
1586
1587	return ret;
1588}
1589
1590int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1591				  struct amd_pp_clock_info *clocks)
1592{
1593	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1594	int ret = 0;
1595
1596	if (!pp_funcs->get_current_clocks)
1597		return -EOPNOTSUPP;
1598
1599	mutex_lock(&adev->pm.mutex);
1600	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1601					   clocks);
1602	mutex_unlock(&adev->pm.mutex);
1603
1604	return ret;
1605}
1606
1607void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1608{
1609	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1610
1611	if (!pp_funcs->notify_smu_enable_pwe)
1612		return;
1613
1614	mutex_lock(&adev->pm.mutex);
1615	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1616	mutex_unlock(&adev->pm.mutex);
1617}
1618
1619int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1620					uint32_t count)
1621{
1622	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1623	int ret = 0;
1624
1625	if (!pp_funcs->set_active_display_count)
1626		return -EOPNOTSUPP;
1627
1628	mutex_lock(&adev->pm.mutex);
1629	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1630						 count);
1631	mutex_unlock(&adev->pm.mutex);
1632
1633	return ret;
1634}
1635
1636int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1637					  uint32_t clock)
1638{
1639	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1640	int ret = 0;
1641
1642	if (!pp_funcs->set_min_deep_sleep_dcefclk)
1643		return -EOPNOTSUPP;
1644
1645	mutex_lock(&adev->pm.mutex);
1646	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1647						   clock);
1648	mutex_unlock(&adev->pm.mutex);
1649
1650	return ret;
1651}
1652
1653void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1654					     uint32_t clock)
1655{
1656	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1657
1658	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1659		return;
1660
1661	mutex_lock(&adev->pm.mutex);
1662	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1663					       clock);
1664	mutex_unlock(&adev->pm.mutex);
1665}
1666
1667void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1668					  uint32_t clock)
1669{
1670	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1671
1672	if (!pp_funcs->set_hard_min_fclk_by_freq)
1673		return;
1674
1675	mutex_lock(&adev->pm.mutex);
1676	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1677					    clock);
1678	mutex_unlock(&adev->pm.mutex);
1679}
1680
1681int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1682						   bool disable_memory_clock_switch)
1683{
1684	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1685	int ret = 0;
1686
1687	if (!pp_funcs->display_disable_memory_clock_switch)
1688		return 0;
1689
1690	mutex_lock(&adev->pm.mutex);
1691	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1692							    disable_memory_clock_switch);
1693	mutex_unlock(&adev->pm.mutex);
1694
1695	return ret;
1696}
1697
1698int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1699						struct pp_smu_nv_clock_table *max_clocks)
1700{
1701	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1702	int ret = 0;
1703
1704	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1705		return -EOPNOTSUPP;
1706
1707	mutex_lock(&adev->pm.mutex);
1708	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1709							 max_clocks);
1710	mutex_unlock(&adev->pm.mutex);
1711
1712	return ret;
1713}
1714
/*
 * amdgpu_dpm_get_uclk_dpm_states - query the supported UCLK DPM states,
 * serialized by adev->pm.mutex.
 *
 * NOTE(review): the prototype returns enum pp_smu_status, but the body
 * returns -EOPNOTSUPP and the callback's int result.  Callers appear to
 * treat the value as an errno-style int — confirm and consider changing
 * the return type to int.
 */
enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
1733
1734int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
1735				   struct dpm_clocks *clock_table)
1736{
1737	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1738	int ret = 0;
1739
1740	if (!pp_funcs->get_dpm_clock_table)
1741		return -EOPNOTSUPP;
 
 
 
 
1742
1743	mutex_lock(&adev->pm.mutex);
1744	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
1745					    clock_table);
1746	mutex_unlock(&adev->pm.mutex);
1747
1748	return ret;
1749}
/* v5.14.15 */
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_i2c.h"
  28#include "amdgpu_dpm.h"
  29#include "atom.h"
  30#include "amd_pcie.h"
  31#include "amdgpu_display.h"
  32#include "hwmgr.h"
  33#include <linux/power_supply.h>
 
  34
/* Horizontal resolution (in pixels) of a 3840x2160 "4K" mode. */
#define WIDTH_4K 3840
  36
/*
 * amdgpu_dpm_print_class_info - log the classification of a power state.
 * @class:  ATOM_PPLIB_CLASSIFICATION_* bitmask (includes the UI class field).
 * @class2: ATOM_PPLIB_CLASSIFICATION2_* bitmask.
 *
 * Prints the UI class name followed by every internal-class flag that is
 * set; output goes to the kernel log as a continuation of a dpm dump.
 */
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	/* The UI class is a small enum packed into the low bits of @class. */
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}
  97
/*
 * amdgpu_dpm_print_cap_info - log the capability flags of a power state.
 * @caps: ATOM_PPLIB_* capability bitmask.
 */
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}
 109
/*
 * amdgpu_dpm_print_ps_status - log whether @rps is the current (c),
 * requested (r) and/or boot (b) power state of @adev.
 */
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}
 122
 123void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 124{
 125	struct drm_device *ddev = adev_to_drm(adev);
 126	struct drm_crtc *crtc;
 127	struct amdgpu_crtc *amdgpu_crtc;
 128
 129	adev->pm.dpm.new_active_crtcs = 0;
 130	adev->pm.dpm.new_active_crtc_count = 0;
 131	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 132		list_for_each_entry(crtc,
 133				    &ddev->mode_config.crtc_list, head) {
 134			amdgpu_crtc = to_amdgpu_crtc(crtc);
 135			if (amdgpu_crtc->enabled) {
 136				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
 137				adev->pm.dpm.new_active_crtc_count++;
 138			}
 139		}
 140	}
 141}
 142
 
 
 
 
 143
 144u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 145{
 146	struct drm_device *dev = adev_to_drm(adev);
 147	struct drm_crtc *crtc;
 148	struct amdgpu_crtc *amdgpu_crtc;
 149	u32 vblank_in_pixels;
 150	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 151
 152	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 153		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 154			amdgpu_crtc = to_amdgpu_crtc(crtc);
 155			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 156				vblank_in_pixels =
 157					amdgpu_crtc->hw_mode.crtc_htotal *
 158					(amdgpu_crtc->hw_mode.crtc_vblank_end -
 159					amdgpu_crtc->hw_mode.crtc_vdisplay +
 160					(amdgpu_crtc->v_border * 2));
 161
 162				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 163				break;
 164			}
 165		}
 166	}
 167
 168	return vblank_time_us;
 169}
 170
 171u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 172{
 173	struct drm_device *dev = adev_to_drm(adev);
 174	struct drm_crtc *crtc;
 175	struct amdgpu_crtc *amdgpu_crtc;
 176	u32 vrefresh = 0;
 177
 178	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 179		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 180			amdgpu_crtc = to_amdgpu_crtc(crtc);
 181			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
 182				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 183				break;
 184			}
 185		}
 186	}
 187
 188	return vrefresh;
 189}
 190
 191bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
 192{
 193	switch (sensor) {
 194	case THERMAL_TYPE_RV6XX:
 195	case THERMAL_TYPE_RV770:
 196	case THERMAL_TYPE_EVERGREEN:
 197	case THERMAL_TYPE_SUMO:
 198	case THERMAL_TYPE_NI:
 199	case THERMAL_TYPE_SI:
 200	case THERMAL_TYPE_CI:
 201	case THERMAL_TYPE_KV:
 202		return true;
 203	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
 204	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
 205		return false; /* need special handling */
 206	case THERMAL_TYPE_NONE:
 207	case THERMAL_TYPE_EXTERNAL:
 208	case THERMAL_TYPE_EXTERNAL_GPIO:
 209	default:
 210		return false;
 211	}
 212}
 213
/* Overlay of every ATOM powerplay table revision found in the VBIOS;
 * the parser picks the view matching the table's reported size/revision. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
 224
/* Overlay of the ATOM fan table revisions; format selected by
 * ucFanTableFormat at parse time. */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
 230
 231static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
 232					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
 233{
 234	u32 size = atom_table->ucNumEntries *
 235		sizeof(struct amdgpu_clock_voltage_dependency_entry);
 236	int i;
 237	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
 238
 239	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
 240	if (!amdgpu_table->entries)
 241		return -ENOMEM;
 242
 243	entry = &atom_table->entries[0];
 244	for (i = 0; i < atom_table->ucNumEntries; i++) {
 245		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
 246			(entry->ucClockHigh << 16);
 247		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
 248		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
 249			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
 250	}
 251	amdgpu_table->count = atom_table->ucNumEntries;
 252
 253	return 0;
 254}
 255
 256int amdgpu_get_platform_caps(struct amdgpu_device *adev)
 257{
 258	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 259	union power_info *power_info;
 260	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 261	u16 data_offset;
 262	u8 frev, crev;
 263
 264	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 265				   &frev, &crev, &data_offset))
 266		return -EINVAL;
 267	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 268
 269	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
 270	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
 271	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
 272
 273	return 0;
 274}
 275
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each extended-header revision;
 * used to decide which optional sub-tables a given VBIOS provides. */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
 285
 286int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 287{
 288	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 289	union power_info *power_info;
 290	union fan_info *fan_info;
 291	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
 292	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 293	u16 data_offset;
 294	u8 frev, crev;
 295	int ret, i;
 296
 297	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 298				   &frev, &crev, &data_offset))
 299		return -EINVAL;
 300	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 301
 302	/* fan table */
 303	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 304	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 305		if (power_info->pplib3.usFanTableOffset) {
 306			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
 307						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
 308			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
 309			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
 310			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
 311			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
 312			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
 313			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
 314			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
 315			if (fan_info->fan.ucFanTableFormat >= 2)
 316				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
 317			else
 318				adev->pm.dpm.fan.t_max = 10900;
 319			adev->pm.dpm.fan.cycle_delay = 100000;
 320			if (fan_info->fan.ucFanTableFormat >= 3) {
 321				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
 322				adev->pm.dpm.fan.default_max_fan_pwm =
 323					le16_to_cpu(fan_info->fan3.usFanPWMMax);
 324				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
 325				adev->pm.dpm.fan.fan_output_sensitivity =
 326					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
 327			}
 328			adev->pm.dpm.fan.ucode_fan_control = true;
 329		}
 330	}
 331
 332	/* clock dependancy tables, shedding tables */
 333	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 334	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
 335		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
 336			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 337				(mode_info->atom_context->bios + data_offset +
 338				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
 339			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
 340								 dep_table);
 341			if (ret) {
 342				amdgpu_free_extended_power_table(adev);
 343				return ret;
 344			}
 345		}
 346		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
 347			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 348				(mode_info->atom_context->bios + data_offset +
 349				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
 350			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
 351								 dep_table);
 352			if (ret) {
 353				amdgpu_free_extended_power_table(adev);
 354				return ret;
 355			}
 356		}
 357		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
 358			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 359				(mode_info->atom_context->bios + data_offset +
 360				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
 361			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
 362								 dep_table);
 363			if (ret) {
 364				amdgpu_free_extended_power_table(adev);
 365				return ret;
 366			}
 367		}
 368		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
 369			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 370				(mode_info->atom_context->bios + data_offset +
 371				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
 372			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
 373								 dep_table);
 374			if (ret) {
 375				amdgpu_free_extended_power_table(adev);
 376				return ret;
 377			}
 378		}
 379		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 380			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
 381				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
 382				(mode_info->atom_context->bios + data_offset +
 383				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
 384			if (clk_v->ucNumEntries) {
 385				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
 386					le16_to_cpu(clk_v->entries[0].usSclkLow) |
 387					(clk_v->entries[0].ucSclkHigh << 16);
 388				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
 389					le16_to_cpu(clk_v->entries[0].usMclkLow) |
 390					(clk_v->entries[0].ucMclkHigh << 16);
 391				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
 392					le16_to_cpu(clk_v->entries[0].usVddc);
 393				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
 394					le16_to_cpu(clk_v->entries[0].usVddci);
 395			}
 396		}
 397		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
 398			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
 399				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
 400				(mode_info->atom_context->bios + data_offset +
 401				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
 402			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 403
 404			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
 405				kcalloc(psl->ucNumEntries,
 406					sizeof(struct amdgpu_phase_shedding_limits_entry),
 407					GFP_KERNEL);
 408			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
 409				amdgpu_free_extended_power_table(adev);
 410				return -ENOMEM;
 411			}
 412
 413			entry = &psl->entries[0];
 414			for (i = 0; i < psl->ucNumEntries; i++) {
 415				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
 416					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
 417				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
 418					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
 419				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
 420					le16_to_cpu(entry->usVoltage);
 421				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
 422					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
 423			}
 424			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
 425				psl->ucNumEntries;
 426		}
 427	}
 428
 429	/* cac data */
 430	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 431	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
 432		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
 433		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
 434		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
 435		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
 436		if (adev->pm.dpm.tdp_od_limit)
 437			adev->pm.dpm.power_control = true;
 438		else
 439			adev->pm.dpm.power_control = false;
 440		adev->pm.dpm.tdp_adjustment = 0;
 441		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
 442		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
 443		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
 444		if (power_info->pplib5.usCACLeakageTableOffset) {
 445			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
 446				(ATOM_PPLIB_CAC_Leakage_Table *)
 447				(mode_info->atom_context->bios + data_offset +
 448				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
 449			ATOM_PPLIB_CAC_Leakage_Record *entry;
 450			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
 451			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
 452			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
 453				amdgpu_free_extended_power_table(adev);
 454				return -ENOMEM;
 455			}
 456			entry = &cac_table->entries[0];
 457			for (i = 0; i < cac_table->ucNumEntries; i++) {
 458				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
 459					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
 460						le16_to_cpu(entry->usVddc1);
 461					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
 462						le16_to_cpu(entry->usVddc2);
 463					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
 464						le16_to_cpu(entry->usVddc3);
 465				} else {
 466					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
 467						le16_to_cpu(entry->usVddc);
 468					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
 469						le32_to_cpu(entry->ulLeakageValue);
 470				}
 471				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
 472					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
 473			}
 474			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
 475		}
 476	}
 477
 478	/* ext tables */
 479	if (le16_to_cpu(power_info->pplib.usTableSize) >=
 480	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
 481		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
 482			(mode_info->atom_context->bios + data_offset +
 483			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
 484		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
 485			ext_hdr->usVCETableOffset) {
 486			VCEClockInfoArray *array = (VCEClockInfoArray *)
 487				(mode_info->atom_context->bios + data_offset +
 488				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
 489			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
 490				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
 491				(mode_info->atom_context->bios + data_offset +
 492				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 493				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
 494			ATOM_PPLIB_VCE_State_Table *states =
 495				(ATOM_PPLIB_VCE_State_Table *)
 496				(mode_info->atom_context->bios + data_offset +
 497				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
 498				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
 499				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
 500			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
 501			ATOM_PPLIB_VCE_State_Record *state_entry;
 502			VCEClockInfo *vce_clk;
 503			u32 size = limits->numEntries *
 504				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
 505			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
 506				kzalloc(size, GFP_KERNEL);
 507			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
 508				amdgpu_free_extended_power_table(adev);
 509				return -ENOMEM;
 510			}
 511			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 512				limits->numEntries;
 513			entry = &limits->entries[0];
 514			state_entry = &states->entries[0];
 515			for (i = 0; i < limits->numEntries; i++) {
 516				vce_clk = (VCEClockInfo *)
 517					((u8 *)&array->entries[0] +
 518					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 519				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
 520					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 521				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
 522					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 523				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
 524					le16_to_cpu(entry->usVoltage);
 525				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 526					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 527			}
 528			adev->pm.dpm.num_of_vce_states =
 529					states->numEntries > AMD_MAX_VCE_LEVELS ?
 530					AMD_MAX_VCE_LEVELS : states->numEntries;
 531			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 532				vce_clk = (VCEClockInfo *)
 533					((u8 *)&array->entries[0] +
 534					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
 535				adev->pm.dpm.vce_states[i].evclk =
 536					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
 537				adev->pm.dpm.vce_states[i].ecclk =
 538					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
 539				adev->pm.dpm.vce_states[i].clk_idx =
 540					state_entry->ucClockInfoIndex & 0x3f;
 541				adev->pm.dpm.vce_states[i].pstate =
 542					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
 543				state_entry = (ATOM_PPLIB_VCE_State_Record *)
 544					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
 545			}
 546		}
 547		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
 548			ext_hdr->usUVDTableOffset) {
 549			UVDClockInfoArray *array = (UVDClockInfoArray *)
 550				(mode_info->atom_context->bios + data_offset +
 551				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
 552			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
 553				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
 554				(mode_info->atom_context->bios + data_offset +
 555				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
 556				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
 557			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
 558			u32 size = limits->numEntries *
 559				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
 560			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
 561				kzalloc(size, GFP_KERNEL);
 562			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
 563				amdgpu_free_extended_power_table(adev);
 564				return -ENOMEM;
 565			}
 566			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
 567				limits->numEntries;
 568			entry = &limits->entries[0];
 569			for (i = 0; i < limits->numEntries; i++) {
 570				UVDClockInfo *uvd_clk = (UVDClockInfo *)
 571					((u8 *)&array->entries[0] +
 572					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
 573				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
 574					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
 575				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
 576					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
 577				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
 578					le16_to_cpu(entry->usVoltage);
 579				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
 580					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
 581			}
 582		}
 583		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
 584			ext_hdr->usSAMUTableOffset) {
 585			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
 586				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
 587				(mode_info->atom_context->bios + data_offset +
 588				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
 589			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
 590			u32 size = limits->numEntries *
 591				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 592			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
 593				kzalloc(size, GFP_KERNEL);
 594			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
 595				amdgpu_free_extended_power_table(adev);
 596				return -ENOMEM;
 597			}
 598			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
 599				limits->numEntries;
 600			entry = &limits->entries[0];
 601			for (i = 0; i < limits->numEntries; i++) {
 602				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
 603					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
 604				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
 605					le16_to_cpu(entry->usVoltage);
 606				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
 607					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
 608			}
 609		}
 610		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
 611		    ext_hdr->usPPMTableOffset) {
 612			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
 613				(mode_info->atom_context->bios + data_offset +
 614				 le16_to_cpu(ext_hdr->usPPMTableOffset));
 615			adev->pm.dpm.dyn_state.ppm_table =
 616				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
 617			if (!adev->pm.dpm.dyn_state.ppm_table) {
 618				amdgpu_free_extended_power_table(adev);
 619				return -ENOMEM;
 620			}
 621			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
 622			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
 623				le16_to_cpu(ppm->usCpuCoreNumber);
 624			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
 625				le32_to_cpu(ppm->ulPlatformTDP);
 626			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
 627				le32_to_cpu(ppm->ulSmallACPlatformTDP);
 628			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
 629				le32_to_cpu(ppm->ulPlatformTDC);
 630			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
 631				le32_to_cpu(ppm->ulSmallACPlatformTDC);
 632			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
 633				le32_to_cpu(ppm->ulApuTDP);
 634			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
 635				le32_to_cpu(ppm->ulDGpuTDP);
 636			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
 637				le32_to_cpu(ppm->ulDGpuUlvPower);
 638			adev->pm.dpm.dyn_state.ppm_table->tj_max =
 639				le32_to_cpu(ppm->ulTjmax);
 640		}
 641		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
 642			ext_hdr->usACPTableOffset) {
 643			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
 644				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
 645				(mode_info->atom_context->bios + data_offset +
 646				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
 647			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
 648			u32 size = limits->numEntries *
 649				sizeof(struct amdgpu_clock_voltage_dependency_entry);
 650			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
 651				kzalloc(size, GFP_KERNEL);
 652			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
 653				amdgpu_free_extended_power_table(adev);
 654				return -ENOMEM;
 655			}
 656			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
 657				limits->numEntries;
 658			entry = &limits->entries[0];
 659			for (i = 0; i < limits->numEntries; i++) {
 660				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
 661					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
 662				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
 663					le16_to_cpu(entry->usVoltage);
 664				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
 665					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
 666			}
 667		}
 668		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
 669			ext_hdr->usPowerTuneTableOffset) {
 670			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
 671					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 672			ATOM_PowerTune_Table *pt;
 673			adev->pm.dpm.dyn_state.cac_tdp_table =
 674				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
 675			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
 676				amdgpu_free_extended_power_table(adev);
 677				return -ENOMEM;
 678			}
 679			if (rev > 0) {
 680				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
 681					(mode_info->atom_context->bios + data_offset +
 682					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 683				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
 684					ppt->usMaximumPowerDeliveryLimit;
 685				pt = &ppt->power_tune_table;
 686			} else {
 687				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
 688					(mode_info->atom_context->bios + data_offset +
 689					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
 690				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
 691				pt = &ppt->power_tune_table;
 692			}
 693			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
 694			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
 695				le16_to_cpu(pt->usConfigurableTDP);
 696			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
 697			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
 698				le16_to_cpu(pt->usBatteryPowerLimit);
 699			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
 700				le16_to_cpu(pt->usSmallPowerLimit);
 701			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
 702				le16_to_cpu(pt->usLowCACLeakage);
 703			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
 704				le16_to_cpu(pt->usHighCACLeakage);
 705		}
 706		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
 707				ext_hdr->usSclkVddgfxTableOffset) {
 708			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
 709				(mode_info->atom_context->bios + data_offset +
 710				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
 711			ret = amdgpu_parse_clk_voltage_dep_table(
 712					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
 713					dep_table);
 714			if (ret) {
 715				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
 716				return ret;
 717			}
 718		}
 719	}
 720
 721	return 0;
 722}
 723
 724void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
 725{
 726	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
 727
 728	kfree(dyn_state->vddc_dependency_on_sclk.entries);
 729	kfree(dyn_state->vddci_dependency_on_mclk.entries);
 730	kfree(dyn_state->vddc_dependency_on_mclk.entries);
 731	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
 732	kfree(dyn_state->cac_leakage_table.entries);
 733	kfree(dyn_state->phase_shedding_limits_table.entries);
 734	kfree(dyn_state->ppm_table);
 735	kfree(dyn_state->cac_tdp_table);
 736	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
 737	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
 738	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
 739	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 740	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
 741}
 742
/*
 * Human-readable controller names, indexed by the vbios thermal controller
 * type (ATOM_PPLIB_THERMALCONTROLLER.ucType).  Used for the "Possible %s
 * thermal controller" log message and as the i2c board-info device name.
 */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
 765
 766void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
 767{
 768	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 769	ATOM_PPLIB_POWERPLAYTABLE *power_table;
 770	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 771	ATOM_PPLIB_THERMALCONTROLLER *controller;
 772	struct amdgpu_i2c_bus_rec i2c_bus;
 773	u16 data_offset;
 774	u8 frev, crev;
 775
 776	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
 777				   &frev, &crev, &data_offset))
 778		return;
 779	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
 780		(mode_info->atom_context->bios + data_offset);
 781	controller = &power_table->sThermalController;
 782
 783	/* add the i2c bus for thermal/fan chip */
 784	if (controller->ucType > 0) {
 785		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
 786			adev->pm.no_fan = true;
 787		adev->pm.fan_pulses_per_revolution =
 788			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
 789		if (adev->pm.fan_pulses_per_revolution) {
 790			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
 791			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
 792		}
 793		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
 794			DRM_INFO("Internal thermal controller %s fan control\n",
 795				 (controller->ucFanParameters &
 796				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 797			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
 798		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
 799			DRM_INFO("Internal thermal controller %s fan control\n",
 800				 (controller->ucFanParameters &
 801				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 802			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
 803		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
 804			DRM_INFO("Internal thermal controller %s fan control\n",
 805				 (controller->ucFanParameters &
 806				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 807			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
 808		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
 809			DRM_INFO("Internal thermal controller %s fan control\n",
 810				 (controller->ucFanParameters &
 811				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 812			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
 813		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
 814			DRM_INFO("Internal thermal controller %s fan control\n",
 815				 (controller->ucFanParameters &
 816				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 817			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
 818		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
 819			DRM_INFO("Internal thermal controller %s fan control\n",
 820				 (controller->ucFanParameters &
 821				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 822			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
 823		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
 824			DRM_INFO("Internal thermal controller %s fan control\n",
 825				 (controller->ucFanParameters &
 826				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 827			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
 828		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
 829			DRM_INFO("Internal thermal controller %s fan control\n",
 830				 (controller->ucFanParameters &
 831				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 832			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
 833		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
 834			DRM_INFO("External GPIO thermal controller %s fan control\n",
 835				 (controller->ucFanParameters &
 836				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 837			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
 838		} else if (controller->ucType ==
 839			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
 840			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
 841				 (controller->ucFanParameters &
 842				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 843			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
 844		} else if (controller->ucType ==
 845			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
 846			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
 847				 (controller->ucFanParameters &
 848				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 849			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 850		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 851			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 852				 pp_lib_thermal_controller_names[controller->ucType],
 853				 controller->ucI2cAddress >> 1,
 854				 (controller->ucFanParameters &
 855				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 856			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
 857			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
 858			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
 859			if (adev->pm.i2c_bus) {
 860				struct i2c_board_info info = { };
 861				const char *name = pp_lib_thermal_controller_names[controller->ucType];
 862				info.addr = controller->ucI2cAddress >> 1;
 863				strlcpy(info.type, name, sizeof(info.type));
 864				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
 865			}
 866		} else {
 867			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
 868				 controller->ucType,
 869				 controller->ucI2cAddress >> 1,
 870				 (controller->ucFanParameters &
 871				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 872		}
 873	}
 874}
 875
 876enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
 877						 u32 sys_mask,
 878						 enum amdgpu_pcie_gen asic_gen,
 879						 enum amdgpu_pcie_gen default_gen)
 880{
 881	switch (asic_gen) {
 882	case AMDGPU_PCIE_GEN1:
 883		return AMDGPU_PCIE_GEN1;
 884	case AMDGPU_PCIE_GEN2:
 885		return AMDGPU_PCIE_GEN2;
 886	case AMDGPU_PCIE_GEN3:
 887		return AMDGPU_PCIE_GEN3;
 888	default:
 889		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
 890		    (default_gen == AMDGPU_PCIE_GEN3))
 891			return AMDGPU_PCIE_GEN3;
 892		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
 893			 (default_gen == AMDGPU_PCIE_GEN2))
 894			return AMDGPU_PCIE_GEN2;
 895		else
 896			return AMDGPU_PCIE_GEN1;
 897	}
 898	return AMDGPU_PCIE_GEN1;
 899}
 900
 901struct amd_vce_state*
 902amdgpu_get_vce_clock_state(void *handle, u32 idx)
 903{
 904	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 905
 906	if (idx < adev->pm.dpm.num_of_vce_states)
 907		return &adev->pm.dpm.vce_states[idx];
 908
 909	return NULL;
 910}
 911
 912int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 913{
 914	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
 915
 916	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 917}
 918
 919int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 920{
 921	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
 922
 923	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 924}
 925
/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block to (un)gate
 * @gate: true to power gate the block, false to ungate it
 *
 * Forwards the request to the powerplay set_powergating_by_smu callback
 * when one is registered.  Only UVD/VCE requests take adev->pm.mutex here
 * (see the deadlock note below); block types not listed in the switch are
 * silently ignored and return 0.
 *
 * Returns 0 on success or when nothing was done, otherwise the error code
 * from the powerplay callback.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 *     NFO: task ocltst:2028 blocked for more than 120 seconds.
			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 *     echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 *     cltst          D    0  2028   2026 0x00000000
			 *     all Trace:
			 *     __schedule+0x2c0/0x870
			 *     schedule+0x2c/0x70
			 *     schedule_preempt_disabled+0xe/0x10
			 *     __mutex_lock.isra.9+0x26d/0x4e0
			 *     __mutex_lock_slowpath+0x13/0x20
			 *     ? __mutex_lock_slowpath+0x13/0x20
			 *     mutex_lock+0x2f/0x40
			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		/* these paths rely on the caller's locking, see note above */
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		}
		break;
	default:
		/* unhandled block types are not an error */
		break;
	}

	return ret;
}
 984
 985int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 986{
 987	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 988	void *pp_handle = adev->powerplay.pp_handle;
 989	int ret = 0;
 990
 991	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 992		return -ENOENT;
 993
 
 
 994	/* enter BACO state */
 995	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 996
 
 
 997	return ret;
 998}
 999
1000int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1001{
1002	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1003	void *pp_handle = adev->powerplay.pp_handle;
1004	int ret = 0;
1005
1006	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1007		return -ENOENT;
1008
 
 
1009	/* exit BACO state */
1010	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1011
 
 
1012	return ret;
1013}
1014
1015int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1016			     enum pp_mp1_state mp1_state)
1017{
1018	int ret = 0;
1019	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1020
1021	if (pp_funcs && pp_funcs->set_mp1_state) {
 
 
1022		ret = pp_funcs->set_mp1_state(
1023				adev->powerplay.pp_handle,
1024				mp1_state);
 
 
1025	}
1026
1027	return ret;
1028}
1029
1030bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1031{
1032	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1033	void *pp_handle = adev->powerplay.pp_handle;
1034	bool baco_cap;
 
1035
1036	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1037		return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1038
1039	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1040		return false;
1041
1042	return baco_cap;
1043}
1044
1045int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1046{
1047	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1048	void *pp_handle = adev->powerplay.pp_handle;
 
1049
1050	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1051		return -ENOENT;
1052
1053	return pp_funcs->asic_reset_mode_2(pp_handle);
 
 
 
 
 
 
1054}
1055
1056int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1057{
1058	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1059	void *pp_handle = adev->powerplay.pp_handle;
1060	int ret = 0;
1061
1062	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1063		return -ENOENT;
1064
 
 
1065	/* enter BACO state */
1066	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1067	if (ret)
1068		return ret;
1069
1070	/* exit BACO state */
1071	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1072	if (ret)
1073		return ret;
1074
1075	return 0;
 
 
1076}
1077
1078bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1079{
1080	struct smu_context *smu = &adev->smu;
 
1081
1082	if (is_support_sw_smu(adev))
1083		return smu_mode1_reset_is_support(smu);
 
 
 
1084
1085	return false;
1086}
1087
1088int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1089{
1090	struct smu_context *smu = &adev->smu;
 
1091
1092	if (is_support_sw_smu(adev))
1093		return smu_mode1_reset(smu);
 
 
 
1094
1095	return -EOPNOTSUPP;
1096}
1097
1098int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1099				    enum PP_SMC_POWER_PROFILE type,
1100				    bool en)
1101{
1102	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1103	int ret = 0;
1104
1105	if (amdgpu_sriov_vf(adev))
1106		return 0;
1107
1108	if (pp_funcs && pp_funcs->switch_power_profile)
 
1109		ret = pp_funcs->switch_power_profile(
1110			adev->powerplay.pp_handle, type, en);
 
 
1111
1112	return ret;
1113}
1114
1115int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1116			       uint32_t pstate)
1117{
1118	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1119	int ret = 0;
1120
1121	if (pp_funcs && pp_funcs->set_xgmi_pstate)
 
1122		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1123								pstate);
 
 
1124
1125	return ret;
1126}
1127
1128int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1129			     uint32_t cstate)
1130{
1131	int ret = 0;
1132	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1133	void *pp_handle = adev->powerplay.pp_handle;
1134
1135	if (pp_funcs && pp_funcs->set_df_cstate)
 
1136		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 
 
1137
1138	return ret;
1139}
1140
1141int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1142{
1143	struct smu_context *smu = &adev->smu;
 
1144
1145	if (is_support_sw_smu(adev))
1146		return smu_allow_xgmi_power_down(smu, en);
 
 
 
1147
1148	return 0;
1149}
1150
1151int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1152{
1153	void *pp_handle = adev->powerplay.pp_handle;
1154	const struct amd_pm_funcs *pp_funcs =
1155			adev->powerplay.pp_funcs;
1156	int ret = 0;
1157
1158	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
 
1159		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 
 
1160
1161	return ret;
1162}
1163
1164int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1165				      uint32_t msg_id)
1166{
1167	void *pp_handle = adev->powerplay.pp_handle;
1168	const struct amd_pm_funcs *pp_funcs =
1169			adev->powerplay.pp_funcs;
1170	int ret = 0;
1171
1172	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
 
1173		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1174						       msg_id);
 
 
1175
1176	return ret;
1177}
1178
1179int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1180				  bool acquire)
1181{
1182	void *pp_handle = adev->powerplay.pp_handle;
1183	const struct amd_pm_funcs *pp_funcs =
1184			adev->powerplay.pp_funcs;
1185	int ret = -EOPNOTSUPP;
1186
1187	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
 
1188		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1189						   acquire);
 
 
1190
1191	return ret;
1192}
1193
1194void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1195{
1196	if (adev->pm.dpm_enabled) {
1197		mutex_lock(&adev->pm.mutex);
1198		if (power_supply_is_system_supplied() > 0)
1199			adev->pm.ac_power = true;
1200		else
1201			adev->pm.ac_power = false;
 
1202		if (adev->powerplay.pp_funcs &&
1203		    adev->powerplay.pp_funcs->enable_bapm)
1204			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1205		mutex_unlock(&adev->pm.mutex);
1206
1207		if (is_support_sw_smu(adev))
1208			smu_set_ac_dc(&adev->smu);
 
 
1209	}
1210}
1211
1212int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1213			   void *data, uint32_t *size)
1214{
1215	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1216	int ret = 0;
1217
1218	if (!data || !size)
1219		return -EINVAL;
1220
1221	if (pp_funcs && pp_funcs->read_sensor)
1222		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1223								    sensor, data, size);
1224	else
1225		ret = -EINVAL;
 
 
 
1226
1227	return ret;
1228}
1229
/*
 * Workqueue handler for thermal interrupts: decide whether the GPU
 * should stay in (or leave) the internal thermal power state, then
 * re-evaluate clocks.  Queued from the thermal interrupt path via
 * pm.dpm.thermal.work.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		/* temperature readable: only stay thermal while hot */
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		/* no temperature sensor: fall back to the interrupt edge
		 * direction (high_to_low set means we cooled down)
		 */
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	/* publish the decision under the pm mutex */
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	/* apply the (possibly unchanged) state */
	amdgpu_pm_compute_clocks(adev);
}
1262
/*
 * Select the best power state from adev->pm.dpm.ps[] for the requested
 * state type, honoring single- vs multi-display constraints.  If no
 * state matches, progressively falls back to a less specific state type
 * (via restart_search) and may return NULL only when even the fallbacks
 * find nothing.  Legacy (non-powerplay) DPM path only.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states: match the ATOM UI classification, and skip
		 * single-display-only states when multiple displays are lit
		 */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states: match by ATOM classification flags */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
1396
/*
 * Pick a new power state and program it into the hardware through the
 * legacy pre/set/post sequence.  Caller must hold adev->pm.mutex
 * (hence "_locked").  Skips the hardware transition when the backend
 * reports the requested state equals the current one.
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* amdgpu_dpm == 1: user explicitly requested dpm debug output */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* ask the backend whether a transition is actually needed;
	 * treat a query failure as "not equal" so we still switch
	 */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
1465
/*
 * Re-evaluate clocks/power state after a display or workload change:
 * update display bandwidth, drain in-flight fences so the transition is
 * safe, then either dispatch a display-config-change task to the
 * powerplay backend or run the legacy state change path.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* wait for all rings to drain before switching states */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		/* powerplay path: only the non-DC display code needs the
		 * display configuration forwarded manually
		 */
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* we have issues with mclk switching with
			 * refresh rates over 120 hz on the non-DC code.
			 */
			if (adev->pm.pm_display_cfg.vrefresh > 120)
				adev->pm.pm_display_cfg.min_vblank_time = 0;
			if (adev->powerplay.pp_funcs->display_configuration_change)
				adev->powerplay.pp_funcs->display_configuration_change(
							adev->powerplay.pp_handle,
							&adev->pm.pm_display_cfg);
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
	} else {
		/* legacy path: pick and program a power state ourselves */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		amdgpu_dpm_change_power_state_locked(adev);
		mutex_unlock(&adev->pm.mutex);
	}
}
1508
/*
 * Power UVD (video decode) up or down.  SI-family parts have no UVD
 * powergating hook, so they flag uvd_active and let the power state
 * selection pick a UVD state instead; everything else goes through the
 * SMU powergating interface.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		/* re-evaluate state outside the mutex (it re-acquires it) */
		amdgpu_pm_compute_clocks(adev);
	} else {
		/* note: powergating sense is inverted (gate == !enable) */
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
			adev->uvd.decode_image_width >= WIDTH_4K) {
			/* Stoney's pp_handle is the hwmgr itself on this path —
			 * NOTE(review): relies on that backend layout; verify if
			 * backends change.
			 */
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}
1543
1544void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1545{
 
1546	int ret = 0;
1547
1548	if (adev->family == AMDGPU_FAMILY_SI) {
1549		mutex_lock(&adev->pm.mutex);
1550		if (enable) {
1551			adev->pm.dpm.vce_active = true;
1552			/* XXX select vce level based on ring/task */
1553			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1554		} else {
1555			adev->pm.dpm.vce_active = false;
1556		}
1557		mutex_unlock(&adev->pm.mutex);
 
 
 
 
 
 
 
 
 
 
 
1558
1559		amdgpu_pm_compute_clocks(adev);
1560	} else {
1561		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1562		if (ret)
1563			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
1564				  enable ? "enable" : "disable", ret);
1565	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1566}
1567
1568void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 
1569{
1570	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1571
1572	if (adev->powerplay.pp_funcs->print_power_state == NULL)
1573		return;
1574
1575	for (i = 0; i < adev->pm.dpm.num_ps; i++)
1576		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1577
 
1578}
1579
1580void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1581{
 
1582	int ret = 0;
1583
1584	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1585	if (ret)
1586		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
1587			  enable ? "enable" : "disable", ret);
 
 
 
 
 
 
1588}
1589
1590int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 
1591{
1592	int r;
 
1593
1594	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1595		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1596		if (r) {
1597			pr_err("smu firmware loading failed\n");
1598			return r;
1599		}
1600
1601		if (smu_version)
1602			*smu_version = adev->pm.fw_version;
1603	}
 
1604
1605	return 0;
1606}