/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

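/**
 * amdgpu_dpm_get_sclk - query the graphics (shader) clock
 * @adev: amdgpu device pointer
 * @low: select the low end of the DPM clock range instead of the high end
 *
 * Returns the clock reported by the powerplay backend, or 0 if the backend
 * does not implement the callback.
 */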
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

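/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to power-gate the block, false to ungate it
 *
 * The cached power state is checked first so a redundant request becomes a
 * no-op; on success the cache is updated to the new state.
 */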
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

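/**
 * amdgpu_dpm_baco_enter - put the ASIC into the BACO (Bus Active, Chip Off) state
 * @adev: amdgpu device pointer
 *
 * Returns 0 on success, -ENOENT if the backend provides no BACO support.
 */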
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

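/**
 * amdgpu_dpm_is_baco_supported - check whether BACO reset can be used
 * @adev: amdgpu device pointer
 *
 * Returns false when the backend lacks the capability query or while the
 * device is suspending to S3 (see the workaround note below).
 */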
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

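/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO
 * @adev: amdgpu device pointer
 *
 * Enters and immediately exits the BACO state, which acts as a full GPU
 * reset on ASICs that support it.
 */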
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

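/**
 * amdgpu_dpm_switch_power_profile - enable/disable a workload power profile
 * @adev: amdgpu device pointer
 * @type: PP_SMC_POWER_PROFILE_* workload hint
 * @en: true to enable the profile, false to disable it
 *
 * A no-op under SR-IOV virtualization, where the host owns power policy.
 */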
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

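/**
 * amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power source change
 * @adev: amdgpu device pointer
 *
 * Refreshes adev->pm.ac_power and notifies the powerplay/SMU backend so it
 * can switch to the appropriate AC or DC power policy.
 */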
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

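/**
 * amdgpu_dpm_compute_clocks - re-evaluate the power state for current load
 * @adev: amdgpu device pointer
 *
 * Updates display bandwidth requirements, waits for the fences on all ready
 * rings to drain, then asks the backend to pick clocks matching the new
 * conditions.
 */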
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

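/**
 * amdgpu_pm_load_smu_firmware - load the SMU microcode
 * @adev: amdgpu device pointer
 * @smu_version: optional output for the loaded firmware version
 *
 * Returns 0 when the backend has no loader (nothing to do) or on success.
 */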
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

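/**
 * amdgpu_dpm_get_dpm_freq_range - query the supported sclk frequency range
 * @adev: amdgpu device pointer
 * @type: clock type, only PP_SCLK is accepted
 * @min: output for the minimum frequency
 * @max: output for the maximum frequency
 *
 * Returns -EINVAL for any other clock type and -EOPNOTSUPP on legacy
 * (non-SW-SMU) backends.
 */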
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

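/**
 * amdgpu_dpm_force_performance_level - force a DPM performance level
 * @adev: amdgpu device pointer
 * @level: target AMD_DPM_FORCED_LEVEL_* value
 *
 * Handles the gfxoff quirk for first-generation Raven and the clockgating/
 * powergating handshake needed when entering or leaving the UMD pstate
 * profile levels. Rejected with -EINVAL while thermal throttling is active.
 */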
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

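/**
 * amdgpu_dpm_get_fan_control_mode - query the current fan control mode
 * @adev: amdgpu device pointer
 * @fan_mode: output for the AMD_FAN_CTRL_* mode currently in use
 *
 * Returns -EOPNOTSUPP when the backend exposes no fan control.
 */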
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

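/**
 * amdgpu_dpm_is_overdrive_supported - check whether overdrive is enabled
 * @adev: amdgpu device pointer
 *
 * Checks the od_enabled flag of the active backend (SW SMU or hwmgr); APUs
 * driven by the SW SMU report overdrive as supported unconditionally.
 */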
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
v6.8
   1/*
   2 * Copyright 2011 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_i2c.h"
  28#include "amdgpu_dpm.h"
  29#include "atom.h"
  30#include "amd_pcie.h"
  31#include "amdgpu_display.h"
  32#include "hwmgr.h"
  33#include <linux/power_supply.h>
  34#include "amdgpu_smu.h"
  35
  36#define amdgpu_dpm_enable_bapm(adev, e) \
  37		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
  38
  39#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
  40
  41int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
  42{
  43	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  44	int ret = 0;
  45
  46	if (!pp_funcs->get_sclk)
  47		return 0;
  48
  49	mutex_lock(&adev->pm.mutex);
  50	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
  51				 low);
  52	mutex_unlock(&adev->pm.mutex);
  53
  54	return ret;
  55}
  56
  57int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
  58{
  59	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  60	int ret = 0;
  61
  62	if (!pp_funcs->get_mclk)
  63		return 0;
  64
  65	mutex_lock(&adev->pm.mutex);
  66	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
  67				 low);
  68	mutex_unlock(&adev->pm.mutex);
  69
  70	return ret;
  71}
  72
  73int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
  74{
  75	int ret = 0;
  76	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
  77	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
  78
  79	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
  80		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
  81				block_type, gate ? "gate" : "ungate");
  82		return 0;
  83	}
  84
  85	mutex_lock(&adev->pm.mutex);
  86
  87	switch (block_type) {
  88	case AMD_IP_BLOCK_TYPE_UVD:
  89	case AMD_IP_BLOCK_TYPE_VCE:
  90	case AMD_IP_BLOCK_TYPE_GFX:
  91	case AMD_IP_BLOCK_TYPE_VCN:
  92	case AMD_IP_BLOCK_TYPE_SDMA:
  93	case AMD_IP_BLOCK_TYPE_JPEG:
  94	case AMD_IP_BLOCK_TYPE_GMC:
  95	case AMD_IP_BLOCK_TYPE_ACP:
  96	case AMD_IP_BLOCK_TYPE_VPE:
  97		if (pp_funcs && pp_funcs->set_powergating_by_smu)
  98			ret = (pp_funcs->set_powergating_by_smu(
  99				(adev)->powerplay.pp_handle, block_type, gate));
 100		break;
 101	default:
 102		break;
 103	}
 104
 105	if (!ret)
 106		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
 107
 108	mutex_unlock(&adev->pm.mutex);
 109
 110	return ret;
 111}
 112
 113int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
 114{
 115	struct smu_context *smu = adev->powerplay.pp_handle;
 116	int ret = -EOPNOTSUPP;
 117
 118	mutex_lock(&adev->pm.mutex);
 119	ret = smu_set_gfx_power_up_by_imu(smu);
 120	mutex_unlock(&adev->pm.mutex);
 121
 122	msleep(10);
 123
 124	return ret;
 125}
 126
 127int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 128{
 129	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 130	void *pp_handle = adev->powerplay.pp_handle;
 131	int ret = 0;
 132
 133	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 134		return -ENOENT;
 135
 136	mutex_lock(&adev->pm.mutex);
 137
 138	/* enter BACO state */
 139	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 140
 141	mutex_unlock(&adev->pm.mutex);
 142
 143	return ret;
 144}
 145
 146int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 147{
 148	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 149	void *pp_handle = adev->powerplay.pp_handle;
 150	int ret = 0;
 151
 152	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 153		return -ENOENT;
 154
 155	mutex_lock(&adev->pm.mutex);
 156
 157	/* exit BACO state */
 158	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 159
 160	mutex_unlock(&adev->pm.mutex);
 161
 162	return ret;
 163}
 164
 165int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 166			     enum pp_mp1_state mp1_state)
 167{
 168	int ret = 0;
 169	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 170
 171	if (pp_funcs && pp_funcs->set_mp1_state) {
 172		mutex_lock(&adev->pm.mutex);
 173
 174		ret = pp_funcs->set_mp1_state(
 175				adev->powerplay.pp_handle,
 176				mp1_state);
 177
 178		mutex_unlock(&adev->pm.mutex);
 179	}
 180
 181	return ret;
 182}
 183
 184int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
 185{
 186	int ret = 0;
 187	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 188
 189	if (pp_funcs && pp_funcs->notify_rlc_state) {
 190		mutex_lock(&adev->pm.mutex);
 191
 192		ret = pp_funcs->notify_rlc_state(
 193				adev->powerplay.pp_handle,
 194				en);
 195
 196		mutex_unlock(&adev->pm.mutex);
 197	}
 198
 199	return ret;
 200}
 201
 202bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 203{
 204	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 205	void *pp_handle = adev->powerplay.pp_handle;
 206	bool ret;
 
 207
 208	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
 209		return false;
 210	/* Don't use baco for reset in S3.
 211	 * This is a workaround for some platforms
 212	 * where entering BACO during suspend
 213	 * seems to cause reboots or hangs.
 214	 * This might be related to the fact that BACO controls
 215	 * power to the whole GPU including devices like audio and USB.
 216	 * Powering down/up everything may adversely affect these other
 217	 * devices.  Needs more investigation.
 218	 */
 219	if (adev->in_s3)
 220		return false;
 221
 222	mutex_lock(&adev->pm.mutex);
 223
 224	ret = pp_funcs->get_asic_baco_capability(pp_handle);
 
 225
 226	mutex_unlock(&adev->pm.mutex);
 227
 228	return ret;
 229}
 230
 231int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 232{
 233	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 234	void *pp_handle = adev->powerplay.pp_handle;
 235	int ret = 0;
 236
 237	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
 238		return -ENOENT;
 239
 240	mutex_lock(&adev->pm.mutex);
 241
 242	ret = pp_funcs->asic_reset_mode_2(pp_handle);
 243
 244	mutex_unlock(&adev->pm.mutex);
 245
 246	return ret;
 247}
 248
 249int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
 250{
 251	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 252	void *pp_handle = adev->powerplay.pp_handle;
 253	int ret = 0;
 254
 255	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
 256		return -ENOENT;
 257
 258	mutex_lock(&adev->pm.mutex);
 259
 260	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
 261
 262	mutex_unlock(&adev->pm.mutex);
 263
 264	return ret;
 265}
 266
 267int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
 268{
 269	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 270	void *pp_handle = adev->powerplay.pp_handle;
 271	int ret = 0;
 272
 273	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 274		return -ENOENT;
 275
 276	mutex_lock(&adev->pm.mutex);
 277
 278	/* enter BACO state */
 279	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 280	if (ret)
 281		goto out;
 282
 283	/* exit BACO state */
 284	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 285
 286out:
 287	mutex_unlock(&adev->pm.mutex);
 288	return ret;
 289}
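
/*
 * Illustrative sketch (not part of the original file): gate a BACO-based
 * reset on the capability check above, bailing out with -EOPNOTSUPP when
 * the platform cannot use BACO (e.g. while in S3).
 */
static int __maybe_unused example_try_baco_reset(struct amdgpu_device *adev)
{
	if (!amdgpu_dpm_is_baco_supported(adev))
		return -EOPNOTSUPP;

	return amdgpu_dpm_baco_reset(adev);
}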
 290
 291bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
 292{
 293	struct smu_context *smu = adev->powerplay.pp_handle;
 294	bool support_mode1_reset = false;
 295
 296	if (is_support_sw_smu(adev)) {
 297		mutex_lock(&adev->pm.mutex);
 298		support_mode1_reset = smu_mode1_reset_is_support(smu);
 299		mutex_unlock(&adev->pm.mutex);
 300	}
 301
 302	return support_mode1_reset;
 303}
 304
 305int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
 306{
 307	struct smu_context *smu = adev->powerplay.pp_handle;
 308	int ret = -EOPNOTSUPP;
 309
 310	if (is_support_sw_smu(adev)) {
 311		mutex_lock(&adev->pm.mutex);
 312		ret = smu_mode1_reset(smu);
 313		mutex_unlock(&adev->pm.mutex);
 314	}
 315
 316	return ret;
 317}
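
/*
 * Illustrative sketch (not part of the original file): probe for mode1
 * reset support before committing to it, mirroring how the BACO helpers
 * are paired above.
 */
static int __maybe_unused example_try_mode1_reset(struct amdgpu_device *adev)
{
	if (!amdgpu_dpm_is_mode1_reset_supported(adev))
		return -EOPNOTSUPP;

	return amdgpu_dpm_mode1_reset(adev);
}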
 318
 319int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 320				    enum PP_SMC_POWER_PROFILE type,
 321				    bool en)
 322{
 323	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 324	int ret = 0;
 325
 326	if (amdgpu_sriov_vf(adev))
 327		return 0;
 328
 329	if (pp_funcs && pp_funcs->switch_power_profile) {
 330		mutex_lock(&adev->pm.mutex);
 331		ret = pp_funcs->switch_power_profile(
 332			adev->powerplay.pp_handle, type, en);
 333		mutex_unlock(&adev->pm.mutex);
 334	}
 335
 336	return ret;
 337}
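
/*
 * Illustrative sketch (not part of the original file): bracket a compute
 * workload with the COMPUTE power profile via the wrapper above. The
 * enable/disable calls are expected to stay balanced.
 */
static int __maybe_unused example_run_compute_job(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_dpm_switch_power_profile(adev,
					      PP_SMC_POWER_PROFILE_COMPUTE,
					      true);
	if (ret)
		return ret;

	/* ... submit and fence the compute work here ... */

	return amdgpu_dpm_switch_power_profile(adev,
					       PP_SMC_POWER_PROFILE_COMPUTE,
					       false);
}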
 338
 339int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 340			       uint32_t pstate)
 341{
 342	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 343	int ret = 0;
 344
 345	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
 346		mutex_lock(&adev->pm.mutex);
 347		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
 348								pstate);
 349		mutex_unlock(&adev->pm.mutex);
 350	}
 351
 352	return ret;
 353}
 354
 355int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 356			     uint32_t cstate)
 357{
 358	int ret = 0;
 359	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 360	void *pp_handle = adev->powerplay.pp_handle;
 361
 362	if (pp_funcs && pp_funcs->set_df_cstate) {
 363		mutex_lock(&adev->pm.mutex);
 364		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 365		mutex_unlock(&adev->pm.mutex);
 366	}
 367
 368	return ret;
 369}
 370
 371int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
 372{
 373	struct smu_context *smu = adev->powerplay.pp_handle;
 374	int mode = XGMI_PLPD_NONE;
 375
 376	if (is_support_sw_smu(adev)) {
 377		mode = smu->plpd_mode;
 378		if (mode_desc == NULL)
 379			return mode;
 380		switch (smu->plpd_mode) {
 381		case XGMI_PLPD_DISALLOW:
 382			*mode_desc = "disallow";
 383			break;
 384		case XGMI_PLPD_DEFAULT:
 385			*mode_desc = "default";
 386			break;
 387		case XGMI_PLPD_OPTIMIZED:
 388			*mode_desc = "optimized";
 389			break;
 390		case XGMI_PLPD_NONE:
 391		default:
 392			*mode_desc = "none";
 393			break;
 394		}
 395	}
 396
 397	return mode;
 398}
 399
 400int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
 401{
 402	struct smu_context *smu = adev->powerplay.pp_handle;
 403	int ret = -EOPNOTSUPP;
 404
 405	if (is_support_sw_smu(adev)) {
 406		mutex_lock(&adev->pm.mutex);
 407		ret = smu_set_xgmi_plpd_mode(smu, mode);
 408		mutex_unlock(&adev->pm.mutex);
 409	}
 410
 411	return ret;
 412}
 413
 414int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 415{
 416	void *pp_handle = adev->powerplay.pp_handle;
 417	const struct amd_pm_funcs *pp_funcs =
 418			adev->powerplay.pp_funcs;
 419	int ret = 0;
 420
 421	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
 422		mutex_lock(&adev->pm.mutex);
 423		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 424		mutex_unlock(&adev->pm.mutex);
 425	}
 426
 427	return ret;
 428}
 429
 430int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
 431				      uint32_t msg_id)
 432{
 433	void *pp_handle = adev->powerplay.pp_handle;
 434	const struct amd_pm_funcs *pp_funcs =
 435			adev->powerplay.pp_funcs;
 436	int ret = 0;
 437
 438	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
 439		mutex_lock(&adev->pm.mutex);
 440		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
 441						       msg_id);
 442		mutex_unlock(&adev->pm.mutex);
 443	}
 444
 445	return ret;
 446}
 447
 448int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 449				  bool acquire)
 450{
 451	void *pp_handle = adev->powerplay.pp_handle;
 452	const struct amd_pm_funcs *pp_funcs =
 453			adev->powerplay.pp_funcs;
 454	int ret = -EOPNOTSUPP;
 455
 456	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
 457		mutex_lock(&adev->pm.mutex);
 458		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
 459						   acquire);
 460		mutex_unlock(&adev->pm.mutex);
 461	}
 462
 463	return ret;
 464}
 465
 466void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 467{
 468	if (adev->pm.dpm_enabled) {
 469		mutex_lock(&adev->pm.mutex);
 470		if (power_supply_is_system_supplied() > 0)
 471			adev->pm.ac_power = true;
 472		else
 473			adev->pm.ac_power = false;
 474
 475		if (adev->powerplay.pp_funcs &&
 476		    adev->powerplay.pp_funcs->enable_bapm)
 477			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
 478
 479		if (is_support_sw_smu(adev))
 480			smu_set_ac_dc(adev->powerplay.pp_handle);
 481
 482		mutex_unlock(&adev->pm.mutex);
 483	}
 484}
 485
 486int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
 487			   void *data, uint32_t *size)
 488{
 489	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 490	int ret = -EINVAL;
 491
 492	if (!data || !size)
 493		return -EINVAL;
 494
 495	if (pp_funcs && pp_funcs->read_sensor) {
 496		mutex_lock(&adev->pm.mutex);
 497		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
 498					    sensor,
 499					    data,
 500					    size);
 501		mutex_unlock(&adev->pm.mutex);
 502	}
 503
 504	return ret;
 505}
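
/*
 * Illustrative sketch (not part of the original file): read one sensor
 * through the wrapper above. The size argument is in/out; the GPU
 * temperature sensor reports millidegrees Celsius.
 */
static int __maybe_unused example_read_gpu_temp(struct amdgpu_device *adev,
						int *temp_mdeg)
{
	uint32_t size = sizeof(*temp_mdeg);

	return amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				      temp_mdeg, &size);
}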
 506
 507int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
 508{
 509	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 510	int ret = -EOPNOTSUPP;
 511
 512	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
 513		mutex_lock(&adev->pm.mutex);
 514		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
 515		mutex_unlock(&adev->pm.mutex);
 516	}
 517
 518	return ret;
 519}
 520
 521int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
 522{
 523	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 524	int ret = -EOPNOTSUPP;
 525
 526	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
 527		mutex_lock(&adev->pm.mutex);
 528		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
 529		mutex_unlock(&adev->pm.mutex);
 530	}
 531
 532	return ret;
 533}
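
/*
 * Illustrative sketch (not part of the original file): lower the APU
 * thermal limit only when the requested value is below the current one,
 * combining the two wrappers above.
 */
static int __maybe_unused example_cap_apu_thermal(struct amdgpu_device *adev,
						  uint32_t new_limit)
{
	uint32_t cur = 0;
	int ret;

	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &cur);
	if (ret)
		return ret;

	if (new_limit >= cur)
		return 0;

	return amdgpu_dpm_set_apu_thermal_limit(adev, new_limit);
}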
 534
 535void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 536{
 537	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 538	int i;
 539
 540	if (!adev->pm.dpm_enabled)
 541		return;
 542
 543	if (!pp_funcs->pm_compute_clocks)
 544		return;
 545
 546	if (adev->mode_info.num_crtc)
 547		amdgpu_display_bandwidth_update(adev);
 548
 549	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 550		struct amdgpu_ring *ring = adev->rings[i];
 551		if (ring && ring->sched.ready)
 552			amdgpu_fence_wait_empty(ring);
 553	}
 554
 555	mutex_lock(&adev->pm.mutex);
 556	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
 557	mutex_unlock(&adev->pm.mutex);
 558}
 559
 560void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 561{
 562	int ret = 0;
 563
 564	if (adev->family == AMDGPU_FAMILY_SI) {
 565		mutex_lock(&adev->pm.mutex);
 566		if (enable) {
 567			adev->pm.dpm.uvd_active = true;
 568			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
 569		} else {
 570			adev->pm.dpm.uvd_active = false;
 571		}
 572		mutex_unlock(&adev->pm.mutex);
 573
 574		amdgpu_dpm_compute_clocks(adev);
 575		return;
 576	}
 577
 578	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
 579	if (ret)
  580		DRM_ERROR("DPM %s UVD failed, ret = %d.\n",
 581			  enable ? "enable" : "disable", ret);
 582}
 583
 584void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 585{
 586	int ret = 0;
 587
 588	if (adev->family == AMDGPU_FAMILY_SI) {
 589		mutex_lock(&adev->pm.mutex);
 590		if (enable) {
 591			adev->pm.dpm.vce_active = true;
 592			/* XXX select vce level based on ring/task */
 593			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 594		} else {
 595			adev->pm.dpm.vce_active = false;
 596		}
 597		mutex_unlock(&adev->pm.mutex);
 598
 599		amdgpu_dpm_compute_clocks(adev);
 600		return;
 601	}
 602
 603	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
 604	if (ret)
  605		DRM_ERROR("DPM %s VCE failed, ret = %d.\n",
 606			  enable ? "enable" : "disable", ret);
 607}
 608
 609void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
 610{
 611	int ret = 0;
 612
 613	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
 614	if (ret)
  615		DRM_ERROR("DPM %s JPEG failed, ret = %d.\n",
 616			  enable ? "enable" : "disable", ret);
 617}
 618
 619void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
 620{
 621	int ret = 0;
 622
 623	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
 624	if (ret)
  625		DRM_ERROR("DPM %s VPE failed, ret = %d.\n",
 626			  enable ? "enable" : "disable", ret);
 627}
 628
 629int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 630{
 631	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 632	int r = 0;
 633
 634	if (!pp_funcs || !pp_funcs->load_firmware)
 635		return 0;
 636
 637	mutex_lock(&adev->pm.mutex);
 638	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
 639	if (r) {
 640		pr_err("smu firmware loading failed\n");
 641		goto out;
 642	}
 643
 644	if (smu_version)
 645		*smu_version = adev->pm.fw_version;
 646
 647out:
 648	mutex_unlock(&adev->pm.mutex);
 649	return r;
 650}
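
/*
 * Illustrative sketch (not part of the original file): load the SMU
 * firmware and log the version it reports. The version is cached in
 * adev->pm.fw_version by the backend.
 */
static int __maybe_unused example_bring_up_smu(struct amdgpu_device *adev)
{
	uint32_t smu_version = 0;
	int ret;

	ret = amdgpu_pm_load_smu_firmware(adev, &smu_version);
	if (ret)
		return ret;

	dev_info(adev->dev, "SMU firmware version 0x%08x\n", smu_version);
	return 0;
}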
 651
 652int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
 653{
 654	int ret = 0;
 655
 656	if (is_support_sw_smu(adev)) {
 657		mutex_lock(&adev->pm.mutex);
 658		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
 659						 enable);
 660		mutex_unlock(&adev->pm.mutex);
 661	}
 662
 663	return ret;
 664}
 665
 666int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
 667{
 668	struct smu_context *smu = adev->powerplay.pp_handle;
 669	int ret = 0;
 670
 671	if (!is_support_sw_smu(adev))
 672		return -EOPNOTSUPP;
 673
 674	mutex_lock(&adev->pm.mutex);
 675	ret = smu_send_hbm_bad_pages_num(smu, size);
 676	mutex_unlock(&adev->pm.mutex);
 677
 678	return ret;
 679}
 680
 681int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
 682{
 683	struct smu_context *smu = adev->powerplay.pp_handle;
 684	int ret = 0;
 685
 686	if (!is_support_sw_smu(adev))
 687		return -EOPNOTSUPP;
 688
 689	mutex_lock(&adev->pm.mutex);
 690	ret = smu_send_hbm_bad_channel_flag(smu, size);
 691	mutex_unlock(&adev->pm.mutex);
 692
 693	return ret;
 694}
 695
 696int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 697				  enum pp_clock_type type,
 698				  uint32_t *min,
 699				  uint32_t *max)
 700{
 701	int ret = 0;
 702
 703	if (type != PP_SCLK)
 704		return -EINVAL;
 705
 706	if (!is_support_sw_smu(adev))
 707		return -EOPNOTSUPP;
 708
 709	mutex_lock(&adev->pm.mutex);
 710	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
 711				     SMU_SCLK,
 712				     min,
 713				     max);
 714	mutex_unlock(&adev->pm.mutex);
 715
 716	return ret;
 717}
 718
 719int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 720				   enum pp_clock_type type,
 721				   uint32_t min,
 722				   uint32_t max)
 723{
 724	struct smu_context *smu = adev->powerplay.pp_handle;
 725	int ret = 0;
 726
 727	if (type != PP_SCLK)
 728		return -EINVAL;
 729
 730	if (!is_support_sw_smu(adev))
 731		return -EOPNOTSUPP;
 732
 733	mutex_lock(&adev->pm.mutex);
 734	ret = smu_set_soft_freq_range(smu,
 735				      SMU_SCLK,
 736				      min,
 737				      max);
 738	mutex_unlock(&adev->pm.mutex);
 739
 740	return ret;
 741}
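
/*
 * Illustrative sketch (not part of the original file): pin SCLK to the
 * top of its DPM range by querying the range first and then applying it
 * as a soft min/max. Both wrappers above only accept PP_SCLK.
 */
static int __maybe_unused example_pin_sclk_to_max(struct amdgpu_device *adev)
{
	uint32_t min = 0, max = 0;
	int ret;

	ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min, &max);
	if (ret)
		return ret;

	return amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, max, max);
}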
 742
 743int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
 744{
 745	struct smu_context *smu = adev->powerplay.pp_handle;
 746	int ret = 0;
 747
 748	if (!is_support_sw_smu(adev))
 749		return 0;
 750
 751	mutex_lock(&adev->pm.mutex);
 752	ret = smu_write_watermarks_table(smu);
 753	mutex_unlock(&adev->pm.mutex);
 754
 755	return ret;
 756}
 757
 758int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
 759			      enum smu_event_type event,
 760			      uint64_t event_arg)
 761{
 762	struct smu_context *smu = adev->powerplay.pp_handle;
 763	int ret = 0;
 764
 765	if (!is_support_sw_smu(adev))
 766		return -EOPNOTSUPP;
 767
 768	mutex_lock(&adev->pm.mutex);
 769	ret = smu_wait_for_event(smu, event, event_arg);
 770	mutex_unlock(&adev->pm.mutex);
 771
 772	return ret;
 773}
 774
 775int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
 776{
 777	struct smu_context *smu = adev->powerplay.pp_handle;
 778	int ret = 0;
 779
 780	if (!is_support_sw_smu(adev))
 781		return -EOPNOTSUPP;
 782
 783	mutex_lock(&adev->pm.mutex);
 784	ret = smu_set_residency_gfxoff(smu, value);
 785	mutex_unlock(&adev->pm.mutex);
 786
 787	return ret;
 788}
 789
 790int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
 791{
 792	struct smu_context *smu = adev->powerplay.pp_handle;
 793	int ret = 0;
 794
 795	if (!is_support_sw_smu(adev))
 796		return -EOPNOTSUPP;
 797
 798	mutex_lock(&adev->pm.mutex);
 799	ret = smu_get_residency_gfxoff(smu, value);
 800	mutex_unlock(&adev->pm.mutex);
 801
 802	return ret;
 803}
 804
 805int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
 806{
 807	struct smu_context *smu = adev->powerplay.pp_handle;
 808	int ret = 0;
 809
 810	if (!is_support_sw_smu(adev))
 811		return -EOPNOTSUPP;
 812
 813	mutex_lock(&adev->pm.mutex);
 814	ret = smu_get_entrycount_gfxoff(smu, value);
 815	mutex_unlock(&adev->pm.mutex);
 816
 817	return ret;
 818}
 819
 820int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
 821{
 822	struct smu_context *smu = adev->powerplay.pp_handle;
 823	int ret = 0;
 824
 825	if (!is_support_sw_smu(adev))
 826		return -EOPNOTSUPP;
 827
 828	mutex_lock(&adev->pm.mutex);
 829	ret = smu_get_status_gfxoff(smu, value);
 830	mutex_unlock(&adev->pm.mutex);
 831
 832	return ret;
 833}
 834
 835uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 836{
 837	struct smu_context *smu = adev->powerplay.pp_handle;
 838
 839	if (!is_support_sw_smu(adev))
 840		return 0;
 841
 842	return atomic64_read(&smu->throttle_int_counter);
 843}
 844
  845	/**
  846	 * amdgpu_dpm_gfx_state_change - Handle a gfx power state change
  847	 * @adev: amdgpu_device pointer
  848	 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
  849	 */
 850void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 851				 enum gfx_change_state state)
 852{
 853	mutex_lock(&adev->pm.mutex);
 854	if (adev->powerplay.pp_funcs &&
 855	    adev->powerplay.pp_funcs->gfx_state_change_set)
 856		((adev)->powerplay.pp_funcs->gfx_state_change_set(
 857			(adev)->powerplay.pp_handle, state));
 858	mutex_unlock(&adev->pm.mutex);
 859}
 860
 861int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 862			    void *umc_ecc)
 863{
 864	struct smu_context *smu = adev->powerplay.pp_handle;
 865	int ret = 0;
 866
 867	if (!is_support_sw_smu(adev))
 868		return -EOPNOTSUPP;
 869
 870	mutex_lock(&adev->pm.mutex);
 871	ret = smu_get_ecc_info(smu, umc_ecc);
 872	mutex_unlock(&adev->pm.mutex);
 873
 874	return ret;
 875}
 876
 877struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
 878						     uint32_t idx)
 879{
 880	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 881	struct amd_vce_state *vstate = NULL;
 882
 883	if (!pp_funcs->get_vce_clock_state)
 884		return NULL;
 885
 886	mutex_lock(&adev->pm.mutex);
 887	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
 888					       idx);
 889	mutex_unlock(&adev->pm.mutex);
 890
 891	return vstate;
 892}
 893
 894void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 895					enum amd_pm_state_type *state)
 896{
 897	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 898
 899	mutex_lock(&adev->pm.mutex);
 900
 901	if (!pp_funcs->get_current_power_state) {
 902		*state = adev->pm.dpm.user_state;
 903		goto out;
 904	}
 905
 906	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
 907	if (*state < POWER_STATE_TYPE_DEFAULT ||
 908	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
 909		*state = adev->pm.dpm.user_state;
 910
 911out:
 912	mutex_unlock(&adev->pm.mutex);
 913}
 914
 915void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 916				enum amd_pm_state_type state)
 917{
 918	mutex_lock(&adev->pm.mutex);
 919	adev->pm.dpm.user_state = state;
 920	mutex_unlock(&adev->pm.mutex);
 921
 922	if (is_support_sw_smu(adev))
 923		return;
 924
 925	if (amdgpu_dpm_dispatch_task(adev,
 926				     AMD_PP_TASK_ENABLE_USER_STATE,
 927				     &state) == -EOPNOTSUPP)
 928		amdgpu_dpm_compute_clocks(adev);
 929}
 930
 931enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
 932{
 933	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 934	enum amd_dpm_forced_level level;
 935
 936	if (!pp_funcs)
 937		return AMD_DPM_FORCED_LEVEL_AUTO;
 938
 939	mutex_lock(&adev->pm.mutex);
 940	if (pp_funcs->get_performance_level)
 941		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
 942	else
 943		level = adev->pm.dpm.forced_level;
 944	mutex_unlock(&adev->pm.mutex);
 945
 946	return level;
 947}
 948
 949int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 950				       enum amd_dpm_forced_level level)
 951{
 952	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 953	enum amd_dpm_forced_level current_level;
 954	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
 955					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
 956					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
 957					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
 958
 959	if (!pp_funcs || !pp_funcs->force_performance_level)
 960		return 0;
 961
 962	if (adev->pm.dpm.thermal_active)
 963		return -EINVAL;
 964
 965	current_level = amdgpu_dpm_get_performance_level(adev);
 966	if (current_level == level)
 967		return 0;
 968
 969	if (adev->asic_type == CHIP_RAVEN) {
 970		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
 971			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
 972			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
 973				amdgpu_gfx_off_ctrl(adev, false);
 974			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
 975				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
 976				amdgpu_gfx_off_ctrl(adev, true);
 977		}
 978	}
 979
 980	if (!(current_level & profile_mode_mask) &&
 981	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
 982		return -EINVAL;
 983
 984	if (!(current_level & profile_mode_mask) &&
 985	      (level & profile_mode_mask)) {
 986		/* enter UMD Pstate */
 987		amdgpu_device_ip_set_powergating_state(adev,
 988						       AMD_IP_BLOCK_TYPE_GFX,
 989						       AMD_PG_STATE_UNGATE);
 990		amdgpu_device_ip_set_clockgating_state(adev,
 991						       AMD_IP_BLOCK_TYPE_GFX,
 992						       AMD_CG_STATE_UNGATE);
 993	} else if ((current_level & profile_mode_mask) &&
 994		    !(level & profile_mode_mask)) {
 995		/* exit UMD Pstate */
 996		amdgpu_device_ip_set_clockgating_state(adev,
 997						       AMD_IP_BLOCK_TYPE_GFX,
 998						       AMD_CG_STATE_GATE);
 999		amdgpu_device_ip_set_powergating_state(adev,
1000						       AMD_IP_BLOCK_TYPE_GFX,
1001						       AMD_PG_STATE_GATE);
1002	}
1003
1004	mutex_lock(&adev->pm.mutex);
1005
1006	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
1007					      level)) {
1008		mutex_unlock(&adev->pm.mutex);
1009		return -EINVAL;
1010	}
1011
1012	adev->pm.dpm.forced_level = level;
1013
1014	mutex_unlock(&adev->pm.mutex);
1015
1016	return 0;
1017}
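
/*
 * Illustrative sketch (not part of the original file): temporarily force
 * the manual performance level through the wrapper above and restore the
 * previous level afterwards.
 */
static int __maybe_unused example_with_manual_level(struct amdgpu_device *adev)
{
	enum amd_dpm_forced_level old = amdgpu_dpm_get_performance_level(adev);
	int ret;

	ret = amdgpu_dpm_force_performance_level(adev,
						 AMD_DPM_FORCED_LEVEL_MANUAL);
	if (ret)
		return ret;

	/* ... program explicit clock levels here ... */

	return amdgpu_dpm_force_performance_level(adev, old);
}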
1018
1019int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
1020				 struct pp_states_info *states)
1021{
1022	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1023	int ret = 0;
1024
1025	if (!pp_funcs->get_pp_num_states)
1026		return -EOPNOTSUPP;
1027
1028	mutex_lock(&adev->pm.mutex);
1029	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
1030					  states);
1031	mutex_unlock(&adev->pm.mutex);
1032
1033	return ret;
1034}
1035
1036int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
1037			      enum amd_pp_task task_id,
1038			      enum amd_pm_state_type *user_state)
1039{
1040	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1041	int ret = 0;
1042
1043	if (!pp_funcs->dispatch_tasks)
1044		return -EOPNOTSUPP;
1045
1046	mutex_lock(&adev->pm.mutex);
1047	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
1048				       task_id,
1049				       user_state);
1050	mutex_unlock(&adev->pm.mutex);
1051
1052	return ret;
1053}
1054
1055int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1056{
1057	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1058	int ret = 0;
1059
1060	if (!pp_funcs->get_pp_table)
1061		return 0;
1062
1063	mutex_lock(&adev->pm.mutex);
1064	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
1065				     table);
1066	mutex_unlock(&adev->pm.mutex);
1067
1068	return ret;
1069}
1070
1071int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
1072				      uint32_t type,
1073				      long *input,
1074				      uint32_t size)
1075{
1076	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1077	int ret = 0;
1078
1079	if (!pp_funcs->set_fine_grain_clk_vol)
1080		return 0;
1081
1082	mutex_lock(&adev->pm.mutex);
1083	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1084					       type,
1085					       input,
1086					       size);
1087	mutex_unlock(&adev->pm.mutex);
1088
1089	return ret;
1090}
1091
1092int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
1093				  uint32_t type,
1094				  long *input,
1095				  uint32_t size)
1096{
1097	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1098	int ret = 0;
1099
1100	if (!pp_funcs->odn_edit_dpm_table)
1101		return 0;
1102
1103	mutex_lock(&adev->pm.mutex);
1104	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1105					   type,
1106					   input,
1107					   size);
1108	mutex_unlock(&adev->pm.mutex);
1109
1110	return ret;
1111}
1112
1113int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1114				  enum pp_clock_type type,
1115				  char *buf)
1116{
1117	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1118	int ret = 0;
1119
1120	if (!pp_funcs->print_clock_levels)
1121		return 0;
1122
1123	mutex_lock(&adev->pm.mutex);
1124	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1125					   type,
1126					   buf);
1127	mutex_unlock(&adev->pm.mutex);
1128
1129	return ret;
1130}
1131
1132int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1133				  enum pp_clock_type type,
1134				  char *buf,
1135				  int *offset)
1136{
1137	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1138	int ret = 0;
1139
1140	if (!pp_funcs->emit_clock_levels)
1141		return -ENOENT;
1142
1143	mutex_lock(&adev->pm.mutex);
1144	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1145					   type,
1146					   buf,
1147					   offset);
1148	mutex_unlock(&adev->pm.mutex);
1149
1150	return ret;
1151}
1152
1153int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1154				    uint64_t ppfeature_masks)
1155{
1156	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1157	int ret = 0;
1158
1159	if (!pp_funcs->set_ppfeature_status)
1160		return 0;
1161
1162	mutex_lock(&adev->pm.mutex);
1163	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1164					     ppfeature_masks);
1165	mutex_unlock(&adev->pm.mutex);
1166
1167	return ret;
1168}
1169
1170int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1171{
1172	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1173	int ret = 0;
1174
1175	if (!pp_funcs->get_ppfeature_status)
1176		return 0;
1177
1178	mutex_lock(&adev->pm.mutex);
1179	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1180					     buf);
1181	mutex_unlock(&adev->pm.mutex);
1182
1183	return ret;
1184}
1185
1186int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1187				 enum pp_clock_type type,
1188				 uint32_t mask)
1189{
1190	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1191	int ret = 0;
1192
1193	if (!pp_funcs->force_clock_level)
1194		return 0;
1195
1196	mutex_lock(&adev->pm.mutex);
1197	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1198					  type,
1199					  mask);
1200	mutex_unlock(&adev->pm.mutex);
1201
1202	return ret;
1203}
1204
1205int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1206{
1207	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1208	int ret = 0;
1209
1210	if (!pp_funcs->get_sclk_od)
1211		return -EOPNOTSUPP;
1212
1213	mutex_lock(&adev->pm.mutex);
1214	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1215	mutex_unlock(&adev->pm.mutex);
1216
1217	return ret;
1218}
1219
1220int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1221{
1222	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1223
1224	if (is_support_sw_smu(adev))
1225		return -EOPNOTSUPP;
1226
1227	mutex_lock(&adev->pm.mutex);
1228	if (pp_funcs->set_sclk_od)
1229		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1230	mutex_unlock(&adev->pm.mutex);
1231
1232	if (amdgpu_dpm_dispatch_task(adev,
1233				     AMD_PP_TASK_READJUST_POWER_STATE,
1234				     NULL) == -EOPNOTSUPP) {
1235		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1236		amdgpu_dpm_compute_clocks(adev);
1237	}
1238
1239	return 0;
1240}
1241
1242int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1243{
1244	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1245	int ret = 0;
1246
1247	if (!pp_funcs->get_mclk_od)
1248		return -EOPNOTSUPP;
1249
1250	mutex_lock(&adev->pm.mutex);
1251	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1252	mutex_unlock(&adev->pm.mutex);
1253
1254	return ret;
1255}
1256
1257int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1258{
1259	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1260
1261	if (is_support_sw_smu(adev))
1262		return -EOPNOTSUPP;
1263
1264	mutex_lock(&adev->pm.mutex);
1265	if (pp_funcs->set_mclk_od)
1266		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1267	mutex_unlock(&adev->pm.mutex);
1268
1269	if (amdgpu_dpm_dispatch_task(adev,
1270				     AMD_PP_TASK_READJUST_POWER_STATE,
1271				     NULL) == -EOPNOTSUPP) {
1272		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1273		amdgpu_dpm_compute_clocks(adev);
1274	}
1275
1276	return 0;
1277}
1278
1279int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1280				      char *buf)
1281{
1282	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1283	int ret = 0;
1284
1285	if (!pp_funcs->get_power_profile_mode)
1286		return -EOPNOTSUPP;
1287
1288	mutex_lock(&adev->pm.mutex);
1289	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1290					       buf);
1291	mutex_unlock(&adev->pm.mutex);
1292
1293	return ret;
1294}
1295
1296int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1297				      long *input, uint32_t size)
1298{
1299	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1300	int ret = 0;
1301
1302	if (!pp_funcs->set_power_profile_mode)
1303		return 0;
1304
1305	mutex_lock(&adev->pm.mutex);
1306	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1307					       input,
1308					       size);
1309	mutex_unlock(&adev->pm.mutex);
1310
1311	return ret;
1312}
1313
1314int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1315{
1316	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1317	int ret = 0;
1318
1319	if (!pp_funcs->get_gpu_metrics)
1320		return 0;
1321
1322	mutex_lock(&adev->pm.mutex);
1323	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1324					table);
1325	mutex_unlock(&adev->pm.mutex);
1326
1327	return ret;
1328}
1329
1330ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
1331				  size_t size)
1332{
1333	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1334	int ret = 0;
1335
1336	if (!pp_funcs->get_pm_metrics)
1337		return -EOPNOTSUPP;
1338
1339	mutex_lock(&adev->pm.mutex);
1340	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
1341				       size);
1342	mutex_unlock(&adev->pm.mutex);
1343
1344	return ret;
1345}
1346
1347int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1348				    uint32_t *fan_mode)
1349{
1350	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1351	int ret = 0;
1352
1353	if (!pp_funcs->get_fan_control_mode)
1354		return -EOPNOTSUPP;
1355
1356	mutex_lock(&adev->pm.mutex);
1357	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1358					     fan_mode);
1359	mutex_unlock(&adev->pm.mutex);
1360
1361	return ret;
1362}
1363
1364int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
1365				 uint32_t speed)
1366{
1367	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1368	int ret = 0;
1369
1370	if (!pp_funcs->set_fan_speed_pwm)
1371		return -EOPNOTSUPP;
1372
1373	mutex_lock(&adev->pm.mutex);
1374	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1375					  speed);
1376	mutex_unlock(&adev->pm.mutex);
1377
1378	return ret;
1379}
1380
1381int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
1382				 uint32_t *speed)
1383{
1384	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1385	int ret = 0;
1386
1387	if (!pp_funcs->get_fan_speed_pwm)
1388		return -EOPNOTSUPP;
1389
1390	mutex_lock(&adev->pm.mutex);
1391	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1392					  speed);
1393	mutex_unlock(&adev->pm.mutex);
1394
1395	return ret;
1396}
1397
1398int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
1399				 uint32_t *speed)
1400{
1401	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1402	int ret = 0;
1403
1404	if (!pp_funcs->get_fan_speed_rpm)
1405		return -EOPNOTSUPP;
1406
1407	mutex_lock(&adev->pm.mutex);
1408	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1409					  speed);
1410	mutex_unlock(&adev->pm.mutex);
1411
1412	return ret;
1413}
1414
1415int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
1416				 uint32_t speed)
1417{
1418	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1419	int ret = 0;
1420
1421	if (!pp_funcs->set_fan_speed_rpm)
1422		return -EOPNOTSUPP;
1423
1424	mutex_lock(&adev->pm.mutex);
1425	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1426					  speed);
1427	mutex_unlock(&adev->pm.mutex);
1428
1429	return ret;
1430}
1431
1432int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
1433				    uint32_t mode)
1434{
1435	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1436	int ret = 0;
1437
1438	if (!pp_funcs->set_fan_control_mode)
1439		return -EOPNOTSUPP;
1440
1441	mutex_lock(&adev->pm.mutex);
1442	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1443					     mode);
1444	mutex_unlock(&adev->pm.mutex);
1445
1446	return ret;
1447}
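
/*
 * Illustrative sketch (not part of the original file): switch the fan to
 * manual control before programming a fixed PWM duty cycle, reverting to
 * automatic control if that fails. AMD_FAN_CTRL_MANUAL/_AUTO come from
 * enum amd_fan_ctrl_mode.
 */
static int __maybe_unused example_fixed_fan_pwm(struct amdgpu_device *adev,
						uint32_t pwm)
{
	int ret;

	ret = amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL);
	if (ret)
		return ret;

	ret = amdgpu_dpm_set_fan_speed_pwm(adev, pwm);
	if (ret)
		amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_AUTO);

	return ret;
}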
1448
1449int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
1450			       uint32_t *limit,
1451			       enum pp_power_limit_level pp_limit_level,
1452			       enum pp_power_type power_type)
1453{
1454	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1455	int ret = 0;
1456
1457	if (!pp_funcs->get_power_limit)
1458		return -ENODATA;
1459
1460	mutex_lock(&adev->pm.mutex);
1461	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1462					limit,
1463					pp_limit_level,
1464					power_type);
1465	mutex_unlock(&adev->pm.mutex);
1466
1467	return ret;
1468}
1469
1470int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
1471			       uint32_t limit)
1472{
1473	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1474	int ret = 0;
1475
1476	if (!pp_funcs->set_power_limit)
1477		return -EINVAL;
1478
1479	mutex_lock(&adev->pm.mutex);
1480	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1481					limit);
1482	mutex_unlock(&adev->pm.mutex);
1483
1484	return ret;
1485}
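
/*
 * Illustrative sketch (not part of the original file): clamp a requested
 * sustained power limit against the board maximum using the two wrappers
 * above. PP_PWR_LIMIT_MAX and PP_PWR_TYPE_SUSTAINED come from
 * kgd_pp_interface.h.
 */
static int __maybe_unused example_set_capped_power(struct amdgpu_device *adev,
						   uint32_t limit)
{
	uint32_t max = 0;
	int ret;

	ret = amdgpu_dpm_get_power_limit(adev, &max, PP_PWR_LIMIT_MAX,
					 PP_PWR_TYPE_SUSTAINED);
	if (ret)
		return ret;

	return amdgpu_dpm_set_power_limit(adev, min(limit, max));
}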
1486
1487int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
1488{
1489	bool cclk_dpm_supported = false;
1490
1491	if (!is_support_sw_smu(adev))
1492		return false;
1493
1494	mutex_lock(&adev->pm.mutex);
1495	cclk_dpm_supported = is_support_cclk_dpm(adev);
1496	mutex_unlock(&adev->pm.mutex);
1497
1498	return (int)cclk_dpm_supported;
1499}
1500
1501int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
1502						       struct seq_file *m)
1503{
1504	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1505
1506	if (!pp_funcs->debugfs_print_current_performance_level)
1507		return -EOPNOTSUPP;
1508
1509	mutex_lock(&adev->pm.mutex);
1510	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1511							  m);
1512	mutex_unlock(&adev->pm.mutex);
1513
1514	return 0;
1515}
1516
1517int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
1518				       void **addr,
1519				       size_t *size)
1520{
1521	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1522	int ret = 0;
1523
1524	if (!pp_funcs->get_smu_prv_buf_details)
1525		return -ENOSYS;
1526
1527	mutex_lock(&adev->pm.mutex);
1528	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1529						addr,
1530						size);
1531	mutex_unlock(&adev->pm.mutex);
1532
1533	return ret;
1534}
1535
1536int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
1537{
1538	if (is_support_sw_smu(adev)) {
1539		struct smu_context *smu = adev->powerplay.pp_handle;
1540
1541		return (smu->od_enabled || smu->is_apu);
1542	} else {
1543		struct pp_hwmgr *hwmgr;
1544
 1545		/*
 1546		 * DPM on some legacy ASICs doesn't carry the od_enabled member,
 1547		 * as their pp_handle is cast directly from adev.
 1548		 */
1549		if (amdgpu_dpm_is_legacy_dpm(adev))
1550			return false;
1551
1552		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1553
1554		return hwmgr->od_enabled;
1555	}
1556}
1557
1558int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
1559			    const char *buf,
1560			    size_t size)
1561{
1562	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1563	int ret = 0;
1564
1565	if (!pp_funcs->set_pp_table)
1566		return -EOPNOTSUPP;
1567
1568	mutex_lock(&adev->pm.mutex);
1569	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1570				     buf,
1571				     size);
1572	mutex_unlock(&adev->pm.mutex);
1573
1574	return ret;
1575}
1576
1577int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
1578{
1579	struct smu_context *smu = adev->powerplay.pp_handle;
1580
1581	if (!is_support_sw_smu(adev))
1582		return INT_MAX;
1583
1584	return smu->cpu_core_num;
1585}
1586
1587void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
1588{
1589	if (!is_support_sw_smu(adev))
1590		return;
1591
1592	amdgpu_smu_stb_debug_fs_init(adev);
1593}
1594
1595int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1596					    const struct amd_pp_display_configuration *input)
1597{
1598	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1599	int ret = 0;
1600
1601	if (!pp_funcs->display_configuration_change)
1602		return 0;
1603
1604	mutex_lock(&adev->pm.mutex);
1605	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1606						     input);
1607	mutex_unlock(&adev->pm.mutex);
1608
1609	return ret;
1610}
1611
1612int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1613				 enum amd_pp_clock_type type,
1614				 struct amd_pp_clocks *clocks)
1615{
1616	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1617	int ret = 0;
1618
1619	if (!pp_funcs->get_clock_by_type)
1620		return 0;
1621
1622	mutex_lock(&adev->pm.mutex);
1623	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1624					  type,
1625					  clocks);
1626	mutex_unlock(&adev->pm.mutex);
1627
1628	return ret;
1629}
1630
1631int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1632						struct amd_pp_simple_clock_info *clocks)
1633{
1634	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1635	int ret = 0;
1636
1637	if (!pp_funcs->get_display_mode_validation_clocks)
1638		return 0;
1639
1640	mutex_lock(&adev->pm.mutex);
1641	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1642							   clocks);
1643	mutex_unlock(&adev->pm.mutex);
1644
1645	return ret;
1646}
1647
1648int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1649					      enum amd_pp_clock_type type,
1650					      struct pp_clock_levels_with_latency *clocks)
1651{
1652	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1653	int ret = 0;
1654
1655	if (!pp_funcs->get_clock_by_type_with_latency)
1656		return 0;
1657
1658	mutex_lock(&adev->pm.mutex);
1659	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1660						       type,
1661						       clocks);
1662	mutex_unlock(&adev->pm.mutex);
1663
1664	return ret;
1665}
1666
1667int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1668					      enum amd_pp_clock_type type,
1669					      struct pp_clock_levels_with_voltage *clocks)
1670{
1671	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1672	int ret = 0;
1673
1674	if (!pp_funcs->get_clock_by_type_with_voltage)
1675		return 0;
1676
1677	mutex_lock(&adev->pm.mutex);
1678	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1679						       type,
1680						       clocks);
1681	mutex_unlock(&adev->pm.mutex);
1682
1683	return ret;
1684}
1685
1686int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1687					       void *clock_ranges)
1688{
1689	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1690	int ret = 0;
1691
1692	if (!pp_funcs->set_watermarks_for_clocks_ranges)
1693		return -EOPNOTSUPP;
1694
1695	mutex_lock(&adev->pm.mutex);
1696	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1697							 clock_ranges);
1698	mutex_unlock(&adev->pm.mutex);
1699
1700	return ret;
1701}
1702
1703int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1704					     struct pp_display_clock_request *clock)
1705{
1706	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1707	int ret = 0;
1708
1709	if (!pp_funcs->display_clock_voltage_request)
1710		return -EOPNOTSUPP;
1711
1712	mutex_lock(&adev->pm.mutex);
1713	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1714						      clock);
1715	mutex_unlock(&adev->pm.mutex);
1716
1717	return ret;
1718}
1719
1720int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1721				  struct amd_pp_clock_info *clocks)
1722{
1723	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1724	int ret = 0;
1725
1726	if (!pp_funcs->get_current_clocks)
1727		return -EOPNOTSUPP;
1728
1729	mutex_lock(&adev->pm.mutex);
1730	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1731					   clocks);
1732	mutex_unlock(&adev->pm.mutex);
1733
1734	return ret;
1735}
1736
1737void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1738{
1739	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1740
1741	if (!pp_funcs->notify_smu_enable_pwe)
1742		return;
1743
1744	mutex_lock(&adev->pm.mutex);
1745	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1746	mutex_unlock(&adev->pm.mutex);
1747}
1748
1749int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1750					uint32_t count)
1751{
1752	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1753	int ret = 0;
1754
1755	if (!pp_funcs->set_active_display_count)
1756		return -EOPNOTSUPP;
1757
1758	mutex_lock(&adev->pm.mutex);
1759	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1760						 count);
1761	mutex_unlock(&adev->pm.mutex);
1762
1763	return ret;
1764}
1765
1766int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1767					  uint32_t clock)
1768{
1769	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1770	int ret = 0;
1771
1772	if (!pp_funcs->set_min_deep_sleep_dcefclk)
1773		return -EOPNOTSUPP;
1774
1775	mutex_lock(&adev->pm.mutex);
1776	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1777						   clock);
1778	mutex_unlock(&adev->pm.mutex);
1779
1780	return ret;
1781}
1782
1783void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1784					     uint32_t clock)
1785{
1786	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1787
1788	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1789		return;
1790
1791	mutex_lock(&adev->pm.mutex);
1792	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1793					       clock);
1794	mutex_unlock(&adev->pm.mutex);
1795}
1796
1797void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1798					  uint32_t clock)
1799{
1800	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1801
1802	if (!pp_funcs->set_hard_min_fclk_by_freq)
1803		return;
1804
1805	mutex_lock(&adev->pm.mutex);
1806	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1807					    clock);
1808	mutex_unlock(&adev->pm.mutex);
1809}
1810
1811int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1812						   bool disable_memory_clock_switch)
1813{
1814	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1815	int ret = 0;
1816
1817	if (!pp_funcs->display_disable_memory_clock_switch)
1818		return 0;
1819
1820	mutex_lock(&adev->pm.mutex);
1821	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1822							    disable_memory_clock_switch);
1823	mutex_unlock(&adev->pm.mutex);
1824
1825	return ret;
1826}
1827
1828int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1829						struct pp_smu_nv_clock_table *max_clocks)
1830{
1831	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1832	int ret = 0;
1833
1834	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1835		return -EOPNOTSUPP;
1836
1837	mutex_lock(&adev->pm.mutex);
1838	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1839							 max_clocks);
1840	mutex_unlock(&adev->pm.mutex);
1841
1842	return ret;
1843}
1844
1845enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
1846						  unsigned int *clock_values_in_khz,
1847						  unsigned int *num_states)
1848{
1849	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1850	int ret = 0;
1851
1852	if (!pp_funcs->get_uclk_dpm_states)
1853		return -EOPNOTSUPP;
1854
1855	mutex_lock(&adev->pm.mutex);
1856	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
1857					    clock_values_in_khz,
1858					    num_states);
1859	mutex_unlock(&adev->pm.mutex);
1860
1861	return ret;
1862}
1863
1864int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
1865				   struct dpm_clocks *clock_table)
1866{
1867	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1868	int ret = 0;
1869
1870	if (!pp_funcs->get_dpm_clock_table)
1871		return -EOPNOTSUPP;
1872
1873	mutex_lock(&adev->pm.mutex);
1874	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
1875					    clock_table);
1876	mutex_unlock(&adev->pm.mutex);
1877
1878	return ret;
1879}