Linux Audio

Check our new training course

Loading...
v6.9.4
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "pp_debug.h"
  24#include <linux/types.h>
  25#include <linux/kernel.h>
  26#include <linux/gfp.h>
  27#include <linux/slab.h>
  28#include <linux/firmware.h>
  29#include <linux/reboot.h>
  30#include "amd_shared.h"
  31#include "amd_powerplay.h"
  32#include "power_state.h"
  33#include "amdgpu.h"
  34#include "hwmgr.h"
  35#include "amdgpu_dpm_internal.h"
  36#include "amdgpu_display.h"
  37
  38static const struct amd_pm_funcs pp_dpm_funcs;
  39
/*
 * amd_powerplay_create - allocate and seed the powerplay hwmgr context.
 *
 * Allocates a zeroed struct pp_hwmgr, copies the chip identification and
 * feature mask out of @adev, and publishes the handle and the dispatch
 * table (pp_dpm_funcs) into adev->powerplay.
 *
 * Returns 0 on success, -EINVAL for a NULL device, -ENOMEM on allocation
 * failure.
 */
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	/* pm_en and friends differ under SR-IOV; record host/guest role. */
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	/* NOTE(review): amdgpu_cgs_create_device() result is not checked;
	 * presumably later init copes with a NULL cgs device — confirm. */
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}
  63
  64
  65static void amd_powerplay_destroy(struct amdgpu_device *adev)
  66{
  67	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
  68
  69	mutex_destroy(&hwmgr->msg_lock);
  70
  71	kfree(hwmgr->hardcode_pp_table);
  72	hwmgr->hardcode_pp_table = NULL;
  73
  74	kfree(hwmgr);
  75	hwmgr = NULL;
  76}
  77
/*
 * pp_early_init - IP-block early_init hook.
 *
 * Creates the powerplay context and runs hwmgr early initialization
 * (per-ASIC function-table selection).  Returns 0 or a negative errno.
 */
static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	/* NOTE(review): on hwmgr_early_init() failure the hwmgr allocated
	 * above is not freed here; presumably the IP-block error path still
	 * reaches pp_late_fini() — confirm. */
	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}
  94
/*
 * pp_swctf_delayed_work_handler - delayed reaction to a software CTF
 * (critical thermal fault) event.
 *
 * Re-reads the GPU temperature after the enforced delay; if it has dropped
 * back below the SW CTF threshold nothing is done, otherwise the machine
 * is shut down gracefully to protect the hardware.
 */
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
				&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size;
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		/* NOTE(review): 'size' is passed uninitialized; presumably
		 * read_sensor() treats it purely as an out-parameter — confirm. */
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * For some legacy ASICs, hotspot temperature retrieving might be not
		 * supported. Check the edge temperature instead then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		/* The sensor value is divided by 1000 before comparison, i.e.
		 * it is reported in millidegrees vs a threshold in degrees. */
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}
 133
 134static int pp_sw_init(void *handle)
 135{
 136	struct amdgpu_device *adev = handle;
 137	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 138	int ret = 0;
 139
 140	ret = hwmgr_sw_init(hwmgr);
 141
 142	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
 143
 144	if (!ret)
 145		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
 146				  pp_swctf_delayed_work_handler);
 147
 148	return ret;
 149}
 150
 151static int pp_sw_fini(void *handle)
 152{
 153	struct amdgpu_device *adev = handle;
 154	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 155
 156	hwmgr_sw_fini(hwmgr);
 157
 158	amdgpu_ucode_release(&adev->pm.fw);
 
 159
 160	return 0;
 161}
 162
 163static int pp_hw_init(void *handle)
 164{
 165	int ret = 0;
 166	struct amdgpu_device *adev = handle;
 167	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 168
 169	ret = hwmgr_hw_init(hwmgr);
 170
 171	if (ret)
 172		pr_err("powerplay hw init failed\n");
 173
 174	return ret;
 175}
 176
 177static int pp_hw_fini(void *handle)
 178{
 179	struct amdgpu_device *adev = handle;
 180	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 181
 182	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
 183
 184	hwmgr_hw_fini(hwmgr);
 185
 186	return 0;
 187}
 188
/*
 * pp_reserve_vram_for_smu - allocate a GTT buffer for SMU private use and
 * hand its CPU/GPU addresses to the SMU via notify_cac_buffer_info().
 *
 * Failures are logged only; on notification failure the buffer is freed
 * again and power management continues without it.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* NOTE(review): if notify_cac_buffer_info is not implemented, r keeps
	 * its -EINVAL initializer and the buffer is freed below — confirm that
	 * freeing (rather than keeping) the buffer is intended in that case. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
 219
 220static int pp_late_init(void *handle)
 221{
 222	struct amdgpu_device *adev = handle;
 223	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 224
 225	if (hwmgr && hwmgr->pm_en)
 226		hwmgr_handle_task(hwmgr,
 227					AMD_PP_TASK_COMPLETE_INIT, NULL);
 228	if (adev->pm.smu_prv_buffer_size != 0)
 229		pp_reserve_vram_for_smu(adev);
 230
 231	return 0;
 232}
 233
 234static void pp_late_fini(void *handle)
 235{
 236	struct amdgpu_device *adev = handle;
 237
 238	if (adev->pm.smu_prv_buffer)
 239		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
 240	amd_powerplay_destroy(adev);
 241}
 242
 243
/* IP-block is_idle hook: powerplay never reports itself idle. */
static bool pp_is_idle(void *handle)
{
	return false;
}
 248
/* IP-block wait_for_idle hook: nothing to wait for, always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
 253
/* IP-block soft_reset hook: no soft reset implemented for powerplay. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
 258
/* IP-block set_powergating_state hook: nothing to do for powerplay. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
 264
 265static int pp_suspend(void *handle)
 266{
 267	struct amdgpu_device *adev = handle;
 268	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 269
 270	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
 271
 272	return hwmgr_suspend(hwmgr);
 273}
 274
 275static int pp_resume(void *handle)
 276{
 277	struct amdgpu_device *adev = handle;
 278	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 279
 280	return hwmgr_resume(hwmgr);
 281}
 282
/* IP-block set_clockgating_state hook: nothing to do for powerplay. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
 288
/* IP-block callback table gluing powerplay into the amdgpu IP framework. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
 306
/* Version descriptor registering powerplay as the SMC IP block (v1.0.0). */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
 315
 316/* This interface only be supported On Vi,
 317 * because only smu7/8 can help to load gfx/sdma fw,
 318 * smu need to be enabled before load other ip's fw.
 319 * so call start smu to load smu7 fw and other ip's fw
 320 */
 321static int pp_dpm_load_fw(void *handle)
 322{
 323	struct pp_hwmgr *hwmgr = handle;
 324
 325	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
 326		return -EINVAL;
 327
 328	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
 329		pr_err("fw load failed\n");
 330		return -EINVAL;
 331	}
 332
 333	return 0;
 334}
 335
/* Firmware-loading completion callback: nothing extra to do here. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
 340
 341static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 342{
 343	struct pp_hwmgr *hwmgr = handle;
 344
 345	if (!hwmgr || !hwmgr->pm_en)
 346		return -EINVAL;
 347
 348	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
 349		pr_info_ratelimited("%s was not implemented.\n", __func__);
 350		return 0;
 351	}
 352
 353	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
 354}
 355
/*
 * pp_dpm_en_umd_pstate - track entry/exit of the UMD profiling pstates.
 *
 * On entering a profile level the current dpm level is saved and
 * en_umd_pstate is set; on leaving, PROFILE_EXIT restores the saved level.
 * @level may be rewritten in place (the EXIT -> saved-level substitution).
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}
 379
 380static int pp_dpm_force_performance_level(void *handle,
 381					enum amd_dpm_forced_level level)
 382{
 383	struct pp_hwmgr *hwmgr = handle;
 384
 385	if (!hwmgr || !hwmgr->pm_en)
 386		return -EINVAL;
 387
 388	if (level == hwmgr->dpm_level)
 389		return 0;
 390
 391	pp_dpm_en_umd_pstate(hwmgr, &level);
 392	hwmgr->request_dpm_level = level;
 393	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
 394
 395	return 0;
 396}
 397
/*
 * Return the currently forced DPM level.
 *
 * NOTE(review): returns -EINVAL through an enum return type when pm is
 * disabled; callers are presumably expected to treat negative values as
 * errors — confirm.
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}
 408
 409static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 410{
 411	struct pp_hwmgr *hwmgr = handle;
 412
 413	if (!hwmgr || !hwmgr->pm_en)
 414		return 0;
 415
 416	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 417		pr_info_ratelimited("%s was not implemented.\n", __func__);
 418		return 0;
 419	}
 420	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
 421}
 422
 423static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 424{
 425	struct pp_hwmgr *hwmgr = handle;
 426
 427	if (!hwmgr || !hwmgr->pm_en)
 428		return 0;
 429
 430	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 431		pr_info_ratelimited("%s was not implemented.\n", __func__);
 432		return 0;
 433	}
 434	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 435}
 436
 437static void pp_dpm_powergate_vce(void *handle, bool gate)
 438{
 439	struct pp_hwmgr *hwmgr = handle;
 440
 441	if (!hwmgr || !hwmgr->pm_en)
 442		return;
 443
 444	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 445		pr_info_ratelimited("%s was not implemented.\n", __func__);
 446		return;
 447	}
 448	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
 449}
 450
 451static void pp_dpm_powergate_uvd(void *handle, bool gate)
 452{
 453	struct pp_hwmgr *hwmgr = handle;
 454
 455	if (!hwmgr || !hwmgr->pm_en)
 456		return;
 457
 458	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 459		pr_info_ratelimited("%s was not implemented.\n", __func__);
 460		return;
 461	}
 462	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
 463}
 464
 465static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 466		enum amd_pm_state_type *user_state)
 467{
 468	struct pp_hwmgr *hwmgr = handle;
 469
 470	if (!hwmgr || !hwmgr->pm_en)
 471		return -EINVAL;
 472
 473	return hwmgr_handle_task(hwmgr, task_id, user_state);
 474}
 475
 476static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 477{
 478	struct pp_hwmgr *hwmgr = handle;
 479	struct pp_power_state *state;
 480	enum amd_pm_state_type pm_type;
 481
 482	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
 483		return -EINVAL;
 484
 485	state = hwmgr->current_ps;
 486
 487	switch (state->classification.ui_label) {
 488	case PP_StateUILabel_Battery:
 489		pm_type = POWER_STATE_TYPE_BATTERY;
 490		break;
 491	case PP_StateUILabel_Balanced:
 492		pm_type = POWER_STATE_TYPE_BALANCED;
 493		break;
 494	case PP_StateUILabel_Performance:
 495		pm_type = POWER_STATE_TYPE_PERFORMANCE;
 496		break;
 497	default:
 498		if (state->classification.flags & PP_StateClassificationFlag_Boot)
 499			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
 500		else
 501			pm_type = POWER_STATE_TYPE_DEFAULT;
 502		break;
 503	}
 504
 505	return pm_type;
 506}
 507
 508static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 509{
 510	struct pp_hwmgr *hwmgr = handle;
 511
 512	if (!hwmgr || !hwmgr->pm_en)
 513		return -EOPNOTSUPP;
 514
 515	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
 516		return -EOPNOTSUPP;
 517
 518	if (mode == U32_MAX)
 519		return -EINVAL;
 520
 521	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
 522
 523	return 0;
 524}
 525
 526static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 527{
 528	struct pp_hwmgr *hwmgr = handle;
 529
 530	if (!hwmgr || !hwmgr->pm_en)
 531		return -EOPNOTSUPP;
 532
 533	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
 534		return -EOPNOTSUPP;
 535
 536	if (!fan_mode)
 537		return -EINVAL;
 538
 539	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
 540	return 0;
 541}
 542
 543static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 544{
 545	struct pp_hwmgr *hwmgr = handle;
 546
 547	if (!hwmgr || !hwmgr->pm_en)
 548		return -EOPNOTSUPP;
 549
 550	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
 551		return -EOPNOTSUPP;
 552
 553	if (speed == U32_MAX)
 554		return -EINVAL;
 555
 556	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
 557}
 558
 559static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 560{
 561	struct pp_hwmgr *hwmgr = handle;
 562
 563	if (!hwmgr || !hwmgr->pm_en)
 564		return -EOPNOTSUPP;
 565
 566	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
 567		return -EOPNOTSUPP;
 568
 569	if (!speed)
 570		return -EINVAL;
 571
 572	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
 573}
 574
 575static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 576{
 577	struct pp_hwmgr *hwmgr = handle;
 578
 579	if (!hwmgr || !hwmgr->pm_en)
 580		return -EOPNOTSUPP;
 581
 582	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 583		return -EOPNOTSUPP;
 584
 585	if (!rpm)
 586		return -EINVAL;
 587
 588	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
 589}
 590
 591static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 592{
 593	struct pp_hwmgr *hwmgr = handle;
 594
 595	if (!hwmgr || !hwmgr->pm_en)
 596		return -EOPNOTSUPP;
 597
 598	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
 599		return -EOPNOTSUPP;
 600
 601	if (rpm == U32_MAX)
 602		return -EINVAL;
 603
 604	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
 605}
 606
/*
 * pp_dpm_get_pp_num_states - enumerate the power states known to the hwmgr
 * and classify each into a generic amd_pm power-state type.
 *
 * NOTE(review): @data is memset before any validation; callers are assumed
 * to always pass a valid pointer — confirm.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* hwmgr->ps is a packed array of ps_size-byte entries, so the
		 * i-th state is located by manual pointer arithmetic. */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}
 642
 643static int pp_dpm_get_pp_table(void *handle, char **table)
 644{
 645	struct pp_hwmgr *hwmgr = handle;
 646
 647	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
 648		return -EINVAL;
 649
 650	*table = (char *)hwmgr->soft_pp_table;
 651	return hwmgr->soft_pp_table_size;
 652}
 653
 654static int amd_powerplay_reset(void *handle)
 655{
 656	struct pp_hwmgr *hwmgr = handle;
 657	int ret;
 658
 659	ret = hwmgr_hw_fini(hwmgr);
 660	if (ret)
 661		return ret;
 662
 663	ret = hwmgr_hw_init(hwmgr);
 664	if (ret)
 665		return ret;
 666
 667	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
 668}
 669
 670static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 671{
 672	struct pp_hwmgr *hwmgr = handle;
 673	int ret = -ENOMEM;
 674
 675	if (!hwmgr || !hwmgr->pm_en)
 676		return -EINVAL;
 677
 678	if (!hwmgr->hardcode_pp_table) {
 679		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 680						   hwmgr->soft_pp_table_size,
 681						   GFP_KERNEL);
 682		if (!hwmgr->hardcode_pp_table)
 683			return ret;
 684	}
 685
 686	memcpy(hwmgr->hardcode_pp_table, buf, size);
 687
 688	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
 689
 690	ret = amd_powerplay_reset(handle);
 691	if (ret)
 692		return ret;
 693
 694	if (hwmgr->hwmgr_func->avfs_control)
 695		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
 696
 697	return ret;
 698}
 699
 700static int pp_dpm_force_clock_level(void *handle,
 701		enum pp_clock_type type, uint32_t mask)
 702{
 703	struct pp_hwmgr *hwmgr = handle;
 704
 705	if (!hwmgr || !hwmgr->pm_en)
 706		return -EINVAL;
 707
 708	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 709		pr_info_ratelimited("%s was not implemented.\n", __func__);
 710		return 0;
 711	}
 712
 713	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 714		pr_debug("force clock level is for dpm manual mode only.\n");
 715		return -EINVAL;
 716	}
 717
 718	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 719}
 720
 721static int pp_dpm_emit_clock_levels(void *handle,
 722				    enum pp_clock_type type,
 723				    char *buf,
 724				    int *offset)
 725{
 726	struct pp_hwmgr *hwmgr = handle;
 727
 728	if (!hwmgr || !hwmgr->pm_en)
 729		return -EOPNOTSUPP;
 730
 731	if (!hwmgr->hwmgr_func->emit_clock_levels)
 732		return -ENOENT;
 733
 734	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
 735}
 736
 737static int pp_dpm_print_clock_levels(void *handle,
 738		enum pp_clock_type type, char *buf)
 739{
 740	struct pp_hwmgr *hwmgr = handle;
 741
 742	if (!hwmgr || !hwmgr->pm_en)
 743		return -EINVAL;
 744
 745	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 746		pr_info_ratelimited("%s was not implemented.\n", __func__);
 747		return 0;
 748	}
 749	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 750}
 751
 752static int pp_dpm_get_sclk_od(void *handle)
 753{
 754	struct pp_hwmgr *hwmgr = handle;
 755
 756	if (!hwmgr || !hwmgr->pm_en)
 757		return -EINVAL;
 758
 759	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 760		pr_info_ratelimited("%s was not implemented.\n", __func__);
 761		return 0;
 762	}
 763	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
 764}
 765
 766static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 767{
 768	struct pp_hwmgr *hwmgr = handle;
 769
 770	if (!hwmgr || !hwmgr->pm_en)
 771		return -EINVAL;
 772
 773	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 774		pr_info_ratelimited("%s was not implemented.\n", __func__);
 775		return 0;
 776	}
 777
 778	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
 779}
 780
 781static int pp_dpm_get_mclk_od(void *handle)
 782{
 783	struct pp_hwmgr *hwmgr = handle;
 784
 785	if (!hwmgr || !hwmgr->pm_en)
 786		return -EINVAL;
 787
 788	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 789		pr_info_ratelimited("%s was not implemented.\n", __func__);
 790		return 0;
 791	}
 792	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
 793}
 794
 795static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 796{
 797	struct pp_hwmgr *hwmgr = handle;
 798
 799	if (!hwmgr || !hwmgr->pm_en)
 800		return -EINVAL;
 801
 802	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 803		pr_info_ratelimited("%s was not implemented.\n", __func__);
 804		return 0;
 805	}
 806	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 807}
 808
/*
 * pp_dpm_read_sensor - read one power/clock/fan sensor.
 *
 * A handful of indices are answered directly from cached hwmgr state
 * (stable/peak pstate clocks in 10 kHz units scaled to kHz by *100, and
 * the fan RPM limits); everything else goes to the backend read_sensor.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		/* NOTE(review): unlike other wrappers, read_sensor is called
		 * without a NULL check; presumably every backend provides it
		 * — confirm. */
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}
 840
 841static struct amd_vce_state*
 842pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 843{
 844	struct pp_hwmgr *hwmgr = handle;
 845
 846	if (!hwmgr || !hwmgr->pm_en)
 847		return NULL;
 848
 849	if (idx < hwmgr->num_vce_state_tables)
 850		return &hwmgr->vce_states[idx];
 851	return NULL;
 852}
 853
 854static int pp_get_power_profile_mode(void *handle, char *buf)
 855{
 856	struct pp_hwmgr *hwmgr = handle;
 857
 858	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
 859		return -EOPNOTSUPP;
 860	if (!buf)
 861		return -EINVAL;
 862
 863	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
 864}
 865
 866static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 867{
 868	struct pp_hwmgr *hwmgr = handle;
 869
 870	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
 871		return -EOPNOTSUPP;
 872
 873	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 874		pr_debug("power profile setting is for manual dpm mode only.\n");
 875		return -EINVAL;
 876	}
 877
 878	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
 879}
 880
 881static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
 882{
 883	struct pp_hwmgr *hwmgr = handle;
 884
 885	if (!hwmgr || !hwmgr->pm_en)
 886		return -EINVAL;
 887
 888	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
 889		return 0;
 890
 891	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
 892}
 893
 894static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
 895				 long *input, uint32_t size)
 896{
 897	struct pp_hwmgr *hwmgr = handle;
 898
 899	if (!hwmgr || !hwmgr->pm_en)
 900		return -EINVAL;
 901
 902	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
 903		pr_info_ratelimited("%s was not implemented.\n", __func__);
 904		return 0;
 905	}
 906
 907	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
 908}
 909
 910static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
 911{
 912	struct pp_hwmgr *hwmgr = handle;
 913
 914	if (!hwmgr)
 915		return -EINVAL;
 916
 917	if (!hwmgr->pm_en)
 918		return 0;
 919
 920	if (hwmgr->hwmgr_func->set_mp1_state)
 921		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
 922
 923	return 0;
 924}
 925
/*
 * pp_dpm_switch_power_profile - enable or disable one power profile.
 *
 * Maintains a bitmask of active profiles keyed by per-profile priority
 * (workload_prority — spelling matches the struct field) and re-applies
 * the highest-priority still-active profile's workload setting, unless
 * the DPM level is manual.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* CUSTOM (and anything beyond) cannot be toggled through here. */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		/* fls() is 0 when the mask became empty; fall back to slot 0. */
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		/* Mask is non-empty here, so fls() >= 1. */
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
				return -EINVAL;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);

	return 0;
}
 967
/*
 * pp_set_power_limit - program a new sustained power limit.
 *
 * A limit of 0 selects the default limit.  The ceiling is the default
 * limit, raised by TDPODLimit percent when overdrive is enabled; anything
 * above that is rejected with -EINVAL.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	/* Cache the accepted value so pp_get_power_limit can report it. */
	hwmgr->power_limit = limit;
	return 0;
}
 997
 998static int pp_get_power_limit(void *handle, uint32_t *limit,
 999			      enum pp_power_limit_level pp_limit_level,
1000			      enum pp_power_type power_type)
1001{
1002	struct pp_hwmgr *hwmgr = handle;
1003	int ret = 0;
1004
1005	if (!hwmgr || !hwmgr->pm_en || !limit)
1006		return -EINVAL;
1007
1008	if (power_type != PP_PWR_TYPE_SUSTAINED)
1009		return -EOPNOTSUPP;
1010
1011	switch (pp_limit_level) {
1012		case PP_PWR_LIMIT_CURRENT:
1013			*limit = hwmgr->power_limit;
1014			break;
1015		case PP_PWR_LIMIT_DEFAULT:
1016			*limit = hwmgr->default_power_limit;
1017			break;
1018		case PP_PWR_LIMIT_MAX:
1019			*limit = hwmgr->default_power_limit;
1020			if (hwmgr->od_enabled) {
1021				*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1022				*limit /= 100;
1023			}
1024			break;
1025		case PP_PWR_LIMIT_MIN:
1026			*limit = 0;
1027			break;
1028		default:
1029			ret = -EOPNOTSUPP;
1030			break;
1031	}
1032
1033	return ret;
1034}
1035
1036static int pp_display_configuration_change(void *handle,
1037	const struct amd_pp_display_configuration *display_config)
1038{
1039	struct pp_hwmgr *hwmgr = handle;
1040
1041	if (!hwmgr || !hwmgr->pm_en)
1042		return -EINVAL;
1043
1044	phm_store_dal_configuration_data(hwmgr, display_config);
1045	return 0;
1046}
1047
1048static int pp_get_display_power_level(void *handle,
1049		struct amd_pp_simple_clock_info *output)
1050{
1051	struct pp_hwmgr *hwmgr = handle;
1052
1053	if (!hwmgr || !hwmgr->pm_en || !output)
1054		return -EINVAL;
1055
1056	return phm_get_dal_power_level(hwmgr, output);
1057}
1058
/*
 * pp_get_current_clocks - fill *clocks with the current engine/memory
 * clock ranges and bus bandwidth of the active power state.
 *
 * Chooses the power-containment performance-level designation when that
 * platform cap is set, Activity otherwise; shallow-sleep clocks override
 * the *_in_sr fields when available.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	/* NOTE(review): hw_clocks is stack-allocated and uninitialized;
	 * presumably phm_get_clock_info() fills every field read below
	 * whenever it returns 0 — confirm. */
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* A zero DAL level means "unknown"; fall back to the deepest level. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}
1106
1107static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1108{
1109	struct pp_hwmgr *hwmgr = handle;
1110
1111	if (!hwmgr || !hwmgr->pm_en)
1112		return -EINVAL;
1113
1114	if (clocks == NULL)
1115		return -EINVAL;
1116
1117	return phm_get_clock_by_type(hwmgr, type, clocks);
1118}
1119
1120static int pp_get_clock_by_type_with_latency(void *handle,
1121		enum amd_pp_clock_type type,
1122		struct pp_clock_levels_with_latency *clocks)
1123{
1124	struct pp_hwmgr *hwmgr = handle;
1125
1126	if (!hwmgr || !hwmgr->pm_en || !clocks)
1127		return -EINVAL;
1128
1129	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1130}
1131
1132static int pp_get_clock_by_type_with_voltage(void *handle,
1133		enum amd_pp_clock_type type,
1134		struct pp_clock_levels_with_voltage *clocks)
1135{
1136	struct pp_hwmgr *hwmgr = handle;
1137
1138	if (!hwmgr || !hwmgr->pm_en || !clocks)
1139		return -EINVAL;
1140
1141	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1142}
1143
1144static int pp_set_watermarks_for_clocks_ranges(void *handle,
1145		void *clock_ranges)
1146{
1147	struct pp_hwmgr *hwmgr = handle;
1148
1149	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1150		return -EINVAL;
1151
1152	return phm_set_watermarks_for_clocks_ranges(hwmgr,
1153						    clock_ranges);
1154}
1155
1156static int pp_display_clock_voltage_request(void *handle,
1157		struct pp_display_clock_request *clock)
1158{
1159	struct pp_hwmgr *hwmgr = handle;
1160
1161	if (!hwmgr || !hwmgr->pm_en || !clock)
1162		return -EINVAL;
1163
1164	return phm_display_clock_voltage_request(hwmgr, clock);
1165}
1166
1167static int pp_get_display_mode_validation_clocks(void *handle,
1168		struct amd_pp_simple_clock_info *clocks)
1169{
1170	struct pp_hwmgr *hwmgr = handle;
1171	int ret = 0;
1172
1173	if (!hwmgr || !hwmgr->pm_en || !clocks)
1174		return -EINVAL;
1175
1176	clocks->level = PP_DAL_POWERLEVEL_7;
1177
1178	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1179		ret = phm_get_max_high_clocks(hwmgr, clocks);
1180
1181	return ret;
1182}
1183
1184static int pp_dpm_powergate_mmhub(void *handle)
1185{
1186	struct pp_hwmgr *hwmgr = handle;
1187
1188	if (!hwmgr || !hwmgr->pm_en)
1189		return -EINVAL;
1190
1191	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1192		pr_info_ratelimited("%s was not implemented.\n", __func__);
1193		return 0;
1194	}
1195
1196	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1197}
1198
1199static int pp_dpm_powergate_gfx(void *handle, bool gate)
1200{
1201	struct pp_hwmgr *hwmgr = handle;
1202
1203	if (!hwmgr || !hwmgr->pm_en)
1204		return 0;
1205
1206	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1207		pr_info_ratelimited("%s was not implemented.\n", __func__);
1208		return 0;
1209	}
1210
1211	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1212}
1213
1214static void pp_dpm_powergate_acp(void *handle, bool gate)
1215{
1216	struct pp_hwmgr *hwmgr = handle;
1217
1218	if (!hwmgr || !hwmgr->pm_en)
1219		return;
1220
1221	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1222		pr_info_ratelimited("%s was not implemented.\n", __func__);
1223		return;
1224	}
1225
1226	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1227}
1228
1229static void pp_dpm_powergate_sdma(void *handle, bool gate)
1230{
1231	struct pp_hwmgr *hwmgr = handle;
1232
1233	if (!hwmgr)
1234		return;
1235
1236	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1237		pr_info_ratelimited("%s was not implemented.\n", __func__);
1238		return;
1239	}
1240
1241	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1242}
1243
1244static int pp_set_powergating_by_smu(void *handle,
1245				uint32_t block_type, bool gate)
1246{
1247	int ret = 0;
1248
1249	switch (block_type) {
1250	case AMD_IP_BLOCK_TYPE_UVD:
1251	case AMD_IP_BLOCK_TYPE_VCN:
1252		pp_dpm_powergate_uvd(handle, gate);
1253		break;
1254	case AMD_IP_BLOCK_TYPE_VCE:
1255		pp_dpm_powergate_vce(handle, gate);
1256		break;
1257	case AMD_IP_BLOCK_TYPE_GMC:
1258		/*
1259		 * For now, this is only used on PICASSO.
1260		 * And only "gate" operation is supported.
1261		 */
1262		if (gate)
1263			pp_dpm_powergate_mmhub(handle);
1264		break;
1265	case AMD_IP_BLOCK_TYPE_GFX:
1266		ret = pp_dpm_powergate_gfx(handle, gate);
1267		break;
1268	case AMD_IP_BLOCK_TYPE_ACP:
1269		pp_dpm_powergate_acp(handle, gate);
1270		break;
1271	case AMD_IP_BLOCK_TYPE_SDMA:
1272		pp_dpm_powergate_sdma(handle, gate);
1273		break;
1274	default:
1275		break;
1276	}
1277	return ret;
1278}
1279
1280static int pp_notify_smu_enable_pwe(void *handle)
1281{
1282	struct pp_hwmgr *hwmgr = handle;
1283
1284	if (!hwmgr || !hwmgr->pm_en)
1285		return -EINVAL;
1286
1287	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1288		pr_info_ratelimited("%s was not implemented.\n", __func__);
1289		return -EINVAL;
1290	}
1291
1292	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1293
1294	return 0;
1295}
1296
1297static int pp_enable_mgpu_fan_boost(void *handle)
1298{
1299	struct pp_hwmgr *hwmgr = handle;
1300
1301	if (!hwmgr)
1302		return -EINVAL;
1303
1304	if (!hwmgr->pm_en ||
1305	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1306		return 0;
1307
1308	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1309
1310	return 0;
1311}
1312
1313static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1314{
1315	struct pp_hwmgr *hwmgr = handle;
1316
1317	if (!hwmgr || !hwmgr->pm_en)
1318		return -EINVAL;
1319
1320	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1321		pr_debug("%s was not implemented.\n", __func__);
1322		return -EINVAL;
1323	}
1324
1325	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1326
1327	return 0;
1328}
1329
1330static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1331{
1332	struct pp_hwmgr *hwmgr = handle;
1333
1334	if (!hwmgr || !hwmgr->pm_en)
1335		return -EINVAL;
1336
1337	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1338		pr_debug("%s was not implemented.\n", __func__);
1339		return -EINVAL;
1340	}
1341
1342	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1343
1344	return 0;
1345}
1346
1347static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1348{
1349	struct pp_hwmgr *hwmgr = handle;
1350
1351	if (!hwmgr || !hwmgr->pm_en)
1352		return -EINVAL;
1353
1354	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1355		pr_debug("%s was not implemented.\n", __func__);
1356		return -EINVAL;
1357	}
1358
1359	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1360
1361	return 0;
1362}
1363
1364static int pp_set_active_display_count(void *handle, uint32_t count)
1365{
1366	struct pp_hwmgr *hwmgr = handle;
1367
1368	if (!hwmgr || !hwmgr->pm_en)
1369		return -EINVAL;
1370
1371	return phm_set_active_display_count(hwmgr, count);
1372}
1373
1374static bool pp_get_asic_baco_capability(void *handle)
1375{
1376	struct pp_hwmgr *hwmgr = handle;
1377
 
1378	if (!hwmgr)
1379		return false;
1380
1381	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1382		!hwmgr->hwmgr_func->get_asic_baco_capability)
1383		return false;
1384
1385	return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr);
 
 
1386}
1387
1388static int pp_get_asic_baco_state(void *handle, int *state)
1389{
1390	struct pp_hwmgr *hwmgr = handle;
1391
1392	if (!hwmgr)
1393		return -EINVAL;
1394
1395	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1396		return 0;
1397
1398	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1399
1400	return 0;
1401}
1402
1403static int pp_set_asic_baco_state(void *handle, int state)
1404{
1405	struct pp_hwmgr *hwmgr = handle;
1406
1407	if (!hwmgr)
1408		return -EINVAL;
1409
1410	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1411		!hwmgr->hwmgr_func->set_asic_baco_state)
1412		return 0;
1413
1414	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1415
1416	return 0;
1417}
1418
1419static int pp_get_ppfeature_status(void *handle, char *buf)
1420{
1421	struct pp_hwmgr *hwmgr = handle;
1422
1423	if (!hwmgr || !hwmgr->pm_en || !buf)
1424		return -EINVAL;
1425
1426	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1427		pr_info_ratelimited("%s was not implemented.\n", __func__);
1428		return -EINVAL;
1429	}
1430
1431	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1432}
1433
1434static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1435{
1436	struct pp_hwmgr *hwmgr = handle;
1437
1438	if (!hwmgr || !hwmgr->pm_en)
1439		return -EINVAL;
1440
1441	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1442		pr_info_ratelimited("%s was not implemented.\n", __func__);
1443		return -EINVAL;
1444	}
1445
1446	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1447}
1448
1449static int pp_asic_reset_mode_2(void *handle)
1450{
1451	struct pp_hwmgr *hwmgr = handle;
1452
1453	if (!hwmgr || !hwmgr->pm_en)
1454		return -EINVAL;
1455
1456	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1457		pr_info_ratelimited("%s was not implemented.\n", __func__);
1458		return -EINVAL;
1459	}
1460
1461	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1462}
1463
1464static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1465{
1466	struct pp_hwmgr *hwmgr = handle;
1467
1468	if (!hwmgr || !hwmgr->pm_en)
1469		return -EINVAL;
1470
1471	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1472		pr_info_ratelimited("%s was not implemented.\n", __func__);
1473		return -EINVAL;
1474	}
1475
1476	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1477}
1478
1479static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1480{
1481	struct pp_hwmgr *hwmgr = handle;
1482
1483	if (!hwmgr)
1484		return -EINVAL;
1485
1486	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1487		return 0;
1488
1489	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1490
1491	return 0;
1492}
1493
1494static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1495{
1496	struct pp_hwmgr *hwmgr = handle;
1497
1498	if (!hwmgr)
1499		return -EINVAL;
1500
1501	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1502		return 0;
1503
1504	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1505
1506	return 0;
1507}
1508
1509static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1510{
1511	struct pp_hwmgr *hwmgr = handle;
1512
1513	if (!hwmgr)
1514		return -EINVAL;
1515
1516	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1517		return -EOPNOTSUPP;
1518
1519	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1520}
1521
1522static int pp_gfx_state_change_set(void *handle, uint32_t state)
1523{
1524	struct pp_hwmgr *hwmgr = handle;
1525
1526	if (!hwmgr || !hwmgr->pm_en)
1527		return -EINVAL;
1528
1529	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1530		pr_info_ratelimited("%s was not implemented.\n", __func__);
1531		return -EINVAL;
1532	}
1533
1534	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1535	return 0;
1536}
1537
1538static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1539{
1540	struct pp_hwmgr *hwmgr = handle;
1541	struct amdgpu_device *adev = hwmgr->adev;
1542	int err;
1543
1544	if (!addr || !size)
1545		return -EINVAL;
1546
1547	*addr = NULL;
1548	*size = 0;
1549	if (adev->pm.smu_prv_buffer) {
1550		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1551		if (err)
1552			return err;
1553		*size = adev->pm.smu_prv_buffer_size;
1554	}
1555
1556	return 0;
1557}
1558
1559static void pp_pm_compute_clocks(void *handle)
1560{
1561	struct pp_hwmgr *hwmgr = handle;
1562	struct amdgpu_device *adev = hwmgr->adev;
1563
1564	if (!adev->dc_enabled) {
1565		amdgpu_dpm_get_active_displays(adev);
1566		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1567		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1568		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1569		/* we have issues with mclk switching with
1570		 * refresh rates over 120 hz on the non-DC code.
1571		 */
1572		if (adev->pm.pm_display_cfg.vrefresh > 120)
1573			adev->pm.pm_display_cfg.min_vblank_time = 0;
1574
1575		pp_display_configuration_change(handle,
1576						&adev->pm.pm_display_cfg);
1577	}
1578
1579	pp_dpm_dispatch_tasks(handle,
1580			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1581			      NULL);
1582}
1583
1584static const struct amd_pm_funcs pp_dpm_funcs = {
1585	.load_firmware = pp_dpm_load_fw,
1586	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1587	.force_performance_level = pp_dpm_force_performance_level,
1588	.get_performance_level = pp_dpm_get_performance_level,
1589	.get_current_power_state = pp_dpm_get_current_power_state,
1590	.dispatch_tasks = pp_dpm_dispatch_tasks,
1591	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
1592	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
1593	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
1594	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
1595	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1596	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1597	.get_pp_num_states = pp_dpm_get_pp_num_states,
1598	.get_pp_table = pp_dpm_get_pp_table,
1599	.set_pp_table = pp_dpm_set_pp_table,
1600	.force_clock_level = pp_dpm_force_clock_level,
1601	.emit_clock_levels = pp_dpm_emit_clock_levels,
1602	.print_clock_levels = pp_dpm_print_clock_levels,
1603	.get_sclk_od = pp_dpm_get_sclk_od,
1604	.set_sclk_od = pp_dpm_set_sclk_od,
1605	.get_mclk_od = pp_dpm_get_mclk_od,
1606	.set_mclk_od = pp_dpm_set_mclk_od,
1607	.read_sensor = pp_dpm_read_sensor,
1608	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
1609	.switch_power_profile = pp_dpm_switch_power_profile,
1610	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
1611	.set_powergating_by_smu = pp_set_powergating_by_smu,
1612	.get_power_profile_mode = pp_get_power_profile_mode,
1613	.set_power_profile_mode = pp_set_power_profile_mode,
1614	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
1615	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
1616	.set_mp1_state = pp_dpm_set_mp1_state,
1617	.set_power_limit = pp_set_power_limit,
1618	.get_power_limit = pp_get_power_limit,
1619/* export to DC */
1620	.get_sclk = pp_dpm_get_sclk,
1621	.get_mclk = pp_dpm_get_mclk,
1622	.display_configuration_change = pp_display_configuration_change,
1623	.get_display_power_level = pp_get_display_power_level,
1624	.get_current_clocks = pp_get_current_clocks,
1625	.get_clock_by_type = pp_get_clock_by_type,
1626	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1627	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1628	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1629	.display_clock_voltage_request = pp_display_clock_voltage_request,
1630	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1631	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1632	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1633	.set_active_display_count = pp_set_active_display_count,
1634	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1635	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1636	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1637	.get_asic_baco_capability = pp_get_asic_baco_capability,
1638	.get_asic_baco_state = pp_get_asic_baco_state,
1639	.set_asic_baco_state = pp_set_asic_baco_state,
1640	.get_ppfeature_status = pp_get_ppfeature_status,
1641	.set_ppfeature_status = pp_set_ppfeature_status,
1642	.asic_reset_mode_2 = pp_asic_reset_mode_2,
1643	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
1644	.set_df_cstate = pp_set_df_cstate,
1645	.set_xgmi_pstate = pp_set_xgmi_pstate,
1646	.get_gpu_metrics = pp_get_gpu_metrics,
1647	.gfx_state_change_set = pp_gfx_state_change_set,
1648	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
1649	.pm_compute_clocks = pp_pm_compute_clocks,
1650};
/* v6.2 — a second, older copy of amd_powerplay.c (kernel v6.2) follows below */
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "pp_debug.h"
  24#include <linux/types.h>
  25#include <linux/kernel.h>
  26#include <linux/gfp.h>
  27#include <linux/slab.h>
  28#include <linux/firmware.h>
 
  29#include "amd_shared.h"
  30#include "amd_powerplay.h"
  31#include "power_state.h"
  32#include "amdgpu.h"
  33#include "hwmgr.h"
  34#include "amdgpu_dpm_internal.h"
  35#include "amdgpu_display.h"
  36
  37static const struct amd_pm_funcs pp_dpm_funcs;
  38
  39static int amd_powerplay_create(struct amdgpu_device *adev)
  40{
  41	struct pp_hwmgr *hwmgr;
  42
  43	if (adev == NULL)
  44		return -EINVAL;
  45
  46	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
  47	if (hwmgr == NULL)
  48		return -ENOMEM;
  49
  50	hwmgr->adev = adev;
  51	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
  52	hwmgr->device = amdgpu_cgs_create_device(adev);
  53	mutex_init(&hwmgr->msg_lock);
  54	hwmgr->chip_family = adev->family;
  55	hwmgr->chip_id = adev->asic_type;
  56	hwmgr->feature_mask = adev->pm.pp_feature;
  57	hwmgr->display_config = &adev->pm.pm_display_cfg;
  58	adev->powerplay.pp_handle = hwmgr;
  59	adev->powerplay.pp_funcs = &pp_dpm_funcs;
  60	return 0;
  61}
  62
  63
  64static void amd_powerplay_destroy(struct amdgpu_device *adev)
  65{
  66	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
  67
  68	mutex_destroy(&hwmgr->msg_lock);
  69
  70	kfree(hwmgr->hardcode_pp_table);
  71	hwmgr->hardcode_pp_table = NULL;
  72
  73	kfree(hwmgr);
  74	hwmgr = NULL;
  75}
  76
  77static int pp_early_init(void *handle)
  78{
  79	int ret;
  80	struct amdgpu_device *adev = handle;
  81
  82	ret = amd_powerplay_create(adev);
  83
  84	if (ret != 0)
  85		return ret;
  86
  87	ret = hwmgr_early_init(adev->powerplay.pp_handle);
  88	if (ret)
  89		return -EINVAL;
  90
  91	return 0;
  92}
  93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  94static int pp_sw_init(void *handle)
  95{
  96	struct amdgpu_device *adev = handle;
  97	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
  98	int ret = 0;
  99
 100	ret = hwmgr_sw_init(hwmgr);
 101
 102	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
 103
 
 
 
 
 104	return ret;
 105}
 106
 107static int pp_sw_fini(void *handle)
 108{
 109	struct amdgpu_device *adev = handle;
 110	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 111
 112	hwmgr_sw_fini(hwmgr);
 113
 114	release_firmware(adev->pm.fw);
 115	adev->pm.fw = NULL;
 116
 117	return 0;
 118}
 119
 120static int pp_hw_init(void *handle)
 121{
 122	int ret = 0;
 123	struct amdgpu_device *adev = handle;
 124	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 125
 126	ret = hwmgr_hw_init(hwmgr);
 127
 128	if (ret)
 129		pr_err("powerplay hw init failed\n");
 130
 131	return ret;
 132}
 133
 134static int pp_hw_fini(void *handle)
 135{
 136	struct amdgpu_device *adev = handle;
 137	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 138
 
 
 139	hwmgr_hw_fini(hwmgr);
 140
 141	return 0;
 142}
 143
 144static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
 145{
 146	int r = -EINVAL;
 147	void *cpu_ptr = NULL;
 148	uint64_t gpu_addr;
 149	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 150
 151	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
 152						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 153						&adev->pm.smu_prv_buffer,
 154						&gpu_addr,
 155						&cpu_ptr)) {
 156		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
 157		return;
 158	}
 159
 160	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
 161		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
 162					lower_32_bits((unsigned long)cpu_ptr),
 163					upper_32_bits((unsigned long)cpu_ptr),
 164					lower_32_bits(gpu_addr),
 165					upper_32_bits(gpu_addr),
 166					adev->pm.smu_prv_buffer_size);
 167
 168	if (r) {
 169		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
 170		adev->pm.smu_prv_buffer = NULL;
 171		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
 172	}
 173}
 174
 175static int pp_late_init(void *handle)
 176{
 177	struct amdgpu_device *adev = handle;
 178	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 179
 180	if (hwmgr && hwmgr->pm_en)
 181		hwmgr_handle_task(hwmgr,
 182					AMD_PP_TASK_COMPLETE_INIT, NULL);
 183	if (adev->pm.smu_prv_buffer_size != 0)
 184		pp_reserve_vram_for_smu(adev);
 185
 186	return 0;
 187}
 188
 189static void pp_late_fini(void *handle)
 190{
 191	struct amdgpu_device *adev = handle;
 192
 193	if (adev->pm.smu_prv_buffer)
 194		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
 195	amd_powerplay_destroy(adev);
 196}
 197
 198
 199static bool pp_is_idle(void *handle)
 200{
 201	return false;
 202}
 203
 204static int pp_wait_for_idle(void *handle)
 205{
 206	return 0;
 207}
 208
 209static int pp_sw_reset(void *handle)
 210{
 211	return 0;
 212}
 213
 214static int pp_set_powergating_state(void *handle,
 215				    enum amd_powergating_state state)
 216{
 217	return 0;
 218}
 219
 220static int pp_suspend(void *handle)
 221{
 222	struct amdgpu_device *adev = handle;
 223	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 224
 
 
 225	return hwmgr_suspend(hwmgr);
 226}
 227
 228static int pp_resume(void *handle)
 229{
 230	struct amdgpu_device *adev = handle;
 231	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 232
 233	return hwmgr_resume(hwmgr);
 234}
 235
 236static int pp_set_clockgating_state(void *handle,
 237					  enum amd_clockgating_state state)
 238{
 239	return 0;
 240}
 241
 242static const struct amd_ip_funcs pp_ip_funcs = {
 243	.name = "powerplay",
 244	.early_init = pp_early_init,
 245	.late_init = pp_late_init,
 246	.sw_init = pp_sw_init,
 247	.sw_fini = pp_sw_fini,
 248	.hw_init = pp_hw_init,
 249	.hw_fini = pp_hw_fini,
 250	.late_fini = pp_late_fini,
 251	.suspend = pp_suspend,
 252	.resume = pp_resume,
 253	.is_idle = pp_is_idle,
 254	.wait_for_idle = pp_wait_for_idle,
 255	.soft_reset = pp_sw_reset,
 256	.set_clockgating_state = pp_set_clockgating_state,
 257	.set_powergating_state = pp_set_powergating_state,
 258};
 259
 260const struct amdgpu_ip_block_version pp_smu_ip_block =
 261{
 262	.type = AMD_IP_BLOCK_TYPE_SMC,
 263	.major = 1,
 264	.minor = 0,
 265	.rev = 0,
 266	.funcs = &pp_ip_funcs,
 267};
 268
 269/* This interface only be supported On Vi,
 270 * because only smu7/8 can help to load gfx/sdma fw,
 271 * smu need to be enabled before load other ip's fw.
 272 * so call start smu to load smu7 fw and other ip's fw
 273 */
 274static int pp_dpm_load_fw(void *handle)
 275{
 276	struct pp_hwmgr *hwmgr = handle;
 277
 278	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
 279		return -EINVAL;
 280
 281	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
 282		pr_err("fw load failed\n");
 283		return -EINVAL;
 284	}
 285
 286	return 0;
 287}
 288
 289static int pp_dpm_fw_loading_complete(void *handle)
 290{
 291	return 0;
 292}
 293
 294static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 295{
 296	struct pp_hwmgr *hwmgr = handle;
 297
 298	if (!hwmgr || !hwmgr->pm_en)
 299		return -EINVAL;
 300
 301	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
 302		pr_info_ratelimited("%s was not implemented.\n", __func__);
 303		return 0;
 304	}
 305
 306	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
 307}
 308
 309static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
 310						enum amd_dpm_forced_level *level)
 311{
 312	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
 313					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
 314					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
 315					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
 316
 317	if (!(hwmgr->dpm_level & profile_mode_mask)) {
 318		/* enter umd pstate, save current level, disable gfx cg*/
 319		if (*level & profile_mode_mask) {
 320			hwmgr->saved_dpm_level = hwmgr->dpm_level;
 321			hwmgr->en_umd_pstate = true;
 322		}
 323	} else {
 324		/* exit umd pstate, restore level, enable gfx cg*/
 325		if (!(*level & profile_mode_mask)) {
 326			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
 327				*level = hwmgr->saved_dpm_level;
 328			hwmgr->en_umd_pstate = false;
 329		}
 330	}
 331}
 332
 333static int pp_dpm_force_performance_level(void *handle,
 334					enum amd_dpm_forced_level level)
 335{
 336	struct pp_hwmgr *hwmgr = handle;
 337
 338	if (!hwmgr || !hwmgr->pm_en)
 339		return -EINVAL;
 340
 341	if (level == hwmgr->dpm_level)
 342		return 0;
 343
 344	pp_dpm_en_umd_pstate(hwmgr, &level);
 345	hwmgr->request_dpm_level = level;
 346	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
 347
 348	return 0;
 349}
 350
 351static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 352								void *handle)
 353{
 354	struct pp_hwmgr *hwmgr = handle;
 355
 356	if (!hwmgr || !hwmgr->pm_en)
 357		return -EINVAL;
 358
 359	return hwmgr->dpm_level;
 360}
 361
 362static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 363{
 364	struct pp_hwmgr *hwmgr = handle;
 365
 366	if (!hwmgr || !hwmgr->pm_en)
 367		return 0;
 368
 369	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 370		pr_info_ratelimited("%s was not implemented.\n", __func__);
 371		return 0;
 372	}
 373	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
 374}
 375
 376static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 377{
 378	struct pp_hwmgr *hwmgr = handle;
 379
 380	if (!hwmgr || !hwmgr->pm_en)
 381		return 0;
 382
 383	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 384		pr_info_ratelimited("%s was not implemented.\n", __func__);
 385		return 0;
 386	}
 387	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 388}
 389
 390static void pp_dpm_powergate_vce(void *handle, bool gate)
 391{
 392	struct pp_hwmgr *hwmgr = handle;
 393
 394	if (!hwmgr || !hwmgr->pm_en)
 395		return;
 396
 397	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 398		pr_info_ratelimited("%s was not implemented.\n", __func__);
 399		return;
 400	}
 401	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
 402}
 403
 404static void pp_dpm_powergate_uvd(void *handle, bool gate)
 405{
 406	struct pp_hwmgr *hwmgr = handle;
 407
 408	if (!hwmgr || !hwmgr->pm_en)
 409		return;
 410
 411	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 412		pr_info_ratelimited("%s was not implemented.\n", __func__);
 413		return;
 414	}
 415	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
 416}
 417
 418static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 419		enum amd_pm_state_type *user_state)
 420{
 421	struct pp_hwmgr *hwmgr = handle;
 422
 423	if (!hwmgr || !hwmgr->pm_en)
 424		return -EINVAL;
 425
 426	return hwmgr_handle_task(hwmgr, task_id, user_state);
 427}
 428
 429static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 430{
 431	struct pp_hwmgr *hwmgr = handle;
 432	struct pp_power_state *state;
 433	enum amd_pm_state_type pm_type;
 434
 435	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
 436		return -EINVAL;
 437
 438	state = hwmgr->current_ps;
 439
 440	switch (state->classification.ui_label) {
 441	case PP_StateUILabel_Battery:
 442		pm_type = POWER_STATE_TYPE_BATTERY;
 443		break;
 444	case PP_StateUILabel_Balanced:
 445		pm_type = POWER_STATE_TYPE_BALANCED;
 446		break;
 447	case PP_StateUILabel_Performance:
 448		pm_type = POWER_STATE_TYPE_PERFORMANCE;
 449		break;
 450	default:
 451		if (state->classification.flags & PP_StateClassificationFlag_Boot)
 452			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
 453		else
 454			pm_type = POWER_STATE_TYPE_DEFAULT;
 455		break;
 456	}
 457
 458	return pm_type;
 459}
 460
 461static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 462{
 463	struct pp_hwmgr *hwmgr = handle;
 464
 465	if (!hwmgr || !hwmgr->pm_en)
 466		return -EOPNOTSUPP;
 467
 468	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
 469		return -EOPNOTSUPP;
 470
 471	if (mode == U32_MAX)
 472		return -EINVAL;
 473
 474	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
 475
 476	return 0;
 477}
 478
 479static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 480{
 481	struct pp_hwmgr *hwmgr = handle;
 482
 483	if (!hwmgr || !hwmgr->pm_en)
 484		return -EOPNOTSUPP;
 485
 486	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
 487		return -EOPNOTSUPP;
 488
 489	if (!fan_mode)
 490		return -EINVAL;
 491
 492	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
 493	return 0;
 494}
 495
 496static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 497{
 498	struct pp_hwmgr *hwmgr = handle;
 499
 500	if (!hwmgr || !hwmgr->pm_en)
 501		return -EOPNOTSUPP;
 502
 503	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
 504		return -EOPNOTSUPP;
 505
 506	if (speed == U32_MAX)
 507		return -EINVAL;
 508
 509	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
 510}
 511
 512static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 513{
 514	struct pp_hwmgr *hwmgr = handle;
 515
 516	if (!hwmgr || !hwmgr->pm_en)
 517		return -EOPNOTSUPP;
 518
 519	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
 520		return -EOPNOTSUPP;
 521
 522	if (!speed)
 523		return -EINVAL;
 524
 525	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
 526}
 527
 528static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 529{
 530	struct pp_hwmgr *hwmgr = handle;
 531
 532	if (!hwmgr || !hwmgr->pm_en)
 533		return -EOPNOTSUPP;
 534
 535	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 536		return -EOPNOTSUPP;
 537
 538	if (!rpm)
 539		return -EINVAL;
 540
 541	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
 542}
 543
 544static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 545{
 546	struct pp_hwmgr *hwmgr = handle;
 547
 548	if (!hwmgr || !hwmgr->pm_en)
 549		return -EOPNOTSUPP;
 550
 551	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
 552		return -EOPNOTSUPP;
 553
 554	if (rpm == U32_MAX)
 555		return -EINVAL;
 556
 557	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
 558}
 559
 560static int pp_dpm_get_pp_num_states(void *handle,
 561		struct pp_states_info *data)
 562{
 563	struct pp_hwmgr *hwmgr = handle;
 564	int i;
 565
 566	memset(data, 0, sizeof(*data));
 567
 568	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
 569		return -EINVAL;
 570
 571	data->nums = hwmgr->num_ps;
 572
 573	for (i = 0; i < hwmgr->num_ps; i++) {
 574		struct pp_power_state *state = (struct pp_power_state *)
 575				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
 576		switch (state->classification.ui_label) {
 577		case PP_StateUILabel_Battery:
 578			data->states[i] = POWER_STATE_TYPE_BATTERY;
 579			break;
 580		case PP_StateUILabel_Balanced:
 581			data->states[i] = POWER_STATE_TYPE_BALANCED;
 582			break;
 583		case PP_StateUILabel_Performance:
 584			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
 585			break;
 586		default:
 587			if (state->classification.flags & PP_StateClassificationFlag_Boot)
 588				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
 589			else
 590				data->states[i] = POWER_STATE_TYPE_DEFAULT;
 591		}
 592	}
 593	return 0;
 594}
 595
 596static int pp_dpm_get_pp_table(void *handle, char **table)
 597{
 598	struct pp_hwmgr *hwmgr = handle;
 599
 600	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
 601		return -EINVAL;
 602
 603	*table = (char *)hwmgr->soft_pp_table;
 604	return hwmgr->soft_pp_table_size;
 605}
 606
 607static int amd_powerplay_reset(void *handle)
 608{
 609	struct pp_hwmgr *hwmgr = handle;
 610	int ret;
 611
 612	ret = hwmgr_hw_fini(hwmgr);
 613	if (ret)
 614		return ret;
 615
 616	ret = hwmgr_hw_init(hwmgr);
 617	if (ret)
 618		return ret;
 619
 620	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
 621}
 622
 623static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 624{
 625	struct pp_hwmgr *hwmgr = handle;
 626	int ret = -ENOMEM;
 627
 628	if (!hwmgr || !hwmgr->pm_en)
 629		return -EINVAL;
 630
 631	if (!hwmgr->hardcode_pp_table) {
 632		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 633						   hwmgr->soft_pp_table_size,
 634						   GFP_KERNEL);
 635		if (!hwmgr->hardcode_pp_table)
 636			return ret;
 637	}
 638
 639	memcpy(hwmgr->hardcode_pp_table, buf, size);
 640
 641	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
 642
 643	ret = amd_powerplay_reset(handle);
 644	if (ret)
 645		return ret;
 646
 647	if (hwmgr->hwmgr_func->avfs_control)
 648		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
 649
 650	return ret;
 651}
 652
 653static int pp_dpm_force_clock_level(void *handle,
 654		enum pp_clock_type type, uint32_t mask)
 655{
 656	struct pp_hwmgr *hwmgr = handle;
 657
 658	if (!hwmgr || !hwmgr->pm_en)
 659		return -EINVAL;
 660
 661	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 662		pr_info_ratelimited("%s was not implemented.\n", __func__);
 663		return 0;
 664	}
 665
 666	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 667		pr_debug("force clock level is for dpm manual mode only.\n");
 668		return -EINVAL;
 669	}
 670
 671	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 672}
 673
 674static int pp_dpm_emit_clock_levels(void *handle,
 675				    enum pp_clock_type type,
 676				    char *buf,
 677				    int *offset)
 678{
 679	struct pp_hwmgr *hwmgr = handle;
 680
 681	if (!hwmgr || !hwmgr->pm_en)
 682		return -EOPNOTSUPP;
 683
 684	if (!hwmgr->hwmgr_func->emit_clock_levels)
 685		return -ENOENT;
 686
 687	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
 688}
 689
 690static int pp_dpm_print_clock_levels(void *handle,
 691		enum pp_clock_type type, char *buf)
 692{
 693	struct pp_hwmgr *hwmgr = handle;
 694
 695	if (!hwmgr || !hwmgr->pm_en)
 696		return -EINVAL;
 697
 698	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 699		pr_info_ratelimited("%s was not implemented.\n", __func__);
 700		return 0;
 701	}
 702	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 703}
 704
 705static int pp_dpm_get_sclk_od(void *handle)
 706{
 707	struct pp_hwmgr *hwmgr = handle;
 708
 709	if (!hwmgr || !hwmgr->pm_en)
 710		return -EINVAL;
 711
 712	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 713		pr_info_ratelimited("%s was not implemented.\n", __func__);
 714		return 0;
 715	}
 716	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
 717}
 718
 719static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 720{
 721	struct pp_hwmgr *hwmgr = handle;
 722
 723	if (!hwmgr || !hwmgr->pm_en)
 724		return -EINVAL;
 725
 726	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 727		pr_info_ratelimited("%s was not implemented.\n", __func__);
 728		return 0;
 729	}
 730
 731	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
 732}
 733
 734static int pp_dpm_get_mclk_od(void *handle)
 735{
 736	struct pp_hwmgr *hwmgr = handle;
 737
 738	if (!hwmgr || !hwmgr->pm_en)
 739		return -EINVAL;
 740
 741	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 742		pr_info_ratelimited("%s was not implemented.\n", __func__);
 743		return 0;
 744	}
 745	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
 746}
 747
 748static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 749{
 750	struct pp_hwmgr *hwmgr = handle;
 751
 752	if (!hwmgr || !hwmgr->pm_en)
 753		return -EINVAL;
 754
 755	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 756		pr_info_ratelimited("%s was not implemented.\n", __func__);
 757		return 0;
 758	}
 759	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 760}
 761
 762static int pp_dpm_read_sensor(void *handle, int idx,
 763			      void *value, int *size)
 764{
 765	struct pp_hwmgr *hwmgr = handle;
 766
 767	if (!hwmgr || !hwmgr->pm_en || !value)
 768		return -EINVAL;
 769
 770	switch (idx) {
 771	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
 772		*((uint32_t *)value) = hwmgr->pstate_sclk;
 773		return 0;
 774	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
 775		*((uint32_t *)value) = hwmgr->pstate_mclk;
 
 
 
 
 
 
 776		return 0;
 777	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
 778		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
 779		return 0;
 780	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
 781		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
 782		return 0;
 783	default:
 784		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
 785	}
 786}
 787
 788static struct amd_vce_state*
 789pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 790{
 791	struct pp_hwmgr *hwmgr = handle;
 792
 793	if (!hwmgr || !hwmgr->pm_en)
 794		return NULL;
 795
 796	if (idx < hwmgr->num_vce_state_tables)
 797		return &hwmgr->vce_states[idx];
 798	return NULL;
 799}
 800
 801static int pp_get_power_profile_mode(void *handle, char *buf)
 802{
 803	struct pp_hwmgr *hwmgr = handle;
 804
 805	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
 806		return -EOPNOTSUPP;
 807	if (!buf)
 808		return -EINVAL;
 809
 810	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
 811}
 812
 813static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 814{
 815	struct pp_hwmgr *hwmgr = handle;
 816
 817	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
 818		return -EOPNOTSUPP;
 819
 820	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 821		pr_debug("power profile setting is for manual dpm mode only.\n");
 822		return -EINVAL;
 823	}
 824
 825	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
 826}
 827
 828static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
 829{
 830	struct pp_hwmgr *hwmgr = handle;
 831
 832	if (!hwmgr || !hwmgr->pm_en)
 833		return -EINVAL;
 834
 835	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
 836		return 0;
 837
 838	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
 839}
 840
 841static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
 842				 long *input, uint32_t size)
 843{
 844	struct pp_hwmgr *hwmgr = handle;
 845
 846	if (!hwmgr || !hwmgr->pm_en)
 847		return -EINVAL;
 848
 849	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
 850		pr_info_ratelimited("%s was not implemented.\n", __func__);
 851		return 0;
 852	}
 853
 854	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
 855}
 856
 857static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
 858{
 859	struct pp_hwmgr *hwmgr = handle;
 860
 861	if (!hwmgr)
 862		return -EINVAL;
 863
 864	if (!hwmgr->pm_en)
 865		return 0;
 866
 867	if (hwmgr->hwmgr_func->set_mp1_state)
 868		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
 869
 870	return 0;
 871}
 872
/*
 * pp_dpm_switch_power_profile - enable or disable one PP_SMC_POWER_PROFILE_*
 * workload hint, then re-apply the highest-priority profile still active.
 *
 * @type: the profile being toggled; must be below PP_SMC_POWER_PROFILE_CUSTOM.
 * @en:   true to add the profile to the active mask, false to remove it.
 *
 * Returns 0 on success; -EINVAL on a bad handle/type, a missing
 * set_power_profile_mode hook, or a failed compute-performance toggle.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		/* Drop this profile's priority bit, then fall back to the
		 * highest remaining bit (or entry 0 when none are left). */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		/* Raise this profile's priority bit; the highest set bit
		 * selects the profile to apply. */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
				return -EINVAL;
	}

	/* In manual mode the user owns the profile; don't override it. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);

	return 0;
}
 914
 915static int pp_set_power_limit(void *handle, uint32_t limit)
 916{
 917	struct pp_hwmgr *hwmgr = handle;
 918	uint32_t max_power_limit;
 919
 920	if (!hwmgr || !hwmgr->pm_en)
 921		return -EINVAL;
 922
 923	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
 924		pr_info_ratelimited("%s was not implemented.\n", __func__);
 925		return -EINVAL;
 926	}
 927
 928	if (limit == 0)
 929		limit = hwmgr->default_power_limit;
 930
 931	max_power_limit = hwmgr->default_power_limit;
 932	if (hwmgr->od_enabled) {
 933		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
 934		max_power_limit /= 100;
 935	}
 936
 937	if (limit > max_power_limit)
 938		return -EINVAL;
 939
 940	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
 941	hwmgr->power_limit = limit;
 942	return 0;
 943}
 944
 945static int pp_get_power_limit(void *handle, uint32_t *limit,
 946			      enum pp_power_limit_level pp_limit_level,
 947			      enum pp_power_type power_type)
 948{
 949	struct pp_hwmgr *hwmgr = handle;
 950	int ret = 0;
 951
 952	if (!hwmgr || !hwmgr->pm_en ||!limit)
 953		return -EINVAL;
 954
 955	if (power_type != PP_PWR_TYPE_SUSTAINED)
 956		return -EOPNOTSUPP;
 957
 958	switch (pp_limit_level) {
 959		case PP_PWR_LIMIT_CURRENT:
 960			*limit = hwmgr->power_limit;
 961			break;
 962		case PP_PWR_LIMIT_DEFAULT:
 963			*limit = hwmgr->default_power_limit;
 964			break;
 965		case PP_PWR_LIMIT_MAX:
 966			*limit = hwmgr->default_power_limit;
 967			if (hwmgr->od_enabled) {
 968				*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
 969				*limit /= 100;
 970			}
 971			break;
 
 
 
 972		default:
 973			ret = -EOPNOTSUPP;
 974			break;
 975	}
 976
 977	return ret;
 978}
 979
 980static int pp_display_configuration_change(void *handle,
 981	const struct amd_pp_display_configuration *display_config)
 982{
 983	struct pp_hwmgr *hwmgr = handle;
 984
 985	if (!hwmgr || !hwmgr->pm_en)
 986		return -EINVAL;
 987
 988	phm_store_dal_configuration_data(hwmgr, display_config);
 989	return 0;
 990}
 991
 992static int pp_get_display_power_level(void *handle,
 993		struct amd_pp_simple_clock_info *output)
 994{
 995	struct pp_hwmgr *hwmgr = handle;
 996
 997	if (!hwmgr || !hwmgr->pm_en ||!output)
 998		return -EINVAL;
 999
1000	return phm_get_dal_power_level(hwmgr, output);
1001}
1002
/*
 * pp_get_current_clocks - snapshot engine/memory clock info for the
 * display stack.
 *
 * Fills @clocks from the hardware portion of the current power state,
 * using the PowerContainment performance-level designation when that
 * platform cap is set, Activity otherwise.  The shallow-sleep (_in_sr)
 * fields default to the engine clocks and are overwritten when
 * phm_get_current_shallow_sleep_clocks() succeeds.
 *
 * Returns 0 on success, -EINVAL on a bad handle or when the clock info
 * cannot be queried.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/* best effort: simple_clocks stays zeroed if this fails */
	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* a reported level of 0 falls back to the deepest DAL power level */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}
1050
1051static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1052{
1053	struct pp_hwmgr *hwmgr = handle;
1054
1055	if (!hwmgr || !hwmgr->pm_en)
1056		return -EINVAL;
1057
1058	if (clocks == NULL)
1059		return -EINVAL;
1060
1061	return phm_get_clock_by_type(hwmgr, type, clocks);
1062}
1063
1064static int pp_get_clock_by_type_with_latency(void *handle,
1065		enum amd_pp_clock_type type,
1066		struct pp_clock_levels_with_latency *clocks)
1067{
1068	struct pp_hwmgr *hwmgr = handle;
1069
1070	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1071		return -EINVAL;
1072
1073	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1074}
1075
1076static int pp_get_clock_by_type_with_voltage(void *handle,
1077		enum amd_pp_clock_type type,
1078		struct pp_clock_levels_with_voltage *clocks)
1079{
1080	struct pp_hwmgr *hwmgr = handle;
1081
1082	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1083		return -EINVAL;
1084
1085	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1086}
1087
1088static int pp_set_watermarks_for_clocks_ranges(void *handle,
1089		void *clock_ranges)
1090{
1091	struct pp_hwmgr *hwmgr = handle;
1092
1093	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1094		return -EINVAL;
1095
1096	return phm_set_watermarks_for_clocks_ranges(hwmgr,
1097						    clock_ranges);
1098}
1099
1100static int pp_display_clock_voltage_request(void *handle,
1101		struct pp_display_clock_request *clock)
1102{
1103	struct pp_hwmgr *hwmgr = handle;
1104
1105	if (!hwmgr || !hwmgr->pm_en ||!clock)
1106		return -EINVAL;
1107
1108	return phm_display_clock_voltage_request(hwmgr, clock);
1109}
1110
1111static int pp_get_display_mode_validation_clocks(void *handle,
1112		struct amd_pp_simple_clock_info *clocks)
1113{
1114	struct pp_hwmgr *hwmgr = handle;
1115	int ret = 0;
1116
1117	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1118		return -EINVAL;
1119
1120	clocks->level = PP_DAL_POWERLEVEL_7;
1121
1122	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1123		ret = phm_get_max_high_clocks(hwmgr, clocks);
1124
1125	return ret;
1126}
1127
1128static int pp_dpm_powergate_mmhub(void *handle)
1129{
1130	struct pp_hwmgr *hwmgr = handle;
1131
1132	if (!hwmgr || !hwmgr->pm_en)
1133		return -EINVAL;
1134
1135	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1136		pr_info_ratelimited("%s was not implemented.\n", __func__);
1137		return 0;
1138	}
1139
1140	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1141}
1142
1143static int pp_dpm_powergate_gfx(void *handle, bool gate)
1144{
1145	struct pp_hwmgr *hwmgr = handle;
1146
1147	if (!hwmgr || !hwmgr->pm_en)
1148		return 0;
1149
1150	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1151		pr_info_ratelimited("%s was not implemented.\n", __func__);
1152		return 0;
1153	}
1154
1155	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1156}
1157
1158static void pp_dpm_powergate_acp(void *handle, bool gate)
1159{
1160	struct pp_hwmgr *hwmgr = handle;
1161
1162	if (!hwmgr || !hwmgr->pm_en)
1163		return;
1164
1165	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1166		pr_info_ratelimited("%s was not implemented.\n", __func__);
1167		return;
1168	}
1169
1170	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1171}
1172
1173static void pp_dpm_powergate_sdma(void *handle, bool gate)
1174{
1175	struct pp_hwmgr *hwmgr = handle;
1176
1177	if (!hwmgr)
1178		return;
1179
1180	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1181		pr_info_ratelimited("%s was not implemented.\n", __func__);
1182		return;
1183	}
1184
1185	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1186}
1187
1188static int pp_set_powergating_by_smu(void *handle,
1189				uint32_t block_type, bool gate)
1190{
1191	int ret = 0;
1192
1193	switch (block_type) {
1194	case AMD_IP_BLOCK_TYPE_UVD:
1195	case AMD_IP_BLOCK_TYPE_VCN:
1196		pp_dpm_powergate_uvd(handle, gate);
1197		break;
1198	case AMD_IP_BLOCK_TYPE_VCE:
1199		pp_dpm_powergate_vce(handle, gate);
1200		break;
1201	case AMD_IP_BLOCK_TYPE_GMC:
1202		/*
1203		 * For now, this is only used on PICASSO.
1204		 * And only "gate" operation is supported.
1205		 */
1206		if (gate)
1207			pp_dpm_powergate_mmhub(handle);
1208		break;
1209	case AMD_IP_BLOCK_TYPE_GFX:
1210		ret = pp_dpm_powergate_gfx(handle, gate);
1211		break;
1212	case AMD_IP_BLOCK_TYPE_ACP:
1213		pp_dpm_powergate_acp(handle, gate);
1214		break;
1215	case AMD_IP_BLOCK_TYPE_SDMA:
1216		pp_dpm_powergate_sdma(handle, gate);
1217		break;
1218	default:
1219		break;
1220	}
1221	return ret;
1222}
1223
1224static int pp_notify_smu_enable_pwe(void *handle)
1225{
1226	struct pp_hwmgr *hwmgr = handle;
1227
1228	if (!hwmgr || !hwmgr->pm_en)
1229		return -EINVAL;
1230
1231	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1232		pr_info_ratelimited("%s was not implemented.\n", __func__);
1233		return -EINVAL;
1234	}
1235
1236	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1237
1238	return 0;
1239}
1240
1241static int pp_enable_mgpu_fan_boost(void *handle)
1242{
1243	struct pp_hwmgr *hwmgr = handle;
1244
1245	if (!hwmgr)
1246		return -EINVAL;
1247
1248	if (!hwmgr->pm_en ||
1249	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1250		return 0;
1251
1252	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1253
1254	return 0;
1255}
1256
1257static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1258{
1259	struct pp_hwmgr *hwmgr = handle;
1260
1261	if (!hwmgr || !hwmgr->pm_en)
1262		return -EINVAL;
1263
1264	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1265		pr_debug("%s was not implemented.\n", __func__);
1266		return -EINVAL;
1267	}
1268
1269	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1270
1271	return 0;
1272}
1273
1274static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1275{
1276	struct pp_hwmgr *hwmgr = handle;
1277
1278	if (!hwmgr || !hwmgr->pm_en)
1279		return -EINVAL;
1280
1281	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1282		pr_debug("%s was not implemented.\n", __func__);
1283		return -EINVAL;
1284	}
1285
1286	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1287
1288	return 0;
1289}
1290
1291static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1292{
1293	struct pp_hwmgr *hwmgr = handle;
1294
1295	if (!hwmgr || !hwmgr->pm_en)
1296		return -EINVAL;
1297
1298	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1299		pr_debug("%s was not implemented.\n", __func__);
1300		return -EINVAL;
1301	}
1302
1303	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1304
1305	return 0;
1306}
1307
1308static int pp_set_active_display_count(void *handle, uint32_t count)
1309{
1310	struct pp_hwmgr *hwmgr = handle;
1311
1312	if (!hwmgr || !hwmgr->pm_en)
1313		return -EINVAL;
1314
1315	return phm_set_active_display_count(hwmgr, count);
1316}
1317
1318static int pp_get_asic_baco_capability(void *handle, bool *cap)
1319{
1320	struct pp_hwmgr *hwmgr = handle;
1321
1322	*cap = false;
1323	if (!hwmgr)
1324		return -EINVAL;
1325
1326	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1327		!hwmgr->hwmgr_func->get_asic_baco_capability)
1328		return 0;
1329
1330	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1331
1332	return 0;
1333}
1334
1335static int pp_get_asic_baco_state(void *handle, int *state)
1336{
1337	struct pp_hwmgr *hwmgr = handle;
1338
1339	if (!hwmgr)
1340		return -EINVAL;
1341
1342	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1343		return 0;
1344
1345	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1346
1347	return 0;
1348}
1349
1350static int pp_set_asic_baco_state(void *handle, int state)
1351{
1352	struct pp_hwmgr *hwmgr = handle;
1353
1354	if (!hwmgr)
1355		return -EINVAL;
1356
1357	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1358		!hwmgr->hwmgr_func->set_asic_baco_state)
1359		return 0;
1360
1361	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1362
1363	return 0;
1364}
1365
1366static int pp_get_ppfeature_status(void *handle, char *buf)
1367{
1368	struct pp_hwmgr *hwmgr = handle;
1369
1370	if (!hwmgr || !hwmgr->pm_en || !buf)
1371		return -EINVAL;
1372
1373	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1374		pr_info_ratelimited("%s was not implemented.\n", __func__);
1375		return -EINVAL;
1376	}
1377
1378	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1379}
1380
1381static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1382{
1383	struct pp_hwmgr *hwmgr = handle;
1384
1385	if (!hwmgr || !hwmgr->pm_en)
1386		return -EINVAL;
1387
1388	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1389		pr_info_ratelimited("%s was not implemented.\n", __func__);
1390		return -EINVAL;
1391	}
1392
1393	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1394}
1395
1396static int pp_asic_reset_mode_2(void *handle)
1397{
1398	struct pp_hwmgr *hwmgr = handle;
1399
1400	if (!hwmgr || !hwmgr->pm_en)
1401		return -EINVAL;
1402
1403	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1404		pr_info_ratelimited("%s was not implemented.\n", __func__);
1405		return -EINVAL;
1406	}
1407
1408	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1409}
1410
1411static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1412{
1413	struct pp_hwmgr *hwmgr = handle;
1414
1415	if (!hwmgr || !hwmgr->pm_en)
1416		return -EINVAL;
1417
1418	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1419		pr_info_ratelimited("%s was not implemented.\n", __func__);
1420		return -EINVAL;
1421	}
1422
1423	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1424}
1425
1426static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1427{
1428	struct pp_hwmgr *hwmgr = handle;
1429
1430	if (!hwmgr)
1431		return -EINVAL;
1432
1433	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1434		return 0;
1435
1436	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1437
1438	return 0;
1439}
1440
1441static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1442{
1443	struct pp_hwmgr *hwmgr = handle;
1444
1445	if (!hwmgr)
1446		return -EINVAL;
1447
1448	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1449		return 0;
1450
1451	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1452
1453	return 0;
1454}
1455
1456static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1457{
1458	struct pp_hwmgr *hwmgr = handle;
1459
1460	if (!hwmgr)
1461		return -EINVAL;
1462
1463	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1464		return -EOPNOTSUPP;
1465
1466	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1467}
1468
1469static int pp_gfx_state_change_set(void *handle, uint32_t state)
1470{
1471	struct pp_hwmgr *hwmgr = handle;
1472
1473	if (!hwmgr || !hwmgr->pm_en)
1474		return -EINVAL;
1475
1476	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1477		pr_info_ratelimited("%s was not implemented.\n", __func__);
1478		return -EINVAL;
1479	}
1480
1481	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1482	return 0;
1483}
1484
1485static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1486{
1487	struct pp_hwmgr *hwmgr = handle;
1488	struct amdgpu_device *adev = hwmgr->adev;
1489	int err;
1490
1491	if (!addr || !size)
1492		return -EINVAL;
1493
1494	*addr = NULL;
1495	*size = 0;
1496	if (adev->pm.smu_prv_buffer) {
1497		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1498		if (err)
1499			return err;
1500		*size = adev->pm.smu_prv_buffer_size;
1501	}
1502
1503	return 0;
1504}
1505
/*
 * pp_pm_compute_clocks - re-evaluate clocks after a display change.
 *
 * On the non-DC display path the current configuration (active CRTC
 * count, vrefresh, min vblank time) is gathered and pushed to the
 * hwmgr first; with DC enabled that data arrives through other entry
 * points.  Finally a DISPLAY_CONFIG_CHANGE task is dispatched so DPM
 * can settle on new clocks.
 */
static void pp_pm_compute_clocks(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;

	if (!adev->dc_enabled) {
		amdgpu_dpm_get_active_displays(adev);
		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
		/* we have issues with mclk switching with
		 * refresh rates over 120 hz on the non-DC code.
		 */
		if (adev->pm.pm_display_cfg.vrefresh > 120)
			adev->pm.pm_display_cfg.min_vblank_time = 0;

		pp_display_configuration_change(handle,
						&adev->pm.pm_display_cfg);
	}

	pp_dpm_dispatch_tasks(handle,
			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
			      NULL);
}
1530
/*
 * Dispatch table exported to the amdgpu DPM layer: maps the generic
 * amd_pm_funcs interface onto the powerplay/hwmgr wrappers above.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};