   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "pp_debug.h"
  24#include <linux/types.h>
  25#include <linux/kernel.h>
  26#include <linux/gfp.h>
  27#include <linux/slab.h>
  28#include <linux/firmware.h>
  29#include "amd_shared.h"
  30#include "amd_powerplay.h"
  31#include "power_state.h"
  32#include "amdgpu.h"
  33#include "hwmgr.h"
  34
  35
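/*
 * Forward declaration only: the amd_pm_funcs dispatch table itself is
 * defined at the bottom of this file and is installed into
 * adev->powerplay.pp_funcs during early init.
 */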
  36static const struct amd_pm_funcs pp_dpm_funcs;
  37
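/*
 * Allocate and seed the hwmgr instance: cache the ASIC identity, feature
 * mask and display configuration from amdgpu, and publish this file's
 * dispatch table as the powerplay interface for the rest of the driver.
 */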
  38static int amd_powerplay_create(struct amdgpu_device *adev)
  39{
  40	struct pp_hwmgr *hwmgr;
  41
  42	if (adev == NULL)
  43		return -EINVAL;
  44
  45	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
  46	if (hwmgr == NULL)
  47		return -ENOMEM;
  48
  49	hwmgr->adev = adev;
  50	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
  51	hwmgr->device = amdgpu_cgs_create_device(adev);
  52	mutex_init(&hwmgr->smu_lock);
  53	mutex_init(&hwmgr->msg_lock);
  54	hwmgr->chip_family = adev->family;
  55	hwmgr->chip_id = adev->asic_type;
  56	hwmgr->feature_mask = adev->pm.pp_feature;
  57	hwmgr->display_config = &adev->pm.pm_display_cfg;
  58	adev->powerplay.pp_handle = hwmgr;
  59	adev->powerplay.pp_funcs = &pp_dpm_funcs;
  60	return 0;
  61}
  62
  63
  64static void amd_powerplay_destroy(struct amdgpu_device *adev)
  65{
  66	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
  67
  68	mutex_destroy(&hwmgr->msg_lock);
  69
  70	kfree(hwmgr->hardcode_pp_table);
  71	hwmgr->hardcode_pp_table = NULL;
  72
  73	kfree(hwmgr);
  74	hwmgr = NULL;
  75}
  76
  77static int pp_early_init(void *handle)
  78{
  79	int ret;
  80	struct amdgpu_device *adev = handle;
  81
  82	ret = amd_powerplay_create(adev);
  83
  84	if (ret != 0)
  85		return ret;
  86
  87	ret = hwmgr_early_init(adev->powerplay.pp_handle);
  88	if (ret)
  89		return -EINVAL;
  90
  91	return 0;
  92}
  93
  94static int pp_sw_init(void *handle)
  95{
  96	struct amdgpu_device *adev = handle;
  97	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
  98	int ret = 0;
  99
 100	ret = hwmgr_sw_init(hwmgr);
 101
  102	pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");
 103
 104	return ret;
 105}
 106
 107static int pp_sw_fini(void *handle)
 108{
 109	struct amdgpu_device *adev = handle;
 110	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 111
 112	hwmgr_sw_fini(hwmgr);
 113
 114	release_firmware(adev->pm.fw);
 115	adev->pm.fw = NULL;
 116
 117	return 0;
 118}
 119
 120static int pp_hw_init(void *handle)
 121{
 122	int ret = 0;
 123	struct amdgpu_device *adev = handle;
 124	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 125
 126	ret = hwmgr_hw_init(hwmgr);
 127
 128	if (ret)
 129		pr_err("powerplay hw init failed\n");
 130
 131	return ret;
 132}
 133
 134static int pp_hw_fini(void *handle)
 135{
 136	struct amdgpu_device *adev = handle;
 137	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 138
 139	hwmgr_hw_fini(hwmgr);
 140
 141	return 0;
 142}
 143
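/*
 * Carve out the SMU private buffer in GTT and hand both its CPU and GPU
 * addresses to the SMU via notify_cac_buffer_info(); the buffer is freed
 * again if the SMU cannot be notified.
 */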
 144static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
 145{
 146	int r = -EINVAL;
 147	void *cpu_ptr = NULL;
 148	uint64_t gpu_addr;
 149	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 150
 151	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
 152						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 153						&adev->pm.smu_prv_buffer,
 154						&gpu_addr,
 155						&cpu_ptr)) {
 156		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
 157		return;
 158	}
 159
 160	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
 161		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
 162					lower_32_bits((unsigned long)cpu_ptr),
 163					upper_32_bits((unsigned long)cpu_ptr),
 164					lower_32_bits(gpu_addr),
 165					upper_32_bits(gpu_addr),
 166					adev->pm.smu_prv_buffer_size);
 167
 168	if (r) {
 169		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
 170		adev->pm.smu_prv_buffer = NULL;
 171		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
 172	}
 173}
 174
 175static int pp_late_init(void *handle)
 176{
 177	struct amdgpu_device *adev = handle;
 178	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 179
 180	if (hwmgr && hwmgr->pm_en) {
 181		mutex_lock(&hwmgr->smu_lock);
 182		hwmgr_handle_task(hwmgr,
 183					AMD_PP_TASK_COMPLETE_INIT, NULL);
 184		mutex_unlock(&hwmgr->smu_lock);
 185	}
 186	if (adev->pm.smu_prv_buffer_size != 0)
 187		pp_reserve_vram_for_smu(adev);
 188
 189	return 0;
 190}
 191
 192static void pp_late_fini(void *handle)
 193{
 194	struct amdgpu_device *adev = handle;
 195
 196	if (adev->pm.smu_prv_buffer)
 197		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
 198	amd_powerplay_destroy(adev);
 199}
 200
 201
 202static bool pp_is_idle(void *handle)
 203{
 204	return false;
 205}
 206
 207static int pp_wait_for_idle(void *handle)
 208{
 209	return 0;
 210}
 211
 212static int pp_sw_reset(void *handle)
 213{
 214	return 0;
 215}
 216
 217static int pp_set_powergating_state(void *handle,
 218				    enum amd_powergating_state state)
 219{
 220	return 0;
 221}
 222
 223static int pp_suspend(void *handle)
 224{
 225	struct amdgpu_device *adev = handle;
 226	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 227
 228	return hwmgr_suspend(hwmgr);
 229}
 230
 231static int pp_resume(void *handle)
 232{
 233	struct amdgpu_device *adev = handle;
 234	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 235
 236	return hwmgr_resume(hwmgr);
 237}
 238
 239static int pp_set_clockgating_state(void *handle,
 240					  enum amd_clockgating_state state)
 241{
 242	return 0;
 243}
 244
 245static const struct amd_ip_funcs pp_ip_funcs = {
 246	.name = "powerplay",
 247	.early_init = pp_early_init,
 248	.late_init = pp_late_init,
 249	.sw_init = pp_sw_init,
 250	.sw_fini = pp_sw_fini,
 251	.hw_init = pp_hw_init,
 252	.hw_fini = pp_hw_fini,
 253	.late_fini = pp_late_fini,
 254	.suspend = pp_suspend,
 255	.resume = pp_resume,
 256	.is_idle = pp_is_idle,
 257	.wait_for_idle = pp_wait_for_idle,
 258	.soft_reset = pp_sw_reset,
 259	.set_clockgating_state = pp_set_clockgating_state,
 260	.set_powergating_state = pp_set_powergating_state,
 261};
 262
 263const struct amdgpu_ip_block_version pp_smu_ip_block =
 264{
 265	.type = AMD_IP_BLOCK_TYPE_SMC,
 266	.major = 1,
 267	.minor = 0,
 268	.rev = 0,
 269	.funcs = &pp_ip_funcs,
 270};
 271
  272/* This interface is only supported on VI parts,
  273 * because only smu7/8 can help load the gfx/sdma firmware.
  274 * The SMU needs to be enabled before the other IPs' firmware is loaded,
  275 * so start the SMU here to load the smu7 firmware and then the other IPs' firmware.
  276 */
 277static int pp_dpm_load_fw(void *handle)
 278{
 279	struct pp_hwmgr *hwmgr = handle;
 280
 281	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
 282		return -EINVAL;
 283
 284	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
 285		pr_err("fw load failed\n");
 286		return -EINVAL;
 287	}
 288
 289	return 0;
 290}
 291
 292static int pp_dpm_fw_loading_complete(void *handle)
 293{
 294	return 0;
 295}
 296
 297static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 298{
 299	struct pp_hwmgr *hwmgr = handle;
 300
 301	if (!hwmgr || !hwmgr->pm_en)
 302		return -EINVAL;
 303
 304	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
 305		pr_info_ratelimited("%s was not implemented.\n", __func__);
 306		return 0;
 307	}
 308
 309	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
 310}
 311
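/*
 * Entering a profiling ("UMD pstate") level saves the current DPM level and
 * ungates GFX clock/power gating so measurements stay stable; leaving a
 * profiling level restores the saved level and re-enables gating.
 */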
 312static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
 313						enum amd_dpm_forced_level *level)
 314{
 315	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
 316					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
 317					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
 318					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
 319
 320	if (!(hwmgr->dpm_level & profile_mode_mask)) {
 321		/* enter umd pstate, save current level, disable gfx cg*/
 322		if (*level & profile_mode_mask) {
 323			hwmgr->saved_dpm_level = hwmgr->dpm_level;
 324			hwmgr->en_umd_pstate = true;
 325			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 326					AMD_IP_BLOCK_TYPE_GFX,
 327					AMD_PG_STATE_UNGATE);
 328			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 329						AMD_IP_BLOCK_TYPE_GFX,
 330						AMD_CG_STATE_UNGATE);
 331		}
 332	} else {
 333		/* exit umd pstate, restore level, enable gfx cg*/
 334		if (!(*level & profile_mode_mask)) {
 335			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
 336				*level = hwmgr->saved_dpm_level;
 337			hwmgr->en_umd_pstate = false;
 338			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 339					AMD_IP_BLOCK_TYPE_GFX,
 340					AMD_CG_STATE_GATE);
 341			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 342					AMD_IP_BLOCK_TYPE_GFX,
 343					AMD_PG_STATE_GATE);
 344		}
 345	}
 346}
 347
 348static int pp_dpm_force_performance_level(void *handle,
 349					enum amd_dpm_forced_level level)
 350{
 351	struct pp_hwmgr *hwmgr = handle;
 352
 353	if (!hwmgr || !hwmgr->pm_en)
 354		return -EINVAL;
 355
 356	if (level == hwmgr->dpm_level)
 357		return 0;
 358
 359	mutex_lock(&hwmgr->smu_lock);
 360	pp_dpm_en_umd_pstate(hwmgr, &level);
 361	hwmgr->request_dpm_level = level;
 362	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
 363	mutex_unlock(&hwmgr->smu_lock);
 364
 365	return 0;
 366}
 367
 368static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 369								void *handle)
 370{
 371	struct pp_hwmgr *hwmgr = handle;
 372	enum amd_dpm_forced_level level;
 373
 374	if (!hwmgr || !hwmgr->pm_en)
 375		return -EINVAL;
 376
 377	mutex_lock(&hwmgr->smu_lock);
 378	level = hwmgr->dpm_level;
 379	mutex_unlock(&hwmgr->smu_lock);
 380	return level;
 381}
 382
 383static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 384{
 385	struct pp_hwmgr *hwmgr = handle;
 386	uint32_t clk = 0;
 387
 388	if (!hwmgr || !hwmgr->pm_en)
 389		return 0;
 390
 391	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 392		pr_info_ratelimited("%s was not implemented.\n", __func__);
 393		return 0;
 394	}
 395	mutex_lock(&hwmgr->smu_lock);
 396	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
 397	mutex_unlock(&hwmgr->smu_lock);
 398	return clk;
 399}
 400
 401static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 402{
 403	struct pp_hwmgr *hwmgr = handle;
 404	uint32_t clk = 0;
 405
 406	if (!hwmgr || !hwmgr->pm_en)
 407		return 0;
 408
 409	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 410		pr_info_ratelimited("%s was not implemented.\n", __func__);
 411		return 0;
 412	}
 413	mutex_lock(&hwmgr->smu_lock);
 414	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 415	mutex_unlock(&hwmgr->smu_lock);
 416	return clk;
 417}
 418
 419static void pp_dpm_powergate_vce(void *handle, bool gate)
 420{
 421	struct pp_hwmgr *hwmgr = handle;
 422
 423	if (!hwmgr || !hwmgr->pm_en)
 424		return;
 425
 426	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 427		pr_info_ratelimited("%s was not implemented.\n", __func__);
 428		return;
 429	}
 430	mutex_lock(&hwmgr->smu_lock);
 431	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
 432	mutex_unlock(&hwmgr->smu_lock);
 433}
 434
 435static void pp_dpm_powergate_uvd(void *handle, bool gate)
 436{
 437	struct pp_hwmgr *hwmgr = handle;
 438
 439	if (!hwmgr || !hwmgr->pm_en)
 440		return;
 441
 442	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 443		pr_info_ratelimited("%s was not implemented.\n", __func__);
 444		return;
 445	}
 446	mutex_lock(&hwmgr->smu_lock);
 447	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
 448	mutex_unlock(&hwmgr->smu_lock);
 449}
 450
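/*
 * Funnel power-management tasks (display config changes, user state
 * requests, init completion) into hwmgr_handle_task() under the smu lock.
 */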
 451static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 452		enum amd_pm_state_type *user_state)
 453{
 454	int ret = 0;
 455	struct pp_hwmgr *hwmgr = handle;
 456
 457	if (!hwmgr || !hwmgr->pm_en)
 458		return -EINVAL;
 459
 460	mutex_lock(&hwmgr->smu_lock);
 461	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
 462	mutex_unlock(&hwmgr->smu_lock);
 463
 464	return ret;
 465}
 466
 467static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 468{
 469	struct pp_hwmgr *hwmgr = handle;
 470	struct pp_power_state *state;
 471	enum amd_pm_state_type pm_type;
 472
 473	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
 474		return -EINVAL;
 475
 476	mutex_lock(&hwmgr->smu_lock);
 477
 478	state = hwmgr->current_ps;
 479
 480	switch (state->classification.ui_label) {
 481	case PP_StateUILabel_Battery:
 482		pm_type = POWER_STATE_TYPE_BATTERY;
 483		break;
 484	case PP_StateUILabel_Balanced:
 485		pm_type = POWER_STATE_TYPE_BALANCED;
 486		break;
 487	case PP_StateUILabel_Performance:
 488		pm_type = POWER_STATE_TYPE_PERFORMANCE;
 489		break;
 490	default:
 491		if (state->classification.flags & PP_StateClassificationFlag_Boot)
 492			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
 493		else
 494			pm_type = POWER_STATE_TYPE_DEFAULT;
 495		break;
 496	}
 497	mutex_unlock(&hwmgr->smu_lock);
 498
 499	return pm_type;
 500}
 501
 502static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 503{
 504	struct pp_hwmgr *hwmgr = handle;
 505
 506	if (!hwmgr || !hwmgr->pm_en)
 507		return;
 508
 509	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
 510		pr_info_ratelimited("%s was not implemented.\n", __func__);
 511		return;
 512	}
 513	mutex_lock(&hwmgr->smu_lock);
 514	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
 515	mutex_unlock(&hwmgr->smu_lock);
 516}
 517
 518static uint32_t pp_dpm_get_fan_control_mode(void *handle)
 519{
 520	struct pp_hwmgr *hwmgr = handle;
 521	uint32_t mode = 0;
 522
 523	if (!hwmgr || !hwmgr->pm_en)
 524		return 0;
 525
 526	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
 527		pr_info_ratelimited("%s was not implemented.\n", __func__);
 528		return 0;
 529	}
 530	mutex_lock(&hwmgr->smu_lock);
 531	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
 532	mutex_unlock(&hwmgr->smu_lock);
 533	return mode;
 534}
 535
 536static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
 537{
 538	struct pp_hwmgr *hwmgr = handle;
 539	int ret = 0;
 540
 541	if (!hwmgr || !hwmgr->pm_en)
 542		return -EINVAL;
 543
 544	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
 545		pr_info_ratelimited("%s was not implemented.\n", __func__);
 546		return 0;
 547	}
 548	mutex_lock(&hwmgr->smu_lock);
 549	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
 550	mutex_unlock(&hwmgr->smu_lock);
 551	return ret;
 552}
 553
 554static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
 555{
 556	struct pp_hwmgr *hwmgr = handle;
 557	int ret = 0;
 558
 559	if (!hwmgr || !hwmgr->pm_en)
 560		return -EINVAL;
 561
 562	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
 563		pr_info_ratelimited("%s was not implemented.\n", __func__);
 564		return 0;
 565	}
 566
 567	mutex_lock(&hwmgr->smu_lock);
 568	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
 569	mutex_unlock(&hwmgr->smu_lock);
 570	return ret;
 571}
 572
 573static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 574{
 575	struct pp_hwmgr *hwmgr = handle;
 576	int ret = 0;
 577
 578	if (!hwmgr || !hwmgr->pm_en)
 579		return -EINVAL;
 580
 581	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 582		return -EINVAL;
 583
 584	mutex_lock(&hwmgr->smu_lock);
 585	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
 586	mutex_unlock(&hwmgr->smu_lock);
 587	return ret;
 588}
 589
 590static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 591{
 592	struct pp_hwmgr *hwmgr = handle;
 593	int ret = 0;
 594
 595	if (!hwmgr || !hwmgr->pm_en)
 596		return -EINVAL;
 597
 598	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
 599		pr_info_ratelimited("%s was not implemented.\n", __func__);
 600		return 0;
 601	}
 602	mutex_lock(&hwmgr->smu_lock);
 603	ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
 604	mutex_unlock(&hwmgr->smu_lock);
 605	return ret;
 606}
 607
 608static int pp_dpm_get_pp_num_states(void *handle,
 609		struct pp_states_info *data)
 610{
 611	struct pp_hwmgr *hwmgr = handle;
 612	int i;
 613
 614	memset(data, 0, sizeof(*data));
 615
 616	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
 617		return -EINVAL;
 618
 619	mutex_lock(&hwmgr->smu_lock);
 620
 621	data->nums = hwmgr->num_ps;
 622
 623	for (i = 0; i < hwmgr->num_ps; i++) {
 624		struct pp_power_state *state = (struct pp_power_state *)
 625				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
 626		switch (state->classification.ui_label) {
 627		case PP_StateUILabel_Battery:
 628			data->states[i] = POWER_STATE_TYPE_BATTERY;
 629			break;
 630		case PP_StateUILabel_Balanced:
 631			data->states[i] = POWER_STATE_TYPE_BALANCED;
 632			break;
 633		case PP_StateUILabel_Performance:
 634			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
 635			break;
 636		default:
 637			if (state->classification.flags & PP_StateClassificationFlag_Boot)
 638				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
 639			else
 640				data->states[i] = POWER_STATE_TYPE_DEFAULT;
 641		}
 642	}
 643	mutex_unlock(&hwmgr->smu_lock);
 644	return 0;
 645}
 646
 647static int pp_dpm_get_pp_table(void *handle, char **table)
 648{
 649	struct pp_hwmgr *hwmgr = handle;
 650	int size = 0;
 651
 652	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
 653		return -EINVAL;
 654
 655	mutex_lock(&hwmgr->smu_lock);
 656	*table = (char *)hwmgr->soft_pp_table;
 657	size = hwmgr->soft_pp_table_size;
 658	mutex_unlock(&hwmgr->smu_lock);
 659	return size;
 660}
 661
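/*
 * Re-initialize the hardware side of the hwmgr (hw_fini + hw_init) and
 * replay the COMPLETE_INIT task; used after the soft pp table has been
 * overridden via pp_dpm_set_pp_table().
 */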
 662static int amd_powerplay_reset(void *handle)
 663{
 664	struct pp_hwmgr *hwmgr = handle;
 665	int ret;
 666
 667	ret = hwmgr_hw_fini(hwmgr);
 668	if (ret)
 669		return ret;
 670
 671	ret = hwmgr_hw_init(hwmgr);
 672	if (ret)
 673		return ret;
 674
 675	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
 676}
 677
 678static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 679{
 680	struct pp_hwmgr *hwmgr = handle;
 681	int ret = -ENOMEM;
 682
 683	if (!hwmgr || !hwmgr->pm_en)
 684		return -EINVAL;
 685
 686	mutex_lock(&hwmgr->smu_lock);
 687	if (!hwmgr->hardcode_pp_table) {
 688		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 689						   hwmgr->soft_pp_table_size,
 690						   GFP_KERNEL);
 691		if (!hwmgr->hardcode_pp_table)
 692			goto err;
 693	}
 694
 695	memcpy(hwmgr->hardcode_pp_table, buf, size);
 696
 697	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
 698
 699	ret = amd_powerplay_reset(handle);
 700	if (ret)
 701		goto err;
 702
 703	if (hwmgr->hwmgr_func->avfs_control) {
 704		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
 705		if (ret)
 706			goto err;
 707	}
 708	mutex_unlock(&hwmgr->smu_lock);
 709	return 0;
 710err:
 711	mutex_unlock(&hwmgr->smu_lock);
 712	return ret;
 713}
 714
 715static int pp_dpm_force_clock_level(void *handle,
 716		enum pp_clock_type type, uint32_t mask)
 717{
 718	struct pp_hwmgr *hwmgr = handle;
 719	int ret = 0;
 720
 721	if (!hwmgr || !hwmgr->pm_en)
 722		return -EINVAL;
 723
 724	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 725		pr_info_ratelimited("%s was not implemented.\n", __func__);
 726		return 0;
 727	}
 728
 729	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 730		pr_debug("force clock level is for dpm manual mode only.\n");
 731		return -EINVAL;
 732	}
 733
 734	mutex_lock(&hwmgr->smu_lock);
 735	ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 736	mutex_unlock(&hwmgr->smu_lock);
 737	return ret;
 738}
 739
 740static int pp_dpm_print_clock_levels(void *handle,
 741		enum pp_clock_type type, char *buf)
 742{
 743	struct pp_hwmgr *hwmgr = handle;
 744	int ret = 0;
 745
 746	if (!hwmgr || !hwmgr->pm_en)
 747		return -EINVAL;
 748
 749	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 750		pr_info_ratelimited("%s was not implemented.\n", __func__);
 751		return 0;
 752	}
 753	mutex_lock(&hwmgr->smu_lock);
 754	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 755	mutex_unlock(&hwmgr->smu_lock);
 756	return ret;
 757}
 758
 759static int pp_dpm_get_sclk_od(void *handle)
 760{
 761	struct pp_hwmgr *hwmgr = handle;
 762	int ret = 0;
 763
 764	if (!hwmgr || !hwmgr->pm_en)
 765		return -EINVAL;
 766
 767	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 768		pr_info_ratelimited("%s was not implemented.\n", __func__);
 769		return 0;
 770	}
 771	mutex_lock(&hwmgr->smu_lock);
 772	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
 773	mutex_unlock(&hwmgr->smu_lock);
 774	return ret;
 775}
 776
 777static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 778{
 779	struct pp_hwmgr *hwmgr = handle;
 780	int ret = 0;
 781
 782	if (!hwmgr || !hwmgr->pm_en)
 783		return -EINVAL;
 784
 785	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 786		pr_info_ratelimited("%s was not implemented.\n", __func__);
 787		return 0;
 788	}
 789
 790	mutex_lock(&hwmgr->smu_lock);
 791	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
 792	mutex_unlock(&hwmgr->smu_lock);
 793	return ret;
 794}
 795
 796static int pp_dpm_get_mclk_od(void *handle)
 797{
 798	struct pp_hwmgr *hwmgr = handle;
 799	int ret = 0;
 800
 801	if (!hwmgr || !hwmgr->pm_en)
 802		return -EINVAL;
 803
 804	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 805		pr_info_ratelimited("%s was not implemented.\n", __func__);
 806		return 0;
 807	}
 808	mutex_lock(&hwmgr->smu_lock);
 809	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
 810	mutex_unlock(&hwmgr->smu_lock);
 811	return ret;
 812}
 813
 814static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 815{
 816	struct pp_hwmgr *hwmgr = handle;
 817	int ret = 0;
 818
 819	if (!hwmgr || !hwmgr->pm_en)
 820		return -EINVAL;
 821
 822	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 823		pr_info_ratelimited("%s was not implemented.\n", __func__);
 824		return 0;
 825	}
 826	mutex_lock(&hwmgr->smu_lock);
 827	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 828	mutex_unlock(&hwmgr->smu_lock);
 829	return ret;
 830}
 831
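/*
 * Stable-pstate clocks and the fan RPM limits are answered from values
 * cached in the hwmgr; every other sensor goes through the ASIC specific
 * read_sensor() callback under the smu lock.
 */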
 832static int pp_dpm_read_sensor(void *handle, int idx,
 833			      void *value, int *size)
 834{
 835	struct pp_hwmgr *hwmgr = handle;
 836	int ret = 0;
 837
 838	if (!hwmgr || !hwmgr->pm_en || !value)
 839		return -EINVAL;
 840
 841	switch (idx) {
 842	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
 843		*((uint32_t *)value) = hwmgr->pstate_sclk;
 844		return 0;
 845	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
 846		*((uint32_t *)value) = hwmgr->pstate_mclk;
 847		return 0;
 848	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
 849		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
 850		return 0;
 851	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
 852		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
 853		return 0;
 854	default:
 855		mutex_lock(&hwmgr->smu_lock);
 856		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
 857		mutex_unlock(&hwmgr->smu_lock);
 858		return ret;
 859	}
 860}
 861
 862static struct amd_vce_state*
 863pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 864{
 865	struct pp_hwmgr *hwmgr = handle;
 866
 867	if (!hwmgr || !hwmgr->pm_en)
 868		return NULL;
 869
 870	if (idx < hwmgr->num_vce_state_tables)
 871		return &hwmgr->vce_states[idx];
 872	return NULL;
 873}
 874
 875static int pp_get_power_profile_mode(void *handle, char *buf)
 876{
 877	struct pp_hwmgr *hwmgr = handle;
 878
 879	if (!hwmgr || !hwmgr->pm_en || !buf)
 880		return -EINVAL;
 881
 882	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
 883		pr_info_ratelimited("%s was not implemented.\n", __func__);
 884		return snprintf(buf, PAGE_SIZE, "\n");
 885	}
 886
 887	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
 888}
 889
 890static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 891{
 892	struct pp_hwmgr *hwmgr = handle;
 893	int ret = -EINVAL;
 894
 895	if (!hwmgr || !hwmgr->pm_en)
 896		return ret;
 897
 898	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
 899		pr_info_ratelimited("%s was not implemented.\n", __func__);
 900		return ret;
 901	}
 902
 903	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 904		pr_debug("power profile setting is for manual dpm mode only.\n");
 905		return ret;
 906	}
 907
 908	mutex_lock(&hwmgr->smu_lock);
 909	ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
 910	mutex_unlock(&hwmgr->smu_lock);
 911	return ret;
 912}
 913
 914static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
 915{
 916	struct pp_hwmgr *hwmgr = handle;
 917
 918	if (!hwmgr || !hwmgr->pm_en)
 919		return -EINVAL;
 920
 921	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
 922		pr_info_ratelimited("%s was not implemented.\n", __func__);
 923		return -EINVAL;
 924	}
 925
 926	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
 927}
 928
 929static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
 930{
 931	struct pp_hwmgr *hwmgr = handle;
 932
 933	if (!hwmgr)
 934		return -EINVAL;
 935
 936	if (!hwmgr->pm_en)
 937		return 0;
 938
 939	if (hwmgr->hwmgr_func->set_mp1_state)
 940		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
 941
 942	return 0;
 943}
 944
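/*
 * Requested workloads are tracked as bits in workload_mask, ordered by
 * workload_prority[]; the highest-priority workload still enabled selects
 * the workload_setting[] entry sent to the SMU, except in manual mode.
 */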
 945static int pp_dpm_switch_power_profile(void *handle,
 946		enum PP_SMC_POWER_PROFILE type, bool en)
 947{
 948	struct pp_hwmgr *hwmgr = handle;
 949	long workload;
 950	uint32_t index;
 951
 952	if (!hwmgr || !hwmgr->pm_en)
 953		return -EINVAL;
 954
 955	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
 956		pr_info_ratelimited("%s was not implemented.\n", __func__);
 957		return -EINVAL;
 958	}
 959
 960	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
 961		return -EINVAL;
 962
 963	mutex_lock(&hwmgr->smu_lock);
 964
 965	if (!en) {
 966		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
 967		index = fls(hwmgr->workload_mask);
 968		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
 969		workload = hwmgr->workload_setting[index];
 970	} else {
 971		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
 972		index = fls(hwmgr->workload_mask);
 973		index = index <= Workload_Policy_Max ? index - 1 : 0;
 974		workload = hwmgr->workload_setting[index];
 975	}
 976
 977	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
 978		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
 979			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
 980				mutex_unlock(&hwmgr->smu_lock);
 981				return -EINVAL;
 982			}
 983	}
 984
 985	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 986		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
 987	mutex_unlock(&hwmgr->smu_lock);
 988
 989	return 0;
 990}
 991
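/*
 * A limit of 0 restores the default power limit.  With overdrive enabled
 * the ceiling is the default limit scaled by TDPODLimit percent; purely as
 * an illustration, a 200 W default with TDPODLimit = 20 allows up to 240 W.
 */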
 992static int pp_set_power_limit(void *handle, uint32_t limit)
 993{
 994	struct pp_hwmgr *hwmgr = handle;
 995	uint32_t max_power_limit;
 996
 997	if (!hwmgr || !hwmgr->pm_en)
 998		return -EINVAL;
 999
1000	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1001		pr_info_ratelimited("%s was not implemented.\n", __func__);
1002		return -EINVAL;
1003	}
1004
1005	if (limit == 0)
1006		limit = hwmgr->default_power_limit;
1007
1008	max_power_limit = hwmgr->default_power_limit;
1009	if (hwmgr->od_enabled) {
1010		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1011		max_power_limit /= 100;
1012	}
1013
1014	if (limit > max_power_limit)
1015		return -EINVAL;
1016
1017	mutex_lock(&hwmgr->smu_lock);
1018	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1019	hwmgr->power_limit = limit;
1020	mutex_unlock(&hwmgr->smu_lock);
1021	return 0;
1022}
1023
1024static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1025{
1026	struct pp_hwmgr *hwmgr = handle;
1027
1028	if (!hwmgr || !hwmgr->pm_en ||!limit)
1029		return -EINVAL;
1030
1031	mutex_lock(&hwmgr->smu_lock);
1032
1033	if (default_limit) {
1034		*limit = hwmgr->default_power_limit;
1035		if (hwmgr->od_enabled) {
1036			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1037			*limit /= 100;
1038		}
1039	}
1040	else
1041		*limit = hwmgr->power_limit;
1042
1043	mutex_unlock(&hwmgr->smu_lock);
1044
1045	return 0;
1046}
1047
1048static int pp_display_configuration_change(void *handle,
1049	const struct amd_pp_display_configuration *display_config)
1050{
1051	struct pp_hwmgr *hwmgr = handle;
1052
1053	if (!hwmgr || !hwmgr->pm_en)
1054		return -EINVAL;
1055
1056	mutex_lock(&hwmgr->smu_lock);
1057	phm_store_dal_configuration_data(hwmgr, display_config);
1058	mutex_unlock(&hwmgr->smu_lock);
1059	return 0;
1060}
1061
1062static int pp_get_display_power_level(void *handle,
1063		struct amd_pp_simple_clock_info *output)
1064{
1065	struct pp_hwmgr *hwmgr = handle;
1066	int ret = 0;
1067
1068	if (!hwmgr || !hwmgr->pm_en ||!output)
1069		return -EINVAL;
1070
1071	mutex_lock(&hwmgr->smu_lock);
1072	ret = phm_get_dal_power_level(hwmgr, output);
1073	mutex_unlock(&hwmgr->smu_lock);
1074	return ret;
1075}
1076
1077static int pp_get_current_clocks(void *handle,
1078		struct amd_pp_clock_info *clocks)
1079{
1080	struct amd_pp_simple_clock_info simple_clocks = { 0 };
1081	struct pp_clock_info hw_clocks;
1082	struct pp_hwmgr *hwmgr = handle;
1083	int ret = 0;
1084
1085	if (!hwmgr || !hwmgr->pm_en)
1086		return -EINVAL;
1087
1088	mutex_lock(&hwmgr->smu_lock);
1089
1090	phm_get_dal_power_level(hwmgr, &simple_clocks);
1091
1092	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1093					PHM_PlatformCaps_PowerContainment))
1094		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1095					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1096	else
1097		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1098					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1099
1100	if (ret) {
1101		pr_debug("Error in phm_get_clock_info \n");
1102		mutex_unlock(&hwmgr->smu_lock);
1103		return -EINVAL;
1104	}
1105
1106	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1107	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1108	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1109	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1110	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1111	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1112
1113	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1114	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1115
1116	if (simple_clocks.level == 0)
1117		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1118	else
1119		clocks->max_clocks_state = simple_clocks.level;
1120
1121	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1122		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1123		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1124	}
1125	mutex_unlock(&hwmgr->smu_lock);
1126	return 0;
1127}
1128
1129static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1130{
1131	struct pp_hwmgr *hwmgr = handle;
1132	int ret = 0;
1133
1134	if (!hwmgr || !hwmgr->pm_en)
1135		return -EINVAL;
1136
1137	if (clocks == NULL)
1138		return -EINVAL;
1139
1140	mutex_lock(&hwmgr->smu_lock);
1141	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1142	mutex_unlock(&hwmgr->smu_lock);
1143	return ret;
1144}
1145
1146static int pp_get_clock_by_type_with_latency(void *handle,
1147		enum amd_pp_clock_type type,
1148		struct pp_clock_levels_with_latency *clocks)
1149{
1150	struct pp_hwmgr *hwmgr = handle;
1151	int ret = 0;
1152
1153	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1154		return -EINVAL;
1155
1156	mutex_lock(&hwmgr->smu_lock);
1157	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1158	mutex_unlock(&hwmgr->smu_lock);
1159	return ret;
1160}
1161
1162static int pp_get_clock_by_type_with_voltage(void *handle,
1163		enum amd_pp_clock_type type,
1164		struct pp_clock_levels_with_voltage *clocks)
1165{
1166	struct pp_hwmgr *hwmgr = handle;
1167	int ret = 0;
1168
1169	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1170		return -EINVAL;
1171
1172	mutex_lock(&hwmgr->smu_lock);
1173
1174	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1175
1176	mutex_unlock(&hwmgr->smu_lock);
1177	return ret;
1178}
1179
1180static int pp_set_watermarks_for_clocks_ranges(void *handle,
1181		void *clock_ranges)
1182{
1183	struct pp_hwmgr *hwmgr = handle;
1184	int ret = 0;
1185
1186	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1187		return -EINVAL;
1188
1189	mutex_lock(&hwmgr->smu_lock);
1190	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1191			clock_ranges);
1192	mutex_unlock(&hwmgr->smu_lock);
1193
1194	return ret;
1195}
1196
1197static int pp_display_clock_voltage_request(void *handle,
1198		struct pp_display_clock_request *clock)
1199{
1200	struct pp_hwmgr *hwmgr = handle;
1201	int ret = 0;
1202
1203	if (!hwmgr || !hwmgr->pm_en ||!clock)
1204		return -EINVAL;
1205
1206	mutex_lock(&hwmgr->smu_lock);
1207	ret = phm_display_clock_voltage_request(hwmgr, clock);
1208	mutex_unlock(&hwmgr->smu_lock);
1209
1210	return ret;
1211}
1212
1213static int pp_get_display_mode_validation_clocks(void *handle,
1214		struct amd_pp_simple_clock_info *clocks)
1215{
1216	struct pp_hwmgr *hwmgr = handle;
1217	int ret = 0;
1218
1219	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1220		return -EINVAL;
1221
1222	clocks->level = PP_DAL_POWERLEVEL_7;
1223
1224	mutex_lock(&hwmgr->smu_lock);
1225
1226	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1227		ret = phm_get_max_high_clocks(hwmgr, clocks);
1228
1229	mutex_unlock(&hwmgr->smu_lock);
1230	return ret;
1231}
1232
1233static int pp_dpm_powergate_mmhub(void *handle)
1234{
1235	struct pp_hwmgr *hwmgr = handle;
1236
1237	if (!hwmgr || !hwmgr->pm_en)
1238		return -EINVAL;
1239
1240	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1241		pr_info_ratelimited("%s was not implemented.\n", __func__);
1242		return 0;
1243	}
1244
1245	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1246}
1247
1248static int pp_dpm_powergate_gfx(void *handle, bool gate)
1249{
1250	struct pp_hwmgr *hwmgr = handle;
1251
1252	if (!hwmgr || !hwmgr->pm_en)
1253		return 0;
1254
1255	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1256		pr_info_ratelimited("%s was not implemented.\n", __func__);
1257		return 0;
1258	}
1259
1260	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1261}
1262
1263static void pp_dpm_powergate_acp(void *handle, bool gate)
1264{
1265	struct pp_hwmgr *hwmgr = handle;
1266
1267	if (!hwmgr || !hwmgr->pm_en)
1268		return;
1269
1270	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1271		pr_info_ratelimited("%s was not implemented.\n", __func__);
1272		return;
1273	}
1274
1275	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1276}
1277
1278static void pp_dpm_powergate_sdma(void *handle, bool gate)
1279{
1280	struct pp_hwmgr *hwmgr = handle;
1281
1282	if (!hwmgr)
1283		return;
1284
1285	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1286		pr_info_ratelimited("%s was not implemented.\n", __func__);
1287		return;
1288	}
1289
1290	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1291}
1292
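/*
 * Route a powergating request from amdgpu to the matching per-IP helper
 * above; block types this file does not handle are silently ignored.
 */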
1293static int pp_set_powergating_by_smu(void *handle,
1294				uint32_t block_type, bool gate)
1295{
1296	int ret = 0;
1297
1298	switch (block_type) {
1299	case AMD_IP_BLOCK_TYPE_UVD:
1300	case AMD_IP_BLOCK_TYPE_VCN:
1301		pp_dpm_powergate_uvd(handle, gate);
1302		break;
1303	case AMD_IP_BLOCK_TYPE_VCE:
1304		pp_dpm_powergate_vce(handle, gate);
1305		break;
1306	case AMD_IP_BLOCK_TYPE_GMC:
1307		pp_dpm_powergate_mmhub(handle);
1308		break;
1309	case AMD_IP_BLOCK_TYPE_GFX:
1310		ret = pp_dpm_powergate_gfx(handle, gate);
1311		break;
1312	case AMD_IP_BLOCK_TYPE_ACP:
1313		pp_dpm_powergate_acp(handle, gate);
1314		break;
1315	case AMD_IP_BLOCK_TYPE_SDMA:
1316		pp_dpm_powergate_sdma(handle, gate);
1317		break;
1318	default:
1319		break;
1320	}
1321	return ret;
1322}
1323
1324static int pp_notify_smu_enable_pwe(void *handle)
1325{
1326	struct pp_hwmgr *hwmgr = handle;
1327
1328	if (!hwmgr || !hwmgr->pm_en)
1329		return -EINVAL;
1330
1331	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1332		pr_info_ratelimited("%s was not implemented.\n", __func__);
1333		return -EINVAL;
1334	}
1335
1336	mutex_lock(&hwmgr->smu_lock);
1337	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1338	mutex_unlock(&hwmgr->smu_lock);
1339
1340	return 0;
1341}
1342
1343static int pp_enable_mgpu_fan_boost(void *handle)
1344{
1345	struct pp_hwmgr *hwmgr = handle;
1346
1347	if (!hwmgr)
1348		return -EINVAL;
1349
1350	if (!hwmgr->pm_en ||
1351	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1352		return 0;
1353
1354	mutex_lock(&hwmgr->smu_lock);
1355	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1356	mutex_unlock(&hwmgr->smu_lock);
1357
1358	return 0;
1359}
1360
1361static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1362{
1363	struct pp_hwmgr *hwmgr = handle;
1364
1365	if (!hwmgr || !hwmgr->pm_en)
1366		return -EINVAL;
1367
1368	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1369		pr_debug("%s was not implemented.\n", __func__);
1370		return -EINVAL;
1371	}
1372
1373	mutex_lock(&hwmgr->smu_lock);
1374	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1375	mutex_unlock(&hwmgr->smu_lock);
1376
1377	return 0;
1378}
1379
1380static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1381{
1382	struct pp_hwmgr *hwmgr = handle;
1383
1384	if (!hwmgr || !hwmgr->pm_en)
1385		return -EINVAL;
1386
1387	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1388		pr_debug("%s was not implemented.\n", __func__);
1389		return -EINVAL;
1390	}
1391
1392	mutex_lock(&hwmgr->smu_lock);
1393	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1394	mutex_unlock(&hwmgr->smu_lock);
1395
1396	return 0;
1397}
1398
1399static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1400{
1401	struct pp_hwmgr *hwmgr = handle;
1402
1403	if (!hwmgr || !hwmgr->pm_en)
1404		return -EINVAL;
1405
1406	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1407		pr_debug("%s was not implemented.\n", __func__);
1408		return -EINVAL;
1409	}
1410
1411	mutex_lock(&hwmgr->smu_lock);
1412	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1413	mutex_unlock(&hwmgr->smu_lock);
1414
1415	return 0;
1416}
1417
1418static int pp_set_active_display_count(void *handle, uint32_t count)
1419{
1420	struct pp_hwmgr *hwmgr = handle;
1421	int ret = 0;
1422
1423	if (!hwmgr || !hwmgr->pm_en)
1424		return -EINVAL;
1425
1426	mutex_lock(&hwmgr->smu_lock);
1427	ret = phm_set_active_display_count(hwmgr, count);
1428	mutex_unlock(&hwmgr->smu_lock);
1429
1430	return ret;
1431}
1432
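/*
 * BACO (Bus Active, Chip Off) queries are only meaningful on bare metal
 * (hwmgr->not_vf) with DPM enabled and a backend callback present;
 * otherwise the capability is simply reported as false.
 */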
1433static int pp_get_asic_baco_capability(void *handle, bool *cap)
1434{
1435	struct pp_hwmgr *hwmgr = handle;
1436
1437	*cap = false;
1438	if (!hwmgr)
1439		return -EINVAL;
1440
1441	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1442		!hwmgr->hwmgr_func->get_asic_baco_capability)
1443		return 0;
1444
1445	mutex_lock(&hwmgr->smu_lock);
1446	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1447	mutex_unlock(&hwmgr->smu_lock);
1448
1449	return 0;
1450}
1451
1452static int pp_get_asic_baco_state(void *handle, int *state)
1453{
1454	struct pp_hwmgr *hwmgr = handle;
1455
1456	if (!hwmgr)
1457		return -EINVAL;
1458
1459	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1460		return 0;
1461
1462	mutex_lock(&hwmgr->smu_lock);
1463	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1464	mutex_unlock(&hwmgr->smu_lock);
1465
1466	return 0;
1467}
1468
1469static int pp_set_asic_baco_state(void *handle, int state)
1470{
1471	struct pp_hwmgr *hwmgr = handle;
1472
1473	if (!hwmgr)
1474		return -EINVAL;
1475
1476	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1477		!hwmgr->hwmgr_func->set_asic_baco_state)
1478		return 0;
1479
1480	mutex_lock(&hwmgr->smu_lock);
1481	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1482	mutex_unlock(&hwmgr->smu_lock);
1483
1484	return 0;
1485}
1486
1487static int pp_get_ppfeature_status(void *handle, char *buf)
1488{
1489	struct pp_hwmgr *hwmgr = handle;
1490	int ret = 0;
1491
1492	if (!hwmgr || !hwmgr->pm_en || !buf)
1493		return -EINVAL;
1494
1495	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1496		pr_info_ratelimited("%s was not implemented.\n", __func__);
1497		return -EINVAL;
1498	}
1499
1500	mutex_lock(&hwmgr->smu_lock);
1501	ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1502	mutex_unlock(&hwmgr->smu_lock);
1503
1504	return ret;
1505}
1506
1507static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1508{
1509	struct pp_hwmgr *hwmgr = handle;
1510	int ret = 0;
1511
1512	if (!hwmgr || !hwmgr->pm_en)
1513		return -EINVAL;
1514
1515	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1516		pr_info_ratelimited("%s was not implemented.\n", __func__);
1517		return -EINVAL;
1518	}
1519
1520	mutex_lock(&hwmgr->smu_lock);
1521	ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1522	mutex_unlock(&hwmgr->smu_lock);
1523
1524	return ret;
1525}
1526
1527static int pp_asic_reset_mode_2(void *handle)
1528{
1529	struct pp_hwmgr *hwmgr = handle;
1530	int ret = 0;
1531
1532	if (!hwmgr || !hwmgr->pm_en)
1533		return -EINVAL;
1534
1535	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1536		pr_info_ratelimited("%s was not implemented.\n", __func__);
1537		return -EINVAL;
1538	}
1539
1540	mutex_lock(&hwmgr->smu_lock);
1541	ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1542	mutex_unlock(&hwmgr->smu_lock);
1543
1544	return ret;
1545}
1546
1547static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1548{
1549	struct pp_hwmgr *hwmgr = handle;
1550	int ret = 0;
1551
1552	if (!hwmgr || !hwmgr->pm_en)
1553		return -EINVAL;
1554
1555	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1556		pr_info_ratelimited("%s was not implemented.\n", __func__);
1557		return -EINVAL;
1558	}
1559
1560	mutex_lock(&hwmgr->smu_lock);
1561	ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1562	mutex_unlock(&hwmgr->smu_lock);
1563
1564	return ret;
1565}
1566
1567static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1568{
1569	struct pp_hwmgr *hwmgr = handle;
1570
1571	if (!hwmgr)
1572		return -EINVAL;
1573
1574	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1575		return 0;
1576
1577	mutex_lock(&hwmgr->smu_lock);
1578	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1579	mutex_unlock(&hwmgr->smu_lock);
1580
1581	return 0;
1582}
1583
1584static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1585{
1586	struct pp_hwmgr *hwmgr = handle;
1587
1588	if (!hwmgr)
1589		return -EINVAL;
1590
1591	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1592		return 0;
1593
1594	mutex_lock(&hwmgr->smu_lock);
1595	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1596	mutex_unlock(&hwmgr->smu_lock);
1597
1598	return 0;
1599}
1600
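/*
 * Dispatch table exported to amdgpu and DC through adev->powerplay;
 * the entries following the "export to DC" marker are the ones consumed
 * by the display core.
 */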
1601static const struct amd_pm_funcs pp_dpm_funcs = {
1602	.load_firmware = pp_dpm_load_fw,
1603	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1604	.force_performance_level = pp_dpm_force_performance_level,
1605	.get_performance_level = pp_dpm_get_performance_level,
1606	.get_current_power_state = pp_dpm_get_current_power_state,
1607	.dispatch_tasks = pp_dpm_dispatch_tasks,
1608	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
1609	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
1610	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
1611	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
1612	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1613	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1614	.get_pp_num_states = pp_dpm_get_pp_num_states,
1615	.get_pp_table = pp_dpm_get_pp_table,
1616	.set_pp_table = pp_dpm_set_pp_table,
1617	.force_clock_level = pp_dpm_force_clock_level,
1618	.print_clock_levels = pp_dpm_print_clock_levels,
1619	.get_sclk_od = pp_dpm_get_sclk_od,
1620	.set_sclk_od = pp_dpm_set_sclk_od,
1621	.get_mclk_od = pp_dpm_get_mclk_od,
1622	.set_mclk_od = pp_dpm_set_mclk_od,
1623	.read_sensor = pp_dpm_read_sensor,
1624	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
1625	.switch_power_profile = pp_dpm_switch_power_profile,
1626	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
1627	.set_powergating_by_smu = pp_set_powergating_by_smu,
1628	.get_power_profile_mode = pp_get_power_profile_mode,
1629	.set_power_profile_mode = pp_set_power_profile_mode,
1630	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
1631	.set_mp1_state = pp_dpm_set_mp1_state,
1632	.set_power_limit = pp_set_power_limit,
1633	.get_power_limit = pp_get_power_limit,
1634/* export to DC */
1635	.get_sclk = pp_dpm_get_sclk,
1636	.get_mclk = pp_dpm_get_mclk,
1637	.display_configuration_change = pp_display_configuration_change,
1638	.get_display_power_level = pp_get_display_power_level,
1639	.get_current_clocks = pp_get_current_clocks,
1640	.get_clock_by_type = pp_get_clock_by_type,
1641	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1642	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1643	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1644	.display_clock_voltage_request = pp_display_clock_voltage_request,
1645	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1646	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1647	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1648	.set_active_display_count = pp_set_active_display_count,
1649	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1650	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1651	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1652	.get_asic_baco_capability = pp_get_asic_baco_capability,
1653	.get_asic_baco_state = pp_get_asic_baco_state,
1654	.set_asic_baco_state = pp_set_asic_baco_state,
1655	.get_ppfeature_status = pp_get_ppfeature_status,
1656	.set_ppfeature_status = pp_set_ppfeature_status,
1657	.asic_reset_mode_2 = pp_asic_reset_mode_2,
1658	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
1659	.set_df_cstate = pp_set_df_cstate,
1660	.set_xgmi_pstate = pp_set_xgmi_pstate,
1661};
v4.10.11
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
 
  23#include <linux/types.h>
  24#include <linux/kernel.h>
  25#include <linux/gfp.h>
  26#include <linux/slab.h>
 
  27#include "amd_shared.h"
  28#include "amd_powerplay.h"
  29#include "pp_instance.h"
  30#include "power_state.h"
  31#include "eventmanager.h"
  32#include "pp_debug.h"
  33
  34
  35#define PP_CHECK(handle)						\
  36	do {								\
  37		if ((handle) == NULL || (handle)->pp_valid != PP_VALID)	\
  38			return -EINVAL;					\
  39	} while (0)
  40
  41#define PP_CHECK_HW(hwmgr)						\
  42	do {								\
  43		if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL)	\
  44			return 0;					\
  45	} while (0)
  46
  47static int pp_early_init(void *handle)
  48{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  49	return 0;
  50}
  51
  52static int pp_sw_init(void *handle)
 
  53{
  54	struct pp_instance *pp_handle;
  55	struct pp_hwmgr  *hwmgr;
  56	int ret = 0;
  57
  58	if (handle == NULL)
  59		return -EINVAL;
  60
  61	pp_handle = (struct pp_instance *)handle;
  62	hwmgr = pp_handle->hwmgr;
  63
  64	PP_CHECK_HW(hwmgr);
 
 
 
 
 
 
 
  65
  66	if (hwmgr->pptable_func == NULL ||
  67	    hwmgr->pptable_func->pptable_init == NULL ||
  68	    hwmgr->hwmgr_func->backend_init == NULL)
  69		return -EINVAL;
  70
  71	ret = hwmgr->pptable_func->pptable_init(hwmgr);
  72	if (ret)
  73		goto err;
  74
  75	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
  76	if (ret)
  77		goto err1;
  78
  79	pr_info("amdgpu: powerplay initialized\n");
  80
  81	return 0;
  82err1:
  83	if (hwmgr->pptable_func->pptable_fini)
  84		hwmgr->pptable_func->pptable_fini(hwmgr);
  85err:
  86	pr_err("amdgpu: powerplay initialization failed\n");
  87	return ret;
  88}
  89
  90static int pp_sw_fini(void *handle)
  91{
  92	struct pp_instance *pp_handle;
  93	struct pp_hwmgr  *hwmgr;
  94	int ret = 0;
  95
  96	if (handle == NULL)
  97		return -EINVAL;
  98
  99	pp_handle = (struct pp_instance *)handle;
 100	hwmgr = pp_handle->hwmgr;
 101
 102	PP_CHECK_HW(hwmgr);
 
 103
 104	if (hwmgr->hwmgr_func->backend_fini != NULL)
 105		ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
 
 
 
 
 106
 107	if (hwmgr->pptable_func->pptable_fini)
 108		hwmgr->pptable_func->pptable_fini(hwmgr);
 109
 110	return ret;
 111}
 112
 113static int pp_hw_init(void *handle)
 114{
 115	struct pp_instance *pp_handle;
 116	struct pp_smumgr *smumgr;
 117	struct pp_eventmgr *eventmgr;
 118	struct pp_hwmgr  *hwmgr;
 119	int ret = 0;
 
 
 120
 121	if (handle == NULL)
 122		return -EINVAL;
 123
 124	pp_handle = (struct pp_instance *)handle;
 125	smumgr = pp_handle->smu_mgr;
 126	hwmgr = pp_handle->hwmgr;
 127
 128	if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
 129		smumgr->smumgr_funcs->smu_init == NULL ||
 130		smumgr->smumgr_funcs->start_smu == NULL)
 131		return -EINVAL;
 132
 133	ret = smumgr->smumgr_funcs->smu_init(smumgr);
 134	if (ret) {
 135		printk(KERN_ERR "[ powerplay ] smc initialization failed\n");
 136		return ret;
 137	}
 138
 139	ret = smumgr->smumgr_funcs->start_smu(smumgr);
 140	if (ret) {
 141		printk(KERN_ERR "[ powerplay ] smc start failed\n");
 142		smumgr->smumgr_funcs->smu_fini(smumgr);
 143		return ret;
 144	}
 145
 146	PP_CHECK_HW(hwmgr);
 
 147
 148	hw_init_power_state_table(hwmgr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 149
 150	eventmgr = pp_handle->eventmgr;
 151	if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
 152		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 153
 154	ret = eventmgr->pp_eventmgr_init(eventmgr);
 155	return 0;
 156}
 157
 158static int pp_hw_fini(void *handle)
 159{
 160	struct pp_instance *pp_handle;
 161	struct pp_smumgr *smumgr;
 162	struct pp_eventmgr *eventmgr;
 163
 164	if (handle == NULL)
 165		return -EINVAL;
 166
 167	pp_handle = (struct pp_instance *)handle;
 168	eventmgr = pp_handle->eventmgr;
 169
 170	if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
 171		eventmgr->pp_eventmgr_fini(eventmgr);
 172
 173	smumgr = pp_handle->smu_mgr;
 174
 175	if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
 176		smumgr->smumgr_funcs->smu_fini != NULL)
 177		smumgr->smumgr_funcs->smu_fini(smumgr);
 178
 179	return 0;
 180}
 181
 182static bool pp_is_idle(void *handle)
 183{
 184	return false;
 185}
 186
 187static int pp_wait_for_idle(void *handle)
 188{
 189	return 0;
 190}
 191
 192static int pp_sw_reset(void *handle)
 193{
 194	return 0;
 195}
 196
 197
 198int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 199{
 200	struct pp_hwmgr  *hwmgr;
 201
 202	if (handle == NULL)
 203		return -EINVAL;
 204
 205	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 206
 207	PP_CHECK_HW(hwmgr);
 208
 209	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
 210		printk(KERN_INFO "%s was not implemented.\n", __func__);
 211		return 0;
 212	}
 213
 214	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
 215}
 216
 217static int pp_set_powergating_state(void *handle,
 218				    enum amd_powergating_state state)
 219{
 220	struct pp_hwmgr  *hwmgr;
 221
 222	if (handle == NULL)
 223		return -EINVAL;
 224
 225	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 226
 227	PP_CHECK_HW(hwmgr);
 228
 229	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
 230		printk(KERN_INFO "%s was not implemented.\n", __func__);
 231		return 0;
 232	}
 233
 234	/* Enable/disable GFX per cu powergating through SMU */
 235	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
 236			state == AMD_PG_STATE_GATE ? true : false);
 237}
 238
 239static int pp_suspend(void *handle)
 240{
 241	struct pp_instance *pp_handle;
 242	struct pp_eventmgr *eventmgr;
 243	struct pem_event_data event_data = { {0} };
 244
 245	if (handle == NULL)
 246		return -EINVAL;
 247
 248	pp_handle = (struct pp_instance *)handle;
 249	eventmgr = pp_handle->eventmgr;
 250
 251	if (eventmgr != NULL)
 252		pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
 253	return 0;
 254}
 255
 256static int pp_resume(void *handle)
 257{
 258	struct pp_instance *pp_handle;
 259	struct pp_eventmgr *eventmgr;
 260	struct pem_event_data event_data = { {0} };
 261	struct pp_smumgr *smumgr;
 262	int ret;
 263
 264	if (handle == NULL)
 265		return -EINVAL;
 266
 267	pp_handle = (struct pp_instance *)handle;
 268	smumgr = pp_handle->smu_mgr;
 269
 270	if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
 271		smumgr->smumgr_funcs->start_smu == NULL)
 272		return -EINVAL;
 273
 274	ret = smumgr->smumgr_funcs->start_smu(smumgr);
 275	if (ret) {
 276		printk(KERN_ERR "[ powerplay ] smc start failed\n");
 277		smumgr->smumgr_funcs->smu_fini(smumgr);
 278		return ret;
 279	}
 280
 281	eventmgr = pp_handle->eventmgr;
 282	if (eventmgr != NULL)
 283		pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
 284
 
 
 
 285	return 0;
 286}
 287
 288const struct amd_ip_funcs pp_ip_funcs = {
 289	.name = "powerplay",
 290	.early_init = pp_early_init,
 291	.late_init = NULL,
 292	.sw_init = pp_sw_init,
 293	.sw_fini = pp_sw_fini,
 294	.hw_init = pp_hw_init,
 295	.hw_fini = pp_hw_fini,
 
 296	.suspend = pp_suspend,
 297	.resume = pp_resume,
 298	.is_idle = pp_is_idle,
 299	.wait_for_idle = pp_wait_for_idle,
 300	.soft_reset = pp_sw_reset,
 301	.set_clockgating_state = NULL,
 302	.set_powergating_state = pp_set_powergating_state,
 303};
 304
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 305static int pp_dpm_load_fw(void *handle)
 306{
 
 
 
 
 
 
 
 
 
 
 307	return 0;
 308}
 309
 310static int pp_dpm_fw_loading_complete(void *handle)
 311{
 312	return 0;
 313}
 314
 315static int pp_dpm_force_performance_level(void *handle,
 316					enum amd_dpm_forced_level level)
 317{
 318	struct pp_instance *pp_handle;
 319	struct pp_hwmgr  *hwmgr;
 320
 321	if (handle == NULL)
 322		return -EINVAL;
 323
 324	pp_handle = (struct pp_instance *)handle;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 325
 326	hwmgr = pp_handle->hwmgr;
 327
 328	PP_CHECK_HW(hwmgr);
 329
 330	if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
 331		printk(KERN_INFO "%s was not implemented.\n", __func__);
 332		return 0;
 333	}
 334
 335	hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
 336
 337	return 0;
 338}
 339
 340static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 341								void *handle)
 342{
 343	struct pp_hwmgr  *hwmgr;
 344
 345	if (handle == NULL)
 346		return -EINVAL;
 347
 348	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 349
 350	PP_CHECK_HW(hwmgr);
 351
 352	return hwmgr->dpm_level;
 353}
 354
 355static int pp_dpm_get_sclk(void *handle, bool low)
 356{
 357	struct pp_hwmgr  *hwmgr;
 358
 359	if (handle == NULL)
 360		return -EINVAL;
 361
 362	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 363
 364	PP_CHECK_HW(hwmgr);
 365
 366	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 367		printk(KERN_INFO "%s was not implemented.\n", __func__);
 368		return 0;
 369	}
 370
 371	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
 372}
 373
 374static int pp_dpm_get_mclk(void *handle, bool low)
 375{
 376	struct pp_hwmgr  *hwmgr;
 377
 378	if (handle == NULL)
 379		return -EINVAL;
 380
 381	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 382
 383	PP_CHECK_HW(hwmgr);
 384
 385	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 386		printk(KERN_INFO "%s was not implemented.\n", __func__);
 387		return 0;
 388	}
 389
 390	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 391}
 392
 393static int pp_dpm_powergate_vce(void *handle, bool gate)
 394{
 395	struct pp_hwmgr  *hwmgr;
 396
 397	if (handle == NULL)
 398		return -EINVAL;
 399
 400	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 401
 402	PP_CHECK_HW(hwmgr);
 403
 404	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 405		printk(KERN_INFO "%s was not implemented.\n", __func__);
 406		return 0;
 407	}
 408
 409	return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
 410}
 411
 412static int pp_dpm_powergate_uvd(void *handle, bool gate)
 413{
 414	struct pp_hwmgr  *hwmgr;
 415
 416	if (handle == NULL)
 417		return -EINVAL;
 418
 419	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 420
 421	PP_CHECK_HW(hwmgr);
 422
 423	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 424		printk(KERN_INFO "%s was not implemented.\n", __func__);
 425		return 0;
 426	}
 427
 428	return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
 429}
 430
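/* Map the generic amd_pm_state_type onto PowerPlay's UI state labels. */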
 431static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type  state)
 432{
 433	switch (state) {
 434	case POWER_STATE_TYPE_BATTERY:
 435		return PP_StateUILabel_Battery;
 436	case POWER_STATE_TYPE_BALANCED:
 437		return PP_StateUILabel_Balanced;
 438	case POWER_STATE_TYPE_PERFORMANCE:
 439		return PP_StateUILabel_Performance;
 440	default:
 441		return PP_StateUILabel_None;
 442	}
 443}
 444
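/*
 * Translate an amd_pp_event into event-manager traffic.  Only the events
 * handled in the switch below are dispatched; anything else is a no-op.
 * AMD_PP_EVENT_ENABLE_USER_STATE additionally converts the requested
 * power state (passed via @input) into a UI label for the event data.
 */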
 445static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
 446		void *input, void *output)
 447{
 448	int ret = 0;
 449	struct pp_instance *pp_handle;
 450	struct pem_event_data data = { {0} };
 451
 452	pp_handle = (struct pp_instance *)handle;
 453
 454	if (pp_handle == NULL)
 455		return -EINVAL;
 456
 457	if (pp_handle->eventmgr == NULL)
 458		return 0;
 459
 460	switch (event_id) {
 461	case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
 462		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 463		break;
 464	case AMD_PP_EVENT_ENABLE_USER_STATE:
 465	{
 466		enum amd_pm_state_type  ps;
 467
 468		if (input == NULL)
 469			return -EINVAL;
 470		ps = *(unsigned long *)input;
 471
 472		data.requested_ui_label = power_state_convert(ps);
 473		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 474		break;
 475	}
 476	case AMD_PP_EVENT_COMPLETE_INIT:
 477		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 478		break;
 479	case AMD_PP_EVENT_READJUST_POWER_STATE:
 480		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 481		break;
 482	default:
 483		break;
 484	}
 485	return ret;
 486}
 487
 488static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 489{
 490	struct pp_hwmgr *hwmgr;
 491	struct pp_power_state *state;
 492
 493	if (handle == NULL)
 494		return -EINVAL;
 495
 496	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 497
 498	if (hwmgr == NULL || hwmgr->current_ps == NULL)
 499		return -EINVAL;
 500
 501	state = hwmgr->current_ps;
 502
 503	switch (state->classification.ui_label) {
 504	case PP_StateUILabel_Battery:
 505		return POWER_STATE_TYPE_BATTERY;
 506	case PP_StateUILabel_Balanced:
 507		return POWER_STATE_TYPE_BALANCED;
 508	case PP_StateUILabel_Performance:
 509		return POWER_STATE_TYPE_PERFORMANCE;
 510	default:
 511		if (state->classification.flags & PP_StateClassificationFlag_Boot)
 512			return  POWER_STATE_TYPE_INTERNAL_BOOT;
 513		else
 514			return POWER_STATE_TYPE_DEFAULT;
 515	}
 516}
 517
 518static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 519{
 520	struct pp_hwmgr  *hwmgr;
 521
 522	if (handle == NULL)
 523		return -EINVAL;
 524
 525	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 526
 527	PP_CHECK_HW(hwmgr);
 528
 529	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
 530		printk(KERN_INFO "%s was not implemented.\n", __func__);
 531		return 0;
 532	}
 533
 534	return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
 535}
 536
 537static int pp_dpm_get_fan_control_mode(void *handle)
 538{
 539	struct pp_hwmgr  *hwmgr;
 540
 541	if (handle == NULL)
 542		return -EINVAL;
 543
 544	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 545
 546	PP_CHECK_HW(hwmgr);
 547
 548	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
 549		printk(KERN_INFO "%s was not implemented.\n", __func__);
 550		return 0;
 551	}
 552
 553	return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
 554}
 555
 556static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
 557{
 558	struct pp_hwmgr  *hwmgr;
 559
 560	if (handle == NULL)
 561		return -EINVAL;
 562
 563	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 564
 565	PP_CHECK_HW(hwmgr);
 566
 567	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
 568		printk(KERN_INFO "%s was not implemented.\n", __func__);
 569		return 0;
 570	}
 571
 572	return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
 573}
 574
 575static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
 576{
 577	struct pp_hwmgr  *hwmgr;
 578
 579	if (handle == NULL)
 580		return -EINVAL;
 581
 582	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 583
 584	PP_CHECK_HW(hwmgr);
 585
 586	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
 587		printk(KERN_INFO "%s was not implemented.\n", __func__);
 588		return 0;
 589	}
 590
 591	return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
 592}
 593
 594static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 595{
 596	struct pp_hwmgr *hwmgr;
 597
 598	if (handle == NULL)
 599		return -EINVAL;
 600
 601	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 602
 603	PP_CHECK_HW(hwmgr);
 604
 605	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 606		return -EINVAL;
 607
 608	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
 609}
 610
 611static int pp_dpm_get_temperature(void *handle)
 612{
 613	struct pp_hwmgr  *hwmgr;
 614
 615	if (handle == NULL)
 616		return -EINVAL;
 617
 618	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 619
 620	PP_CHECK_HW(hwmgr);
 621
 622	if (hwmgr->hwmgr_func->get_temperature == NULL) {
 623		printk(KERN_INFO "%s was not implemented.\n", __func__);
 624		return 0;
 625	}
 626
 627	return hwmgr->hwmgr_func->get_temperature(hwmgr);
 628}
 629
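/*
 * Report the number of power states and a UI-level classification for
 * each one.  hwmgr->ps is a packed array of hwmgr->num_ps entries, each
 * hwmgr->ps_size bytes long, hence the manual pointer arithmetic below.
 */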
 630static int pp_dpm_get_pp_num_states(void *handle,
 631		struct pp_states_info *data)
 632{
 633	struct pp_hwmgr *hwmgr;
 634	int i;
 635
 636	if (!handle)
 637		return -EINVAL;
 638
 639	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 640
 641	if (hwmgr == NULL || hwmgr->ps == NULL)
 642		return -EINVAL;
 643
 644	data->nums = hwmgr->num_ps;
 645
 646	for (i = 0; i < hwmgr->num_ps; i++) {
 647		struct pp_power_state *state = (struct pp_power_state *)
 648				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
 649		switch (state->classification.ui_label) {
 650		case PP_StateUILabel_Battery:
 651			data->states[i] = POWER_STATE_TYPE_BATTERY;
 652			break;
 653		case PP_StateUILabel_Balanced:
 654			data->states[i] = POWER_STATE_TYPE_BALANCED;
 655			break;
 656		case PP_StateUILabel_Performance:
 657			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
 658			break;
 659		default:
 660			if (state->classification.flags & PP_StateClassificationFlag_Boot)
 661				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
 662			else
 663				data->states[i] = POWER_STATE_TYPE_DEFAULT;
 664		}
 665	}
 666
 667	return 0;
 668}
 669
 670static int pp_dpm_get_pp_table(void *handle, char **table)
 671{
 672	struct pp_hwmgr *hwmgr;
 673
 674	if (!handle)
 675		return -EINVAL;
 676
 677	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 678
 679	PP_CHECK_HW(hwmgr);
 680
 681	if (!hwmgr->soft_pp_table)
 682		return -EINVAL;
 683
 684	*table = (char *)hwmgr->soft_pp_table;
 685
 686	return hwmgr->soft_pp_table_size;
 687}
 688
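/*
 * Install a caller-supplied soft power-play table and re-initialise the
 * stack via amd_powerplay_reset().  Note that @size is trusted to be no
 * larger than the original soft_pp_table_size, since the backing buffer
 * is allocated with that size.
 */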
 689static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 690{
 691	struct pp_hwmgr *hwmgr;
 692
 693	if (!handle)
 694		return -EINVAL;
 695
 696	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 697
 698	PP_CHECK_HW(hwmgr);
 699
 700	if (!hwmgr->hardcode_pp_table) {
 701		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 702						   hwmgr->soft_pp_table_size,
 703						   GFP_KERNEL);
 704
 705		if (!hwmgr->hardcode_pp_table)
 706			return -ENOMEM;
 707	}
 708
 709	memcpy(hwmgr->hardcode_pp_table, buf, size);
 710
 711	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
 712
 713	return amd_powerplay_reset(handle);
 714}
 715
 716static int pp_dpm_force_clock_level(void *handle,
 717		enum pp_clock_type type, uint32_t mask)
 718{
 719	struct pp_hwmgr *hwmgr;
 720
 721	if (!handle)
 722		return -EINVAL;
 723
 724	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 725
 726	PP_CHECK_HW(hwmgr);
 727
 728	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 729		printk(KERN_INFO "%s was not implemented.\n", __func__);
 730		return 0;
 731	}
 732
 733	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 734}
 735
 736static int pp_dpm_print_clock_levels(void *handle,
 737		enum pp_clock_type type, char *buf)
 738{
 739	struct pp_hwmgr *hwmgr;
 740
 741	if (!handle)
 742		return -EINVAL;
 743
 744	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 745
 746	PP_CHECK_HW(hwmgr);
 747
 748	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 749		printk(KERN_INFO "%s was not implemented.\n", __func__);
 750		return 0;
 751	}
 752	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 753}
 754
 755static int pp_dpm_get_sclk_od(void *handle)
 756{
 757	struct pp_hwmgr *hwmgr;
 758
 759	if (!handle)
 760		return -EINVAL;
 761
 762	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 763
 764	PP_CHECK_HW(hwmgr);
 765
 766	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 767		printk(KERN_INFO "%s was not implemented.\n", __func__);
 768		return 0;
 769	}
 770
 771	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
 772}
 773
 774static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 775{
 776	struct pp_hwmgr *hwmgr;
 777
 778	if (!handle)
 779		return -EINVAL;
 780
 781	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 782
 783	PP_CHECK_HW(hwmgr);
 784
 785	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 786		printk(KERN_INFO "%s was not implemented.\n", __func__);
 787		return 0;
 788	}
 789
 790	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
 791}
 792
 793static int pp_dpm_get_mclk_od(void *handle)
 794{
 795	struct pp_hwmgr *hwmgr;
 796
 797	if (!handle)
 798		return -EINVAL;
 799
 800	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 801
 802	PP_CHECK_HW(hwmgr);
 803
 804	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 805		printk(KERN_INFO "%s was not implemented.\n", __func__);
 806		return 0;
 807	}
 808
 809	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
 810}
 811
 812static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 813{
 814	struct pp_hwmgr *hwmgr;
 815
 816	if (!handle)
 817		return -EINVAL;
 818
 819	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 820
 821	PP_CHECK_HW(hwmgr);
 822
 823	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 824		printk(KERN_INFO "%s was not implemented.\n", __func__);
 825		return 0;
 826	}
 827
 828	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 829}
 830
 831static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
 832{
 833	struct pp_hwmgr *hwmgr;
 834
 835	if (!handle)
 836		return -EINVAL;
 837
 838	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 839
 840	PP_CHECK_HW(hwmgr);
 841
 842	if (hwmgr->hwmgr_func->read_sensor == NULL) {
 843		printk(KERN_INFO "%s was not implemented.\n", __func__);
 844		return 0;
 845	}
 846
 847	return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
 848}
 849
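/*
 * Return the idx'th entry of hwmgr->vce_states, or NULL when the handle
 * is missing or idx is out of range.
 */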
 850static struct amd_vce_state*
 851pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 852{
 853	struct pp_hwmgr *hwmgr;
 854
 855	if (handle) {
 856		hwmgr = ((struct pp_instance *)handle)->hwmgr;
 857
 858		if (hwmgr && idx < hwmgr->num_vce_state_tables)
 859			return &hwmgr->vce_states[idx];
 860	}
 861
 862	return NULL;
 863}
 864
 865const struct amd_powerplay_funcs pp_dpm_funcs = {
 866	.get_temperature = pp_dpm_get_temperature,
 867	.load_firmware = pp_dpm_load_fw,
 868	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
 869	.force_performance_level = pp_dpm_force_performance_level,
 870	.get_performance_level = pp_dpm_get_performance_level,
 871	.get_current_power_state = pp_dpm_get_current_power_state,
 872	.get_sclk = pp_dpm_get_sclk,
 873	.get_mclk = pp_dpm_get_mclk,
 874	.powergate_vce = pp_dpm_powergate_vce,
 875	.powergate_uvd = pp_dpm_powergate_uvd,
 876	.dispatch_tasks = pp_dpm_dispatch_tasks,
 877	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
 878	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
 879	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
 880	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
 881	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
 882	.get_pp_num_states = pp_dpm_get_pp_num_states,
 883	.get_pp_table = pp_dpm_get_pp_table,
 884	.set_pp_table = pp_dpm_set_pp_table,
 885	.force_clock_level = pp_dpm_force_clock_level,
 886	.print_clock_levels = pp_dpm_print_clock_levels,
 887	.get_sclk_od = pp_dpm_get_sclk_od,
 888	.set_sclk_od = pp_dpm_set_sclk_od,
 889	.get_mclk_od = pp_dpm_get_mclk_od,
 890	.set_mclk_od = pp_dpm_set_mclk_od,
 891	.read_sensor = pp_dpm_read_sensor,
 892	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
 893};
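/*
 * Illustrative only (not code from this file): a client that obtained its
 * handles from amd_powerplay_init() below would reach the table above
 * roughly like this:
 *
 *	const struct amd_powerplay_funcs *funcs = amd_pp->pp_funcs;
 *	int temp = funcs->get_temperature(amd_pp->pp_handle);
 *
 * Field names beyond pp_handle/pp_funcs follow what amd_powerplay_init()
 * fills in.
 */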
 894
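/*
 * Bring up one PowerPlay instance: the SMU manager is always initialised;
 * the hardware and event managers are skipped when DPM is disabled on the
 * module command line (amdgpu_dpm == 0) or when running virtualised.
 * The fail_* labels unwind in reverse order.
 */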
 895static int amd_pp_instance_init(struct amd_pp_init *pp_init,
 896				struct amd_powerplay *amd_pp)
 897{
 898	int ret;
 899	struct pp_instance *handle;
 900
 901	handle = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
 902	if (handle == NULL)
 903		return -ENOMEM;
 904
 905	handle->pp_valid = PP_VALID;
 906
 907	ret = smum_init(pp_init, handle);
 908	if (ret)
 909		goto fail_smum;
 910
 911
 912	amd_pp->pp_handle = handle;
 913
 914	if ((amdgpu_dpm == 0)
 915		|| cgs_is_virtualization_enabled(pp_init->device))
 916		return 0;
 917
 918	ret = hwmgr_init(pp_init, handle);
 919	if (ret)
 920		goto fail_hwmgr;
 921
 922	ret = eventmgr_init(handle);
 923	if (ret)
 924		goto fail_eventmgr;
 925
 926	return 0;
 927
 928fail_eventmgr:
 929	hwmgr_fini(handle->hwmgr);
 930fail_hwmgr:
 931	smum_fini(handle->smu_mgr);
 932fail_smum:
 933	kfree(handle);
 934	return ret;
 935}
 936
 937static int amd_pp_instance_fini(void *handle)
 938{
 939	struct pp_instance *instance = (struct pp_instance *)handle;
 940
 941	if (instance == NULL)
 942		return -EINVAL;
 943
 944	if ((amdgpu_dpm != 0)
 945		&& !cgs_is_virtualization_enabled(instance->smu_mgr->device)) {
 946		eventmgr_fini(instance->eventmgr);
 947		hwmgr_fini(instance->hwmgr);
 948	}
 949
 950	smum_fini(instance->smu_mgr);
 951	kfree(handle);
 952	return 0;
 953}
 954
 955int amd_powerplay_init(struct amd_pp_init *pp_init,
 956		       struct amd_powerplay *amd_pp)
 957{
 958	int ret;
 959
 960	if (pp_init == NULL || amd_pp == NULL)
 961		return -EINVAL;
 962
 963	ret = amd_pp_instance_init(pp_init, amd_pp);
 964
 965	if (ret)
 966		return ret;
 967
 968	amd_pp->ip_funcs = &pp_ip_funcs;
 969	amd_pp->pp_funcs = &pp_dpm_funcs;
 970
 971	return 0;
 972}
 973
 974int amd_powerplay_fini(void *handle)
 975{
 976	amd_pp_instance_fini(handle);
 977
 978	return 0;
 979}
 980
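/*
 * Soft reset of the PowerPlay stack: tear down the event manager and the
 * software state, drop the cached power-state array, then redo sw init,
 * rebuild the power-state table (unless DPM is disabled or virtualised)
 * and re-run the event manager init, finishing with COMPLETE_INIT.
 */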
 981int amd_powerplay_reset(void *handle)
 982{
 983	struct pp_instance *instance = (struct pp_instance *)handle;
 984	struct pp_eventmgr *eventmgr;
 985	struct pem_event_data event_data = { {0} };
 986	int ret;
 987
 988	if (instance == NULL)
 989		return -EINVAL;
 990
 991	eventmgr = instance->eventmgr;
 992	if (!eventmgr || !eventmgr->pp_eventmgr_fini)
 993		return -EINVAL;
 994
 995	eventmgr->pp_eventmgr_fini(eventmgr);
 996
 997	ret = pp_sw_fini(handle);
 998	if (ret)
 999		return ret;
1000
1001	kfree(instance->hwmgr->ps);
1002
1003	ret = pp_sw_init(handle);
1004	if (ret)
1005		return ret;
1006
1007	if ((amdgpu_dpm == 0)
1008		|| cgs_is_virtualization_enabled(instance->smu_mgr->device))
1009		return 0;
1010
1011	hw_init_power_state_table(instance->hwmgr);
1012
1013	if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
1014		return -EINVAL;
1015
1016	ret = eventmgr->pp_eventmgr_init(eventmgr);
1017	if (ret)
1018		return ret;
1019
1020	return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
1021}
1022
1023/* export this function to DAL */
1024
1025int amd_powerplay_display_configuration_change(void *handle,
1026	const struct amd_pp_display_configuration *display_config)
1027{
1028	struct pp_hwmgr  *hwmgr;
1029
1030	PP_CHECK((struct pp_instance *)handle);
1031
1032	hwmgr = ((struct pp_instance *)handle)->hwmgr;
1033
1034	PP_CHECK_HW(hwmgr);
1035
1036	phm_store_dal_configuration_data(hwmgr, display_config);
1037
1038	return 0;
1039}
1040
1041int amd_powerplay_get_display_power_level(void *handle,
1042		struct amd_pp_simple_clock_info *output)
1043{
1044	struct pp_hwmgr  *hwmgr;
1045
1046	PP_CHECK((struct pp_instance *)handle);
1047
1048	if (output == NULL)
1049		return -EINVAL;
1050
1051	hwmgr = ((struct pp_instance *)handle)->hwmgr;
1052
1053	PP_CHECK_HW(hwmgr);
1054
1055	return phm_get_dal_power_level(hwmgr, output);
1056}
1057
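/*
 * Fill @clocks from the current power state.  The clock info is taken
 * with the PowerContainment designation when that platform cap is set,
 * otherwise with the Activity designation; shallow-sleep limits override
 * the *_in_sr fields when they can be queried.
 */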
1058int amd_powerplay_get_current_clocks(void *handle,
1059		struct amd_pp_clock_info *clocks)
1060{
1061	struct pp_hwmgr  *hwmgr;
1062	struct amd_pp_simple_clock_info simple_clocks;
1063	struct pp_clock_info hw_clocks;
1064
1065	PP_CHECK((struct pp_instance *)handle);
1066
1067	if (clocks == NULL)
1068		return -EINVAL;
1069
1070	hwmgr = ((struct pp_instance *)handle)->hwmgr;
1071
1072	PP_CHECK_HW(hwmgr);
1073
1074	phm_get_dal_power_level(hwmgr, &simple_clocks);
1075
1076	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
1077		if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment))
1078			PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1);
1079	} else {
1080		if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity))
1081			PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1);
1082	}
1083
1084	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1085	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1086	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1087	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1088	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1089	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1090
1091	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1092	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1093
1094	clocks->max_clocks_state = simple_clocks.level;
1095
1096	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1097		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1098		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1099	}
1100
1101	return 0;
1102
1103}
1104
1105int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1106{
1107	int result = -1;
1108
1109	struct pp_hwmgr *hwmgr;
1110
1111	PP_CHECK((struct pp_instance *)handle);
1112
1113	if (clocks == NULL)
1114		return -EINVAL;
1115
1116	hwmgr = ((struct pp_instance *)handle)->hwmgr;
1117
1118	PP_CHECK_HW(hwmgr);
1119
1120	result = phm_get_clock_by_type(hwmgr, type, clocks);
1121
1122	return result;
1123}
1124
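/*
 * Only meaningful when PHM_PlatformCaps_DynamicPatchPowerState is
 * enabled; otherwise the initial -1 is returned unchanged.
 */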
1125int amd_powerplay_get_display_mode_validation_clocks(void *handle,
1126		struct amd_pp_simple_clock_info *clocks)
1127{
1128	int result = -1;
1129	struct pp_hwmgr  *hwmgr;
1130
1131	PP_CHECK((struct pp_instance *)handle);
1132
1133	if (clocks == NULL)
1134		return -EINVAL;
1135
1136	hwmgr = ((struct pp_instance *)handle)->hwmgr;
1137
1138	PP_CHECK_HW(hwmgr);
1139
1140	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1141		result = phm_get_max_high_clocks(hwmgr, clocks);
1142
1143	return result;
1144}
1145