Linux Audio

Check our new training course

Linux kernel drivers training

Mar 31-Apr 9, 2025, special US time zones
Register
Loading...
Note: File does not exist in v6.9.4.
   1/*
   2 * Copyright 2017 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/delay.h>
  25#include <linux/fb.h>
  26#include <linux/module.h>
  27#include <linux/slab.h>
  28
  29#include "hwmgr.h"
  30#include "amd_powerplay.h"
  31#include "vega12_smumgr.h"
  32#include "hardwaremanager.h"
  33#include "ppatomfwctrl.h"
  34#include "atomfirmware.h"
  35#include "cgs_common.h"
  36#include "vega12_inc.h"
  37#include "pppcielanes.h"
  38#include "vega12_hwmgr.h"
  39#include "vega12_processpptables.h"
  40#include "vega12_pptable.h"
  41#include "vega12_thermal.h"
  42#include "vega12_ppsmc.h"
  43#include "pp_debug.h"
  44#include "amd_pcie_helpers.h"
  45#include "ppinterrupt.h"
  46#include "pp_overdriver.h"
  47#include "pp_thermal.h"
  48#include "vega12_baco.h"
  49
  50
  51static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
  52		enum pp_clock_type type, uint32_t mask);
  53static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
  54		uint32_t *clock,
  55		PPCLK_e clock_select,
  56		bool max);
  57
/*
 * Seed the vega12 backend with driver-default tuning values.
 * These stand in for values a registry/CCC would normally supply;
 * everything is hard-coded to the shipped defaults here.
 */
static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	/* Smoothing alphas for the SMU's averaged clock/activity metrics. */
	data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;

	/* Display voltage mode and per-clock quadratic equation coefficients. */
	data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;

	/* Feature gates: nothing disallowed, thermal on, BACO hw not skipped. */
	data->registry_data.disallowed_features = 0x0;
	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	/*
	 * DiDt is disabled by default; the sub-option defaults below are only
	 * reachable if didt_support is flipped on at build time.
	 */
	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	/* 0xff / 0xffffffff presumably mean "no override" for PCIe settings
	 * — TODO confirm against the consumers of these fields. */
	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.odn_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.auto_wattman_threshold = 50;
}
 130
/*
 * Translate the registry defaults and device power-gating flags into the
 * PHM platform capability bitmap that the rest of powerplay consults.
 * Always returns 0.
 */
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* UVD/VCE power gating caps follow the device's pg_flags. */
	if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDynamicPowerGating);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	/* ODN (new overdrive) supersedes the legacy OD6 caps. */
	if (data->registry_data.odn_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODNinACSupport);
	else {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6inACSupport);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.odn_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODNinDCSupport);
		else {
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6inDCSupport);
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport);
		}
	}

	/* Fuzzy fan control needs thermal support plus a valid TMax. */
	if (data->registry_data.thermal_support
			&& data->registry_data.fuzzy_fan_control_support
			&& hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* ...then re-enable only the DiDt variants the registry asked for. */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	/* Quick transition overrides the GPIO-based AC/DC scheme above. */
	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}
 288
/*
 * Map each driver-side GNLD_* feature slot to its SMU FEATURE_*_BIT,
 * precompute the per-feature bitmask, apply the disallowed-features
 * mask from the registry defaults, and read the chip serial number
 * from the SMC to form adev->unique_id.
 */
static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;

	/* Bit i of disallowed_features vetoes driver-feature slot i. */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	/* NOTE(review): bottom32 lands in the high word — looks inverted but
	 * matches the sibling vegaXX hwmgrs; confirm before "fixing". */
	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
 367
 368static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
 369{
 370	return 0;
 371}
 372
/* Tear down the hwmgr backend: free the vega12 private data blob. */
static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;	/* avoid a dangling pointer on re-init */

	return 0;
}
 380
 381static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 382{
 383	int result = 0;
 384	struct vega12_hwmgr *data;
 385	struct amdgpu_device *adev = hwmgr->adev;
 386
 387	data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
 388	if (data == NULL)
 389		return -ENOMEM;
 390
 391	hwmgr->backend = data;
 392
 393	vega12_set_default_registry_data(hwmgr);
 394
 395	data->disable_dpm_mask = 0xff;
 396	data->workload_mask = 0xff;
 397
 398	/* need to set voltage control types before EVV patching */
 399	data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
 400	data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
 401	data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
 402
 403	data->water_marks_bitmap = 0;
 404	data->avfs_exist = false;
 405
 406	vega12_set_features_platform_caps(hwmgr);
 407
 408	vega12_init_dpm_defaults(hwmgr);
 409
 410	/* Parse pptable data read from VBIOS */
 411	vega12_set_private_data_based_on_pptable(hwmgr);
 412
 413	data->is_tlu_enabled = false;
 414
 415	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 416			VEGA12_MAX_HARDWARE_POWERLEVELS;
 417	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
 418	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
 419
 420	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
 421	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
 422	hwmgr->platform_descriptor.clockStep.engineClock = 500;
 423	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
 424
 425	data->total_active_cus = adev->gfx.cu_info.number;
 426	/* Setup default Overdrive Fan control settings */
 427	data->odn_fan_table.target_fan_speed =
 428			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
 429	data->odn_fan_table.target_temperature =
 430			hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
 431	data->odn_fan_table.min_performance_clock =
 432			hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
 433	data->odn_fan_table.min_fan_limit =
 434			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
 435			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
 436
 437	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
 438		data->gfxoff_controlled_by_driver = true;
 439	else
 440		data->gfxoff_controlled_by_driver = false;
 441
 442	return result;
 443}
 444
 445static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
 446{
 447	struct vega12_hwmgr *data =
 448			(struct vega12_hwmgr *)(hwmgr->backend);
 449
 450	data->low_sclk_interrupt_threshold = 0;
 451
 452	return 0;
 453}
 454
/*
 * Per-ASIC setup step run by the powerplay core: currently only
 * initializes the SCLK interrupt threshold.
 * Returns 0 on success, -EINVAL if the threshold init fails.
 */
static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
			return -EINVAL);

	return 0;
}
 463
/*
 * @fn vega12_init_dpm_state
 * @brief Initialize a DPM table's limits: Soft/Hard Min to 0x0 and
 *        Soft/Hard Max to 0xffff (i.e. unrestricted).
 *
 * @param    dpm_state - the address of the DPM Table to initialize.
 * @return   None.
 */
 471static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
 472{
 473	dpm_state->soft_min_level = 0x0;
 474	dpm_state->soft_max_level = 0xffff;
 475	dpm_state->hard_min_level = 0x0;
 476	dpm_state->hard_max_level = 0xffff;
 477}
 478
/*
 * Query the SMU for how many DPM levels exist for @clk_id.
 * On success *num_of_levels holds the (non-zero) level count.
 * Returns 0 on success, the SMU error on message failure, or
 * -EINVAL if the SMU reports zero levels.
 */
static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	/* clock ID in the top 16 bits; index 0xFF asks for the level count */
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF));
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	*num_of_levels = smum_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE(*num_of_levels > 0,
			"[GetNumOfDpmLevel] number of clk levels is invalid!",
			return -EINVAL);

	return ret;
}
 498
/*
 * Ask the SMU for the frequency of DPM level @index of clock @clkID;
 * the result is stored in *clock. Returns 0 on success, -EINVAL if
 * the SMU message fails.
 */
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
	int result = 0;

	/*
	 *SMU expects the Clock ID to be in the top 16 bits.
	 *Lower 16 bits specify the level
	 */
	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
		PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
		"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
		return -EINVAL);

	*clock = smum_get_argument(hwmgr);

	return result;
}
 517
/*
 * Populate one driver-side DPM table for @clk_id by querying the SMU
 * for the level count and then each level's frequency. All levels are
 * marked enabled. Returns 0 on success or the first SMU error seen.
 */
static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk of specific level!",
			return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}
 542
 543/*
 544 * This function is to initialize all DPM state tables
 545 * for SMU based on the dependency table.
 546 * Dynamic state patching function will then trim these
 547 * state tables to the allowed range based
 548 * on the power policy or external client requests,
 549 * such as UVD request, etc.
 550 */
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{

	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/*
	 * Pattern below, per clock domain: if its DPM feature is enabled,
	 * fetch the full level table from the SMU; otherwise fall back to a
	 * single level at the VBIOS boot clock (in 10 kHz units, hence /100)
	 * or, for display-derived clocks, an empty table. In every case the
	 * soft/hard limits are then reset to the unrestricted defaults.
	 */

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk (shares the UVD DPM feature with vclk) */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk - no boot-state fallback, table left empty when disabled */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega12_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega12_dpm_table));

	return 0;
}
 691
#if 0
/*
 * Dead code (compiled out): seeded the default gfx/compute power
 * profiles from the gfx DPM table, restricting compute to the two
 * highest levels. Kept for reference only; consider deleting.
 */
static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	uint32_t min_level;

	hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
	hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;

	/* Optimize compute power profile: Use only highest
	 * 2 power levels (if more than 2 are available)
	 */
	if (dpm_table->count > 2)
		min_level = dpm_table->count - 2;
	else if (dpm_table->count == 2)
		min_level = 1;
	else
		min_level = 0;

	hwmgr->default_compute_power_profile.min_sclk =
			dpm_table->dpm_levels[min_level].value;

	hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
	hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;

	return 0;
}
#endif
 721
/**
* Initializes the SMC table and uploads it
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   0 on success; an error code if uploading the PPTable to the
*           SMU fails. A failure to read VBIOS boot-up values is not
*           fatal: the deep-sleep DCEF clock message is simply skipped.
*/
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	/* Cache VBIOS boot-up state; on failure the cache and the deep-sleep
	 * DCEF message are skipped but table upload still proceeds. */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
		data->vbios_boot_state.eclock = boot_up_values.ulEClk;
		data->vbios_boot_state.dclock = boot_up_values.ulDClk;
		data->vbios_boot_state.vclock = boot_up_values.ulVClk;
		/* boot dcef clock is in 10 kHz; SMU expects MHz, hence /100 */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
	}

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	/* false = write direction: push the table to the SMU */
	result = smum_smc_table_manager(hwmgr,
					(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	return 0;
}
 766
/*
 * Kick off the ACG BTC (built-in test/calibration) on the SMU and
 * verify it reports success (argument value 1).
 * Returns 0 on success, -EINVAL otherwise.
 */
static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
		"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
		return -EINVAL);

	result = smum_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE(result == 1,
			"Failed to run ACG BTC!", return -EINVAL);

	return 0;
}
 782
 783static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 784{
 785	struct vega12_hwmgr *data =
 786			(struct vega12_hwmgr *)(hwmgr->backend);
 787	int i;
 788	uint32_t allowed_features_low = 0, allowed_features_high = 0;
 789
 790	for (i = 0; i < GNLD_FEATURES_MAX; i++)
 791		if (data->smu_features[i].allowed)
 792			data->smu_features[i].smu_feature_id > 31 ?
 793				(allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
 794				(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
 795
 796	PP_ASSERT_WITH_CODE(
 797		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
 798		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
 799		return -1);
 800
 801	PP_ASSERT_WITH_CODE(
 802		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
 803		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
 804		return -1);
 805
 806	return 0;
 807}
 808
 809static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
 810{
 811	struct vega12_hwmgr *data =
 812			(struct vega12_hwmgr *)(hwmgr->backend);
 813
 814	data->uvd_power_gated = true;
 815	data->vce_power_gated = true;
 816
 817	if (data->smu_features[GNLD_DPM_UVD].enabled)
 818		data->uvd_power_gated = false;
 819
 820	if (data->smu_features[GNLD_DPM_VCE].enabled)
 821		data->vce_power_gated = false;
 822}
 823
/*
 * Ask the SMC to enable every allowed SMU feature, then read back the
 * actually-enabled feature mask and refresh the per-feature
 * enabled/supported bookkeeping, followed by the UVD/VCE powergate state.
 * Returns 0 on success, -1 if the enable message fails.
 */
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
		"[EnableAllSMUFeatures] Failed to enable all smu features!",
		return -1);

	/* Sync driver state with what the SMC actually enabled. */
	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
			data->smu_features[i].enabled = enabled;
			data->smu_features[i].supported = enabled;
		}
	}

	vega12_init_powergate_state(hwmgr);

	return 0;
}
 849
/*
 * Ask the SMC to disable every SMU feature, then read back the enabled
 * feature mask and refresh the per-feature enabled/supported bookkeeping
 * (mirrors vega12_enable_all_smu_features).
 * Returns 0 on success, -1 if the disable message fails.
 */
static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
		"[DisableAllSMUFeatures] Failed to disable all smu features!",
		return -1);

	/* Sync driver state with what actually remains enabled on the SMC. */
	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
			data->smu_features[i].enabled = enabled;
			data->smu_features[i].supported = enabled;
		}
	}

	return 0;
}
 873
/* No-op placeholder: vega12 currently programs no OD(N) default settings. */
static int vega12_odn_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	return 0;
}
 879
/*
 * Forward the requested overdrive percentage adjustment to the SMC.
 * Returns the result of the SMC message send.
 */
static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
		uint32_t adjust_percent)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
 886
 887static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
 888{
 889	int adjust_percent, result = 0;
 890
 891	if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
 892		adjust_percent =
 893				hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
 894				hwmgr->platform_descriptor.TDPAdjustment :
 895				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
 896		result = vega12_set_overdrive_target_percentage(hwmgr,
 897				(uint32_t)adjust_percent);
 898	}
 899	return result;
 900}
 901
/*
 * Query the SMC for the AC max/min and DC max frequencies of one clock
 * domain and store them in @clock. The clock id is passed to the SMC in
 * the upper 16 bits of the message parameter.
 * Returns 0 on success, -EINVAL if any query fails.
 */
static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
		PPCLK_e clkid, struct vega12_clock_range *clock)
{
	/* AC Max */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get max ac clock from SMC!",
		return -EINVAL);
	clock->ACMax = smum_get_argument(hwmgr);

	/* AC Min */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get min ac clock from SMC!",
		return -EINVAL);
	clock->ACMin = smum_get_argument(hwmgr);

	/* DC Max */
	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
		"[GetClockRanges] Failed to get max dc clock from SMC!",
		return -EINVAL);
	clock->DCMax = smum_get_argument(hwmgr);

	return 0;
}
 928
/*
 * Populate the cached clk_range[] table for every clock domain by
 * querying the SMC once per domain.
 * Returns 0 on success, -EINVAL on the first failing query.
 */
static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t i;

	for (i = 0; i < PPCLK_COUNT; i++)
		PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
					i, &(data->clk_range[i])),
				"Failed to get clk range from SMC!",
				return -EINVAL);

	return 0;
}
 943
 944static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 945{
 946	int tmp_result, result = 0;
 947
 948	smum_send_msg_to_smc_with_parameter(hwmgr,
 949			PPSMC_MSG_NumOfDisplays, 0);
 950
 951	result = vega12_set_allowed_featuresmask(hwmgr);
 952	PP_ASSERT_WITH_CODE(result == 0,
 953			"[EnableDPMTasks] Failed to set allowed featuresmask!\n",
 954			return result);
 955
 956	tmp_result = vega12_init_smc_table(hwmgr);
 957	PP_ASSERT_WITH_CODE(!tmp_result,
 958			"Failed to initialize SMC table!",
 959			result = tmp_result);
 960
 961	tmp_result = vega12_run_acg_btc(hwmgr);
 962	PP_ASSERT_WITH_CODE(!tmp_result,
 963			"Failed to run ACG BTC!",
 964			result = tmp_result);
 965
 966	result = vega12_enable_all_smu_features(hwmgr);
 967	PP_ASSERT_WITH_CODE(!result,
 968			"Failed to enable all smu features!",
 969			return result);
 970
 971	tmp_result = vega12_power_control_set_level(hwmgr);
 972	PP_ASSERT_WITH_CODE(!tmp_result,
 973			"Failed to power control set level!",
 974			result = tmp_result);
 975
 976	result = vega12_get_all_clock_ranges(hwmgr);
 977	PP_ASSERT_WITH_CODE(!result,
 978			"Failed to get all clock ranges!",
 979			return result);
 980
 981	result = vega12_odn_initialize_default_settings(hwmgr);
 982	PP_ASSERT_WITH_CODE(!result,
 983			"Failed to power control set level!",
 984			return result);
 985
 986	result = vega12_setup_default_dpm_tables(hwmgr);
 987	PP_ASSERT_WITH_CODE(!result,
 988			"Failed to setup default DPM tables!",
 989			return result);
 990	return result;
 991}
 992
/* No-op: vega12 requires no fix-ups to the boot power state. */
static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
	     struct pp_hw_power_state *hw_ps)
{
	return 0;
}
 998
 999static uint32_t vega12_find_lowest_dpm_level(
1000		struct vega12_single_dpm_table *table)
1001{
1002	uint32_t i;
1003
1004	for (i = 0; i < table->count; i++) {
1005		if (table->dpm_levels[i].enabled)
1006			break;
1007	}
1008
1009	if (i >= table->count) {
1010		i = 0;
1011		table->dpm_levels[i].enabled = true;
1012	}
1013
1014	return i;
1015}
1016
/*
 * Return the index of the highest enabled level in @table. If no level
 * is enabled, level 0 is force-enabled and returned. A table claiming
 * more than MAX_REGULAR_DPM_NUMBER entries is rejected with the last
 * valid index.
 */
static uint32_t vega12_find_highest_dpm_level(
		struct vega12_single_dpm_table *table)
{
	/* Signed so the downward scan can terminate at -1. */
	int32_t i = 0;
	PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
			"[FindHighestDPMLevel] DPM Table has too many entries!",
			return MAX_REGULAR_DPM_NUMBER - 1);

	for (i = table->count - 1; i >= 0; i--) {
		if (table->dpm_levels[i].enabled)
			break;
	}

	if (i < 0) {
		i = 0;
		table->dpm_levels[i].enabled = true;
	}

	return (uint32_t)i;
}
1037
/*
 * Push the cached soft-minimum (and, for UCLK/DCEFCLK, hard-minimum)
 * frequencies from the driver's dpm_table into the SMC, one message per
 * enabled clock domain. Message parameter encodes the clock id in the
 * upper 16 bits and the frequency in the lower 16.
 * Returns 0 on success, or the first failing send's error code.
 */
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = hwmgr->backend;
	uint32_t min_freq;
	int ret = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min gfxclk !",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min memclk !",
					return ret);

		/* Memory clock also carries a hard floor for display needs. */
		min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetHardMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set hard min memclk !",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_VCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min vclk!",
					return ret);

		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_DCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min dclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_ECLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min eclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min socclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		/* DCEFCLK only supports a hard minimum. */
		min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetHardMinByFreq,
					(PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
					"Failed to set hard min dcefclk!",
					return ret);
	}

	return ret;

}
1120
/*
 * Push the cached soft-maximum frequencies from the driver's dpm_table
 * into the SMC for each enabled clock domain (counterpart of
 * vega12_upload_dpm_min_level). Parameter encoding is the same:
 * clock id in the upper 16 bits, frequency in the lower 16.
 * Returns 0 on success, or the first failing send's error code.
 */
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = hwmgr->backend;
	uint32_t max_freq;
	int ret = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max gfxclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_UCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max memclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_VCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max vclk!",
					return ret);

		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_DCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max dclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_ECLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max eclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max socclk!",
					return ret);
	}

	return ret;
}
1186
/*
 * Enable or disable VCE DPM on the SMC (no-op when the feature is not
 * supported) and mirror the new state in the driver bookkeeping.
 * Returns 0 on success, -1 if the SMC feature toggle fails.
 */
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_VCE].supported) {
		PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
				"Attempt to Enable/Disable DPM VCE Failed!",
				return -1);
		data->smu_features[GNLD_DPM_VCE].enabled = enable;
	}

	return 0;
}
1203
/*
 * Return the cached min (@low true) or max (@low false) GFXCLK scaled
 * by 100 for the caller's unit convention.
 * NOTE(review): error paths return -1 through a uint32_t return type,
 * so callers actually see UINT32_MAX — confirm callers treat it so.
 */
static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t gfx_clk;

	if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
		return -1;

	if (low)
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
			"[GetSclks]: fail to get min PPCLK_GFXCLK\n",
			return -1);
	else
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
			"[GetSclks]: fail to get max PPCLK_GFXCLK\n",
			return -1);

	return (gfx_clk * 100);
}
1226
/*
 * Return the cached min (@low true) or max (@low false) UCLK scaled
 * by 100 (mirrors vega12_dpm_get_sclk).
 * NOTE(review): error paths return -1 through a uint32_t return type,
 * so callers actually see UINT32_MAX — confirm callers treat it so.
 */
static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t mem_clk;

	if (!data->smu_features[GNLD_DPM_UCLK].enabled)
		return -1;

	if (low)
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
			"[GetMclks]: fail to get min PPCLK_UCLK\n",
			return -1);
	else
		PP_ASSERT_WITH_CODE(
			vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
			"[GetMclks]: fail to get max PPCLK_UCLK\n",
			return -1);

	return (mem_clk * 100);
}
1249
/*
 * Fill @metrics_table with SMU metrics, refetching from the SMC only
 * when the cached copy is older than HZ/2 jiffies (~500 ms); otherwise
 * the cached snapshot in data->metrics_table is returned. This keeps
 * frequent sensor reads from hammering the SMC.
 * Returns 0 on success or the table-export error code.
 */
static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	int ret = 0;

	/* metrics_time == 0 means "never fetched"; always refresh then. */
	if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
		ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
				TABLE_SMU_METRICS, true);
		if (ret) {
			pr_info("Failed to export SMU metrics table!\n");
			return ret;
		}
		memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
		data->metrics_time = jiffies;
	} else
		memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));

	return ret;
}
1270
1271static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
1272{
1273	SmuMetrics_t metrics_table;
1274	int ret = 0;
1275
1276	ret = vega12_get_metrics_table(hwmgr, &metrics_table);
1277	if (ret)
1278		return ret;
1279
1280	*query = metrics_table.CurrSocketPower << 8;
1281
1282	return ret;
1283}
1284
/*
 * Query the SMC for the current GFXCLK and store it, scaled by 100, in
 * @gfx_freq (zeroed first so callers never see stale data on failure).
 * Returns 0 on success, -EINVAL if the SMC query fails.
 */
static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
{
	uint32_t gfx_clk = 0;

	*gfx_freq = 0;

	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
			"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
			return -EINVAL);
	gfx_clk = smum_get_argument(hwmgr);

	*gfx_freq = gfx_clk * 100;

	return 0;
}
1301
/*
 * Query the SMC for the current UCLK and store it, scaled by 100, in
 * @mclk_freq (zeroed first so callers never see stale data on failure).
 * Returns 0 on success, -EINVAL if the SMC query fails.
 */
static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
{
	uint32_t mem_clk = 0;

	*mclk_freq = 0;

	PP_ASSERT_WITH_CODE(
			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
			"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
			return -EINVAL);
	mem_clk = smum_get_argument(hwmgr);

	*mclk_freq = mem_clk * 100;

	return 0;
}
1318
/*
 * Report the average GFX or UCLK activity from the SMU metrics table,
 * selected by the sensor index @idx.
 * Returns 0 on success, a metrics-fetch error, or -EINVAL for an
 * unsupported sensor index.
 */
static int vega12_get_current_activity_percent(
		struct pp_hwmgr *hwmgr,
		int idx,
		uint32_t *activity_percent)
{
	SmuMetrics_t metrics_table;
	int ret = 0;

	ret = vega12_get_metrics_table(hwmgr, &metrics_table);
	if (ret)
		return ret;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		*activity_percent = metrics_table.AverageGfxActivity;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		*activity_percent = metrics_table.AverageUclkActivity;
		break;
	default:
		pr_err("Invalid index for retrieving clock activity\n");
		return -EINVAL;
	}

	return ret;
}
1345
/*
 * Sensor dispatch entry point: fill @value (and its byte count in
 * @size) for the requested sensor @idx. Most sensors are 4-byte
 * values; the enabled-SMC-features mask is 8 bytes. *size is only
 * updated when the underlying query succeeds.
 * Returns 0 on success, a sensor-specific error, or -EINVAL for an
 * unknown sensor index.
 */
static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			      void *value, int *size)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	SmuMetrics_t metrics_table;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
		if (!ret)
			*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
		if (!ret)
			*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = vega12_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
		if (!ret)
			*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = vega12_get_metrics_table(hwmgr, &metrics_table);
		if (ret)
			return ret;

		/* Metrics report centigrades; scale to the PP unit convention. */
		*((uint32_t *)value) = metrics_table.TemperatureHotspot *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = vega12_get_metrics_table(hwmgr, &metrics_table);
		if (ret)
			return ret;

		*((uint32_t *)value) = metrics_table.TemperatureHBM *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* Reported as 1 when the block is powered (not gated). */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
		if (!ret)
			*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value);
		if (!ret)
			*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
1416
1417static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1418		bool has_disp)
1419{
1420	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1421
1422	if (data->smu_features[GNLD_DPM_UCLK].enabled)
1423		return smum_send_msg_to_smc_with_parameter(hwmgr,
1424			PPSMC_MSG_SetUclkFastSwitch,
1425			has_disp ? 1 : 0);
1426
1427	return 0;
1428}
1429
/*
 * Service a display-driver request for a hard minimum display-related
 * clock. The requested kHz value is converted to the SMC's unit
 * (divide by 1000) and sent as SetHardMinByFreq with the clock id in
 * the upper 16 bits. Only acted on while DCEFCLK DPM is enabled.
 * Returns 0 on success, -1 for an unknown clock type, or the SMC send
 * result.
 */
int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock_req)
{
	int result = 0;
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPCLK_e clk_select = 0;
	uint32_t clk_request = 0;

	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = PPCLK_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = PPCLK_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = PPCLK_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = PPCLK_PHYCLK;
			break;
		default:
			pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
			result = -1;
			break;
		}

		if (!result) {
			clk_request = (clk_select << 16) | clk_freq;
			result = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinByFreq,
					clk_request);
		}
	}

	return result;
}
1470
/*
 * After a power-state adjustment, re-inform the SMC about the display
 * configuration: toggle fast UCLK switching based on multi-monitor
 * sync state, then re-request the display driver's minimum DCEFCLK and
 * the deep-sleep DCEFCLK floor.
 * Returns 0 (DCEF request failures are only logged), or -1 if setting
 * the deep-sleep divider fails.
 */
static int vega12_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct PP_Clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;

	/*
	 * Multiple unsynchronized displays cannot use fast UCLK switching
	 * (unless p-state switching is disabled anyway).
	 */
	if ((hwmgr->display_config->num_display > 1) &&
	     !hwmgr->display_config->multi_monitor_in_sync &&
	     !hwmgr->display_config->nb_pstate_switch_disable)
		vega12_notify_smc_display_change(hwmgr, false);
	else
		vega12_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
		clock_req.clock_type = amd_pp_dcef_clock;
		/* display_config values are in 10 kHz; request takes kHz/1000 path. */
		clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
		if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
			if (data->smu_features[GNLD_DS_DCEFCLK].supported)
				PP_ASSERT_WITH_CODE(
					!smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR /100),
					"Attempt to set divider for DCEFCLK Failed!",
					return -1);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	return 0;
}
1508
/*
 * Pin GFXCLK and UCLK to their highest enabled DPM level by setting
 * soft min == soft max to that level's value, then uploading both
 * limits to the SMC.
 * Returns 0 on success, -1 if either upload fails.
 */
static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	uint32_t soft_level;

	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));

	data->dpm_table.gfx_table.dpm_state.soft_min_level =
		data->dpm_table.gfx_table.dpm_state.soft_max_level =
		data->dpm_table.gfx_table.dpm_levels[soft_level].value;

	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));

	data->dpm_table.mem_table.dpm_state.soft_min_level =
		data->dpm_table.mem_table.dpm_state.soft_max_level =
		data->dpm_table.mem_table.dpm_levels[soft_level].value;

	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
			"Failed to upload boot level to highest!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -1);

	return 0;
}
1538
1539static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1540{
1541	struct vega12_hwmgr *data =
1542			(struct vega12_hwmgr *)(hwmgr->backend);
1543	uint32_t soft_level;
1544
1545	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1546
1547	data->dpm_table.gfx_table.dpm_state.soft_min_level =
1548		data->dpm_table.gfx_table.dpm_state.soft_max_level =
1549		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1550
1551	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1552
1553	data->dpm_table.mem_table.dpm_state.soft_min_level =
1554		data->dpm_table.mem_table.dpm_state.soft_max_level =
1555		data->dpm_table.mem_table.dpm_levels[soft_level].value;
1556
1557	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1558			"Failed to upload boot level to highest!",
1559			return -1);
1560
1561	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1562			"Failed to upload dpm max level to highest!",
1563			return -1);
1564
1565	return 0;
1566
1567}
1568
/*
 * Restore automatic DPM by re-uploading the currently cached soft
 * min/max levels (whatever the dpm_table holds) to the SMC.
 * Returns 0 on success, -1 if either upload fails.
 */
static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
			"Failed to upload DPM Bootup Levels!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
			"Failed to upload DPM Max Levels!",
			return -1);

	return 0;
}
1581
/*
 * Compute the sclk/mclk/socclk level masks used by the UMD p-state
 * profiling modes: start from the standard UMD p-state levels (when the
 * tables are deep enough), then override per the requested forced level
 * (min-sclk, min-mclk, or peak).
 * Always returns 0.
 */
static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
	struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
	struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);

	*sclk_mask = 0;
	*mclk_mask = 0;
	*soc_mask  = 0;

	if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
	    soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
		*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
		*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
		*soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		*sclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		*mclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		*sclk_mask = gfx_dpm_table->count - 1;
		*mclk_mask = mem_dpm_table->count - 1;
		*soc_mask  = soc_dpm_table->count - 1;
	}

	return 0;
}
1614
1615static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
1616{
1617	switch (mode) {
1618	case AMD_FAN_CTRL_NONE:
1619		break;
1620	case AMD_FAN_CTRL_MANUAL:
1621		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1622			vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
1623		break;
1624	case AMD_FAN_CTRL_AUTO:
1625		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1626			vega12_fan_ctrl_start_smc_fan_control(hwmgr);
1627		break;
1628	default:
1629		break;
1630	}
1631}
1632
/*
 * Apply a forced DPM level policy: pin to highest/lowest, restore auto,
 * or force the profiling clock masks for the UMD p-state modes.
 * Manual and profile-exit levels intentionally change nothing here.
 * Returns 0 on success or the first sub-operation's error.
 */
static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t soc_mask = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega12_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = vega12_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = vega12_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
		if (ret)
			return ret;
		/* soc_mask is computed but not forced here. */
		vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
		vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	return ret;
}
1669
1670static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
1671{
1672	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1673
1674	if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
1675		return AMD_FAN_CTRL_MANUAL;
1676	else
1677		return AMD_FAN_CTRL_AUTO;
1678}
1679
/*
 * Report max engine/memory clocks to DAL.
 * NOTE(review): the implementation is currently compiled out (#if 0),
 * so @info is never written and the function always reports success —
 * confirm callers tolerate an unpopulated info struct.
 */
static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
#if 0
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;
#endif
	return 0;
}
1694
1695static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
1696		uint32_t *clock,
1697		PPCLK_e clock_select,
1698		bool max)
1699{
1700	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1701
1702	if (max)
1703		*clock = data->clk_range[clock_select].ACMax;
1704	else
1705		*clock = data->clk_range[clock_select].ACMin;
1706
1707	return 0;
1708}
1709
/*
 * Fill @clocks with the GFXCLK DPM levels (values converted to kHz via
 * *1000; latency reported as 0), capped at MAX_NUM_CLOCKS entries.
 * Returns 0 on success, -1 if GFXCLK DPM is not enabled.
 */
static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t ucount;
	int i;
	struct vega12_single_dpm_table *dpm_table;

	if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
		return -1;

	dpm_table = &(data->dpm_table.gfx_table);
	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
		MAX_NUM_CLOCKS : dpm_table->count;

	for (i = 0; i < ucount; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;

		clocks->data[i].latency_in_us = 0;
	}

	clocks->num_levels = ucount;

	return 0;
}
1736
/*
 * Memory latency reported to callers (used as latency_in_us by
 * vega12_get_memclocks); a flat figure independent of @clock.
 */
static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	const uint32_t mem_latency_us = 25;

	return mem_latency_us;
}
1742
/*
 * Fill @clocks with the UCLK DPM levels (kHz via *1000, flat latency
 * from vega12_get_mem_latency), capped at MAX_NUM_CLOCKS, and mirror
 * frequency (in 10 kHz units via *100) plus latency into the driver's
 * mclk_latency_table.
 * Returns 0 on success, -1 if UCLK DPM is not enabled.
 */
static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t ucount;
	int i;
	struct vega12_single_dpm_table *dpm_table;
	if (!data->smu_features[GNLD_DPM_UCLK].enabled)
		return -1;

	dpm_table = &(data->dpm_table.mem_table);
	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
		MAX_NUM_CLOCKS : dpm_table->count;

	for (i = 0; i < ucount; i++) {
		clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
		data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
		clocks->data[i].latency_in_us =
			data->mclk_latency_table.entries[i].latency =
			vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
	}

	clocks->num_levels = data->mclk_latency_table.count = ucount;

	return 0;
}
1769
/*
 * Fill @clocks with the DCEFCLK DPM levels (kHz via *1000, latency 0),
 * capped at MAX_NUM_CLOCKS entries.
 * Returns 0 on success, -1 if DCEFCLK DPM is not enabled.
 */
static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t ucount;
	int i;
	struct vega12_single_dpm_table *dpm_table;

	if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
		return -1;


	dpm_table = &(data->dpm_table.dcef_table);
	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
		MAX_NUM_CLOCKS : dpm_table->count;

	for (i = 0; i < ucount; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;

		clocks->data[i].latency_in_us = 0;
	}

	clocks->num_levels = ucount;

	return 0;
}
1797
/*
 * Fill @clocks with the SOCCLK DPM levels (kHz via *1000, latency 0),
 * capped at MAX_NUM_CLOCKS entries.
 * Returns 0 on success, -1 if SOCCLK DPM is not enabled.
 */
static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t ucount;
	int i;
	struct vega12_single_dpm_table *dpm_table;

	if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
		return -1;


	dpm_table = &(data->dpm_table.soc_table);
	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
		MAX_NUM_CLOCKS : dpm_table->count;

	for (i = 0; i < ucount; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;

		clocks->data[i].latency_in_us = 0;
	}

	clocks->num_levels = ucount;

	return 0;

}
1826
1827static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1828		enum amd_pp_clock_type type,
1829		struct pp_clock_levels_with_latency *clocks)
1830{
1831	int ret;
1832
1833	switch (type) {
1834	case amd_pp_sys_clock:
1835		ret = vega12_get_sclks(hwmgr, clocks);
1836		break;
1837	case amd_pp_mem_clock:
1838		ret = vega12_get_memclocks(hwmgr, clocks);
1839		break;
1840	case amd_pp_dcef_clock:
1841		ret = vega12_get_dcefclocks(hwmgr, clocks);
1842		break;
1843	case amd_pp_soc_clock:
1844		ret = vega12_get_socclocks(hwmgr, clocks);
1845		break;
1846	default:
1847		return -EINVAL;
1848	}
1849
1850	return ret;
1851}
1852
1853static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1854		enum amd_pp_clock_type type,
1855		struct pp_clock_levels_with_voltage *clocks)
1856{
1857	clocks->num_levels = 0;
1858
1859	return 0;
1860}
1861
1862static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1863							void *clock_ranges)
1864{
1865	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1866	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
1867	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
1868
1869	if (!data->registry_data.disable_water_mark &&
1870			data->smu_features[GNLD_DPM_DCEFCLK].supported &&
1871			data->smu_features[GNLD_DPM_SOCCLK].supported) {
1872		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
1873		data->water_marks_bitmap |= WaterMarksExist;
1874		data->water_marks_bitmap &= ~WaterMarksLoaded;
1875	}
1876
1877	return 0;
1878}
1879
1880static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
1881		enum pp_clock_type type, uint32_t mask)
1882{
1883	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1884	uint32_t soft_min_level, soft_max_level, hard_min_level;
1885	int ret = 0;
1886
1887	switch (type) {
1888	case PP_SCLK:
1889		soft_min_level = mask ? (ffs(mask) - 1) : 0;
1890		soft_max_level = mask ? (fls(mask) - 1) : 0;
1891
1892		data->dpm_table.gfx_table.dpm_state.soft_min_level =
1893			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
1894		data->dpm_table.gfx_table.dpm_state.soft_max_level =
1895			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
1896
1897		ret = vega12_upload_dpm_min_level(hwmgr);
1898		PP_ASSERT_WITH_CODE(!ret,
1899			"Failed to upload boot level to lowest!",
1900			return ret);
1901
1902		ret = vega12_upload_dpm_max_level(hwmgr);
1903		PP_ASSERT_WITH_CODE(!ret,
1904			"Failed to upload dpm max level to highest!",
1905			return ret);
1906		break;
1907
1908	case PP_MCLK:
1909		soft_min_level = mask ? (ffs(mask) - 1) : 0;
1910		soft_max_level = mask ? (fls(mask) - 1) : 0;
1911
1912		data->dpm_table.mem_table.dpm_state.soft_min_level =
1913			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
1914		data->dpm_table.mem_table.dpm_state.soft_max_level =
1915			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
1916
1917		ret = vega12_upload_dpm_min_level(hwmgr);
1918		PP_ASSERT_WITH_CODE(!ret,
1919			"Failed to upload boot level to lowest!",
1920			return ret);
1921
1922		ret = vega12_upload_dpm_max_level(hwmgr);
1923		PP_ASSERT_WITH_CODE(!ret,
1924			"Failed to upload dpm max level to highest!",
1925			return ret);
1926
1927		break;
1928
1929	case PP_SOCCLK:
1930		soft_min_level = mask ? (ffs(mask) - 1) : 0;
1931		soft_max_level = mask ? (fls(mask) - 1) : 0;
1932
1933		if (soft_max_level >= data->dpm_table.soc_table.count) {
1934			pr_err("Clock level specified %d is over max allowed %d\n",
1935					soft_max_level,
1936					data->dpm_table.soc_table.count - 1);
1937			return -EINVAL;
1938		}
1939
1940		data->dpm_table.soc_table.dpm_state.soft_min_level =
1941			data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
1942		data->dpm_table.soc_table.dpm_state.soft_max_level =
1943			data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
1944
1945		ret = vega12_upload_dpm_min_level(hwmgr);
1946		PP_ASSERT_WITH_CODE(!ret,
1947			"Failed to upload boot level to lowest!",
1948			return ret);
1949
1950		ret = vega12_upload_dpm_max_level(hwmgr);
1951		PP_ASSERT_WITH_CODE(!ret,
1952			"Failed to upload dpm max level to highest!",
1953			return ret);
1954
1955		break;
1956
1957	case PP_DCEFCLK:
1958		hard_min_level = mask ? (ffs(mask) - 1) : 0;
1959
1960		if (hard_min_level >= data->dpm_table.dcef_table.count) {
1961			pr_err("Clock level specified %d is over max allowed %d\n",
1962					hard_min_level,
1963					data->dpm_table.dcef_table.count - 1);
1964			return -EINVAL;
1965		}
1966
1967		data->dpm_table.dcef_table.dpm_state.hard_min_level =
1968			data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
1969
1970		ret = vega12_upload_dpm_min_level(hwmgr);
1971		PP_ASSERT_WITH_CODE(!ret,
1972			"Failed to upload boot level to lowest!",
1973			return ret);
1974
1975		//TODO: Setting DCEFCLK max dpm level is not supported
1976
1977		break;
1978
1979	case PP_PCIE:
1980		break;
1981
1982	default:
1983		break;
1984	}
1985
1986	return 0;
1987}
1988
1989static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
1990{
1991	static const char *ppfeature_name[] = {
1992			"DPM_PREFETCHER",
1993			"GFXCLK_DPM",
1994			"UCLK_DPM",
1995			"SOCCLK_DPM",
1996			"UVD_DPM",
1997			"VCE_DPM",
1998			"ULV",
1999			"MP0CLK_DPM",
2000			"LINK_DPM",
2001			"DCEFCLK_DPM",
2002			"GFXCLK_DS",
2003			"SOCCLK_DS",
2004			"LCLK_DS",
2005			"PPT",
2006			"TDC",
2007			"THERMAL",
2008			"GFX_PER_CU_CG",
2009			"RM",
2010			"DCEFCLK_DS",
2011			"ACDC",
2012			"VR0HOT",
2013			"VR1HOT",
2014			"FW_CTF",
2015			"LED_DISPLAY",
2016			"FAN_CONTROL",
2017			"DIDT",
2018			"GFXOFF",
2019			"CG",
2020			"ACG"};
2021	static const char *output_title[] = {
2022			"FEATURES",
2023			"BITMASK",
2024			"ENABLEMENT"};
2025	uint64_t features_enabled;
2026	int i;
2027	int ret = 0;
2028	int size = 0;
2029
2030	ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
2031	PP_ASSERT_WITH_CODE(!ret,
2032		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
2033		return ret);
2034
2035	size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
2036	size += sprintf(buf + size, "%-19s %-22s %s\n",
2037				output_title[0],
2038				output_title[1],
2039				output_title[2]);
2040	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
2041		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
2042				ppfeature_name[i],
2043				1ULL << i,
2044				(features_enabled & (1ULL << i)) ? "Y" : "N");
2045	}
2046
2047	return size;
2048}
2049
2050static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
2051{
2052	uint64_t features_enabled;
2053	uint64_t features_to_enable;
2054	uint64_t features_to_disable;
2055	int ret = 0;
2056
2057	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
2058		return -EINVAL;
2059
2060	ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
2061	if (ret)
2062		return ret;
2063
2064	features_to_disable =
2065		features_enabled & ~new_ppfeature_masks;
2066	features_to_enable =
2067		~features_enabled & new_ppfeature_masks;
2068
2069	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
2070	pr_debug("features_to_enable 0x%llx\n", features_to_enable);
2071
2072	if (features_to_disable) {
2073		ret = vega12_enable_smc_features(hwmgr, false, features_to_disable);
2074		if (ret)
2075			return ret;
2076	}
2077
2078	if (features_to_enable) {
2079		ret = vega12_enable_smc_features(hwmgr, true, features_to_enable);
2080		if (ret)
2081			return ret;
2082	}
2083
2084	return 0;
2085}
2086
/*
 * Print the DPM level table for a clock domain into @buf, marking the
 * level matching the current frequency with '*'.
 * Returns the number of bytes written (0 for PP_PCIE/unknown types).
 *
 * NOTE(review): the units of 'now' differ per domain — for SCLK/MCLK it
 * is compared as now/100 against MHz, while for SOCCLK/DCEFCLK the raw
 * SMU reply is compared directly against MHz.  Presumably the former
 * helpers report in 10 kHz and GetDpmClockFreq replies in MHz — TODO
 * confirm against vega12_get_current_gfx_clk_freq/_mclk_freq.
 */
static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	int i, now, size = 0;
	struct pp_clock_levels_with_latency clocks;

	switch (type) {
	case PP_SCLK:
		PP_ASSERT_WITH_CODE(
				vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
				"Attempt to get current gfx clk Failed!",
				return -1);

		PP_ASSERT_WITH_CODE(
				vega12_get_sclks(hwmgr, &clocks) == 0,
				"Attempt to get gfx clk levels Failed!",
				return -1);
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
		break;

	case PP_MCLK:
		PP_ASSERT_WITH_CODE(
				vega12_get_current_mclk_freq(hwmgr, &now) == 0,
				"Attempt to get current mclk freq Failed!",
				return -1);

		PP_ASSERT_WITH_CODE(
				vega12_get_memclocks(hwmgr, &clocks) == 0,
				"Attempt to get memory clk levels Failed!",
				return -1);
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
		break;

	case PP_SOCCLK:
		/* Query the SMU directly; clock id goes in the high 16 bits. */
		PP_ASSERT_WITH_CODE(
				smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
				"Attempt to get Current SOCCLK Frequency Failed!",
				return -EINVAL);
		now = smum_get_argument(hwmgr);

		PP_ASSERT_WITH_CODE(
				vega12_get_socclocks(hwmgr, &clocks) == 0,
				"Attempt to get soc clk levels Failed!",
				return -1);
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
		break;

	case PP_DCEFCLK:
		/* Same SMU query pattern as SOCCLK. */
		PP_ASSERT_WITH_CODE(
				smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
				"Attempt to get Current DCEFCLK Frequency Failed!",
				return -EINVAL);
		now = smum_get_argument(hwmgr);

		PP_ASSERT_WITH_CODE(
				vega12_get_dcefclocks(hwmgr, &clocks) == 0,
				"Attempt to get dcef clk levels Failed!",
				return -1);
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
		break;

	case PP_PCIE:
		break;

	default:
		break;
	}
	return size;
}
2170
/*
 * Recompute the soft/hard min/max DPM state for every clock domain
 * (gfx, mem, vclk, dclk, soc, eclk) from the current display config and
 * the UMD pstate / forced-level settings.  Called by the power-state
 * adjustment path before levels are uploaded to the SMU.
 *
 * NOTE(review): vblank_too_short is initialized to false and never
 * updated, so it currently has no effect on disable_mclk_switching —
 * looks like a placeholder for a vblank-time check; verify intent.
 */
static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table;
	bool vblank_too_short = false;
	bool disable_mclk_switching;
	uint32_t i, latency;

	/* Mclk switching is unsafe with multiple unsynchronized displays. */
	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
			          !hwmgr->display_config->multi_monitor_in_sync) ||
			          vblank_too_short;
	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	/* Default: full level range. */
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		/* Pin to the UMD pstate level when available. */
		if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* honour DAL's UCLK Hardmin */
	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;

	/* Hardmin is dependent on displayconfig */
	if (disable_mclk_switching) {
		/* Default to top level, then pick the lowest level whose
		 * latency is tolerable and which still satisfies DAL's min. */
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
			if (data->mclk_latency_table.entries[i].latency <= latency) {
				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
					break;
				}
			}
		}
	}

	if (hwmgr->display_config->nb_pstate_switch_disable)
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	return 0;
}
2330
/*
 * Pin the UCLK hard minimum to the highest DPM level, effectively
 * locking the memory clock at maximum.  Called from the pre-display-
 * config path so mclk stays stable while watermarks are not yet
 * programmed.  No-op when UCLK DPM is disabled.
 * Returns 0 on success or a negative error code.
 */
static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
		struct vega12_single_dpm_table *dpm_table)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		/* Sanity-check the table before indexing its last entry. */
		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
				"[SetUclkToHightestDpmLevel] Dpm table has no entry!",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
				"[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
				return -EINVAL);

		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		/* Clock id in the high 16 bits, frequency in the low 16. */
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetHardMinByFreq,
				(PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
				"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
				return ret);
	}

	return ret;
}
2355
2356static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2357{
2358	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2359	int ret = 0;
2360
2361	smum_send_msg_to_smc_with_parameter(hwmgr,
2362			PPSMC_MSG_NumOfDisplays, 0);
2363
2364	ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
2365			&data->dpm_table.mem_table);
2366
2367	return ret;
2368}
2369
2370static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2371{
2372	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2373	int result = 0;
2374	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
2375
2376	if ((data->water_marks_bitmap & WaterMarksExist) &&
2377			!(data->water_marks_bitmap & WaterMarksLoaded)) {
2378		result = smum_smc_table_manager(hwmgr,
2379						(uint8_t *)wm_table, TABLE_WATERMARKS, false);
2380		PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
2381		data->water_marks_bitmap |= WaterMarksLoaded;
2382	}
2383
2384	if ((data->water_marks_bitmap & WaterMarksExist) &&
2385		data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2386		data->smu_features[GNLD_DPM_SOCCLK].supported)
2387		smum_send_msg_to_smc_with_parameter(hwmgr,
2388			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
2389
2390	return result;
2391}
2392
2393int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2394{
2395	struct vega12_hwmgr *data =
2396			(struct vega12_hwmgr *)(hwmgr->backend);
2397
2398	if (data->smu_features[GNLD_DPM_UVD].supported) {
2399		PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
2400				enable,
2401				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
2402				"Attempt to Enable/Disable DPM UVD Failed!",
2403				return -1);
2404		data->smu_features[GNLD_DPM_UVD].enabled = enable;
2405	}
2406
2407	return 0;
2408}
2409
2410static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2411{
2412	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2413
2414	if (data->vce_power_gated == bgate)
2415		return;
2416
2417	data->vce_power_gated = bgate;
2418	vega12_enable_disable_vce_dpm(hwmgr, !bgate);
2419}
2420
2421static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2422{
2423	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2424
2425	if (data->uvd_power_gated == bgate)
2426		return;
2427
2428	data->uvd_power_gated = bgate;
2429	vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
2430}
2431
2432static bool
2433vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2434{
2435	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2436	bool is_update_required = false;
2437
2438	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
2439		is_update_required = true;
2440
2441	if (data->registry_data.gfx_clk_deep_sleep_support) {
2442		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
2443			is_update_required = true;
2444	}
2445
2446	return is_update_required;
2447}
2448
/*
 * Tear down dynamic power management by disabling every SMU feature.
 * Returns 0 on success or the SMC error code.
 */
static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int ret;

	ret = vega12_disable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE((ret == 0),
			"Failed to disable all smu features!", result = ret);

	return result;
}
2459
2460static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
2461{
2462	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2463	int result;
2464
2465	result = vega12_disable_dpm_tasks(hwmgr);
2466	PP_ASSERT_WITH_CODE((0 == result),
2467			"[disable_dpm_tasks] Failed to disable DPM!",
2468			);
2469	data->water_marks_bitmap &= ~(WaterMarksLoaded);
2470
2471	return result;
2472}
2473
#if 0
/* Dead code (compiled out): finds the lowest enabled gfx/mem DPM level
 * index at or above the given minimum clocks.  Kept for reference.
 */
static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
		uint32_t *sclk_idx, uint32_t *mclk_idx,
		uint32_t min_sclk, uint32_t min_mclk)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_dpm_table *dpm_table = &(data->dpm_table);
	uint32_t i;

	for (i = 0; i < dpm_table->gfx_table.count; i++) {
		if (dpm_table->gfx_table.dpm_levels[i].enabled &&
			dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
			*sclk_idx = i;
			break;
		}
	}

	for (i = 0; i < dpm_table->mem_table.count; i++) {
		if (dpm_table->mem_table.dpm_levels[i].enabled &&
			dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
			*mclk_idx = i;
			break;
		}
	}
}
#endif
2500
#if 0
/* Dead code (compiled out): overdrive and power-profile stubs kept for
 * reference; the corresponding hwmgr_func entries are also under #if 0.
 */
static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request)
{
	return 0;
}

/* Percent overclock of the top sclk level relative to the golden table. */
static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega12_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	return 0;
}

/* Percent overclock of the top mclk level relative to the golden table. */
static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega12_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	return 0;
}
#endif
2550
/*
 * Tell the SMU where the CAC/DRAM-log buffer lives: its virtual and MC
 * (physical) addresses, split high/low, followed by its size.
 * NOTE(review): the message sequence (virtual addr, then DRAM addr,
 * then size) is presumably mandated by the SMU firmware protocol —
 * do not reorder without confirming.
 * Always returns 0; individual message results are not checked.
 */
static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size);
	return 0;
}
2577
2578static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2579		struct PP_TemperatureRange *thermal_data)
2580{
2581	struct vega12_hwmgr *data =
2582			(struct vega12_hwmgr *)(hwmgr->backend);
2583	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2584
2585	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2586
2587	thermal_data->max = pp_table->TedgeLimit *
2588		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2589	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
2590		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2591	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
2592		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2593	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
2594		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2595	thermal_data->mem_crit_max = pp_table->ThbmLimit *
2596		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2597	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
2598		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2599
2600	return 0;
2601}
2602
2603static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
2604{
2605	struct vega12_hwmgr *data =
2606			(struct vega12_hwmgr *)(hwmgr->backend);
2607	int ret = 0;
2608
2609	if (data->gfxoff_controlled_by_driver)
2610		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
2611
2612	return ret;
2613}
2614
2615static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
2616{
2617	struct vega12_hwmgr *data =
2618			(struct vega12_hwmgr *)(hwmgr->backend);
2619	int ret = 0;
2620
2621	if (data->gfxoff_controlled_by_driver)
2622		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
2623
2624	return ret;
2625}
2626
2627static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
2628{
2629	if (enable)
2630		return vega12_enable_gfx_off(hwmgr);
2631	else
2632		return vega12_disable_gfx_off(hwmgr);
2633}
2634
2635static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2636				PHM_PerformanceLevelDesignation designation, uint32_t index,
2637				PHM_PerformanceLevel *level)
2638{
2639	return 0;
2640}
2641
2642static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
2643				enum pp_mp1_state mp1_state)
2644{
2645	uint16_t msg;
2646	int ret;
2647
2648	switch (mp1_state) {
2649	case PP_MP1_STATE_UNLOAD:
2650		msg = PPSMC_MSG_PrepareMp1ForUnload;
2651		break;
2652	case PP_MP1_STATE_SHUTDOWN:
2653	case PP_MP1_STATE_RESET:
2654	case PP_MP1_STATE_NONE:
2655	default:
2656		return 0;
2657	}
2658
2659	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
2660			    "[PrepareMp1] Failed!",
2661			    return ret);
2662
2663	return 0;
2664}
2665
/* Vega12 implementation of the hwmgr callback interface. */
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
	/* lifecycle */
	.backend_init = vega12_hwmgr_backend_init,
	.backend_fini = vega12_hwmgr_backend_fini,
	.asic_setup = vega12_setup_asic_task,
	/* DPM control */
	.dynamic_state_management_enable = vega12_enable_dpm_tasks,
	.dynamic_state_management_disable = vega12_disable_dpm_tasks,
	.patch_boot_state = vega12_patch_boot_state,
	.get_sclk = vega12_dpm_get_sclk,
	.get_mclk = vega12_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega12_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega12_dpm_force_dpm_level,
	/* thermal / fan */
	.stop_thermal_controller = vega12_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
	.reset_fan_speed_to_default =
			vega12_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
	.set_fan_control_mode = vega12_set_fan_control_mode,
	.get_fan_control_mode = vega12_get_fan_control_mode,
	.read_sensor = vega12_read_sensor,
	/* display / clock queries */
	.get_dal_power_level = vega12_get_dal_power_level,
	.get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega12_display_clock_voltage_request,
	.force_clock_level = vega12_force_clock_level,
	.print_clock_levels = vega12_print_clock_levels,
	.apply_clocks_adjust_rules =
		vega12_apply_clocks_adjust_rules,
	.pre_display_config_changed =
		vega12_pre_display_configuration_changed_task,
	.display_config_changed = vega12_display_configuration_changed_task,
	/* power gating */
	.powergate_uvd = vega12_power_gate_uvd,
	.powergate_vce = vega12_power_gate_vce,
	.check_smc_update_required_for_display_configuration =
			vega12_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega12_power_off_asic,
	.disable_smc_firmware_ctf = vega12_thermal_disable_alert,
#if 0
	.set_power_profile_state = vega12_set_power_profile_state,
	.get_sclk_od = vega12_get_sclk_od,
	.set_sclk_od = vega12_set_sclk_od,
	.get_mclk_od = vega12_get_mclk_od,
	.set_mclk_od = vega12_set_mclk_od,
#endif
	.notify_cac_buffer_info = vega12_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega12_start_thermal_controller,
	.powergate_gfx = vega12_gfx_off_control,
	.get_performance_level = vega12_get_performance_level,
	/* BACO (bus active, chip off) */
	.get_asic_baco_capability = smu9_baco_get_capability,
	.get_asic_baco_state = smu9_baco_get_state,
	.set_asic_baco_state = vega12_baco_set_state,
	.get_ppfeature_status = vega12_get_ppfeature_status,
	.set_ppfeature_status = vega12_set_ppfeature_status,
	.set_mp1_state = vega12_set_mp1_state,
};
2724
2725int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
2726{
2727	hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
2728	hwmgr->pptable_func = &vega12_pptable_funcs;
2729
2730	return 0;
2731}