/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}
static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

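/*
 * Despite its name, this also checks the JPEG IP blocks: it returns false
 * whenever any VCN or JPEG block instance was left invalid (i.e. skipped)
 * during IP block setup, so callers avoid powering on uninitialized blocks.
 */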
static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
			adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
			!adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * Don't power on VCN/JPEG when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

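	/*
	 * vcn_gated is 1 while VCN is gated, so (gated ^ enable) != 0 means
	 * the block is already in the requested power state and nothing
	 * needs to be done. The same pattern is used for JPEG/VPE/UMSCH below.
	 */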
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the callers guarantee it is free of race conditions.
 * 2. Or it gets called on a user setting request of
 *    power_dpm_force_performance_level. In that case, the smu->mutex lock
 *    protection is already enforced on the parent API
 *    smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:	smu_context pointer
 * @clk:	enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}
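
/*
 * Example of the dependency tracking above: after the user forces MCLK
 * levels, clk_dependency holds BIT(SMU_FCLK) | BIT(SMU_SOCCLK), so the
 * restore loop in smu_restore_dpm_user_profile() below skips the saved
 * FCLK and SOCCLK masks and lets those clocks follow MCLK instead.
 */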

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:	smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/*
	 * Enable the restore flag so the setters below do not treat the
	 * restored values as new user settings.
	 */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over the smu clk types and force the saved
			 * user clk configs; skip if a clock dependency is
			 * enabled for that type
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
					smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret && ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


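/*
 * Note: on success this returns the powerplay table size in bytes, so
 * callers (e.g. the pp_table sysfs read handler) treat a positive return
 * value as the table length and a negative one as an error code.
 */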
static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all features are allowed in that scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		vcn_gate = atomic_read(&power_gate->vcn_gated);
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		ret = smu_dpm_set_vcn_enable(smu, true);
		if (ret)
			return ret;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		smu_dpm_set_vcn_enable(smu, !vcn_gate);

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * PMFW may have booted the ASIC in a different mode.
	 * For ASICs supporting the AC/DC switch via gpio, PMFW handles the
	 * switch automatically and driver involvement is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

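	/*
	 * The single driver table BO is reused for transferring every SMU
	 * table, so size it to the largest one; SMU_TABLE_PMSTATUSLOG is
	 * excluded above since it gets its own dedicated BO.
	 */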
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; its location is communicated to the
 * firmware via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create the smu_table structure, and init the smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context of the proper context size to fill in the
	 * smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent
	 * further damage.
	 */
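	/*
	 * Note: read_sensor() reports the hotspot temperature in
	 * millidegrees Celsius while software_shutdown_temp is in degrees
	 * Celsius - hence the division by 1000 below.
	 */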
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		smu->plpd_mode = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW puts PLPD into the default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT))
		smu->plpd_mode = XGMI_PLPD_DEFAULT;
	else
		smu->plpd_mode = XGMI_PLPD_NONE;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

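	/*
	 * workload_prority[] maps each PP_SMC_POWER_PROFILE_* to a priority
	 * slot, and workload_mask tracks the active profiles as a bitmask of
	 * those priorities. The default profile lands in bit 0 (smu was
	 * kzalloc'ed, so the array still reads 0 when the mask is computed).
	 */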
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and
	 * duplicate entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need some compaction to eliminate those holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls relevant amdgpu function in response to wbrf event
 * notification from kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * The event flood is over; the driver now consumes the latest exclusion
 * ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Checks via the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
							acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if the wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifi band exclusion ranges may already be in place before
	 * our driver loads. Schedule a run to make sure the driver is
	 * aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as the one
	 * used afterwards. Thus, we can reuse the stored copy and do not
	 * need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for transferring the pptable
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy the pptable bo in vram to smc with SMU messages such
		 * as SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow when wbrf is supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and the relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initial values (from vbios) in the dpm tables context, such as
	 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
	 * clock type.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
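	/* For example, a Gen4-capable x16 link resolves to pcie_gen = 3 and
	 * pcie_width = 6 via the mappings above and below.
	 */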
1667	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1668		pcie_width = 6;
1669	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1670		pcie_width = 5;
1671	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1672		pcie_width = 4;
1673	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1674		pcie_width = 3;
1675	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1676		pcie_width = 2;
1677	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1678		pcie_width = 1;
1679	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1680	if (ret) {
1681		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1682		return ret;
1683	}
1684
1685	ret = smu_get_thermal_temperature_range(smu);
1686	if (ret) {
1687		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1688		return ret;
1689	}
1690
1691	ret = smu_enable_thermal_alert(smu);
1692	if (ret) {
1693	  dev_err(adev->dev, "Failed to enable thermal alert!\n");
1694	  return ret;
1695	}
1696
1697	ret = smu_notify_display_change(smu);
1698	if (ret) {
1699		dev_err(adev->dev, "Failed to notify display change!\n");
1700		return ret;
1701	}
1702
1703	/*
1704	 * Set min deep sleep dce fclk with bootup value from vbios via
1705	 * SetMinDeepSleepDcefclk MSG.
1706	 */
1707	ret = smu_set_min_dcef_deep_sleep(smu,
1708					  smu->smu_table.boot_values.dcefclk / 100);
1709	if (ret) {
1710		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1711		return ret;
1712	}
1713
1714	/* Init wbrf support. Properly setup the notifier */
1715	ret = smu_wbrf_init(smu);
1716	if (ret)
1717		dev_err(adev->dev, "Error during wbrf init call\n");
1718
1719	return ret;
1720}
1721
1722static int smu_start_smc_engine(struct smu_context *smu)
1723{
1724	struct amdgpu_device *adev = smu->adev;
1725	int ret = 0;
1726
1727	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1728		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1729			if (smu->ppt_funcs->load_microcode) {
1730				ret = smu->ppt_funcs->load_microcode(smu);
1731				if (ret)
1732					return ret;
1733			}
1734		}
1735	}
1736
1737	if (smu->ppt_funcs->check_fw_status) {
1738		ret = smu->ppt_funcs->check_fw_status(smu);
1739		if (ret) {
1740			dev_err(adev->dev, "SMC is not ready\n");
1741			return ret;
1742		}
1743	}
1744
1745	/*
1746	 * Send msg GetDriverIfVersion to check if the return value is equal
1747	 * with DRIVER_IF_VERSION of smc header.
1748	 */
1749	ret = smu_check_fw_version(smu);
1750	if (ret)
1751		return ret;
1752
1753	return ret;
1754}
1755
1756static int smu_hw_init(void *handle)
1757{
1758	int ret;
1759	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1760	struct smu_context *smu = adev->powerplay.pp_handle;
1761
1762	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1763		smu->pm_enabled = false;
1764		return 0;
1765	}
1766
1767	ret = smu_start_smc_engine(smu);
1768	if (ret) {
1769		dev_err(adev->dev, "SMC engine is not correctly up!\n");
1770		return ret;
1771	}
1772
1773	/*
1774	 * Check whether wbrf is supported. This needs to be done
1775	 * before SMU setup starts since part of SMU configuration
1776	 * relies on this.
1777	 */
1778	smu_wbrf_support_check(smu);
1779
1780	if (smu->is_apu) {
1781		ret = smu_set_gfx_imu_enable(smu);
1782		if (ret)
1783			return ret;
1784		smu_dpm_set_vcn_enable(smu, true);
1785		smu_dpm_set_jpeg_enable(smu, true);
1786		smu_dpm_set_vpe_enable(smu, true);
1787		smu_dpm_set_umsch_mm_enable(smu, true);
1788		smu_set_gfx_cgpg(smu, true);
1789	}
1790
1791	if (!smu->pm_enabled)
1792		return 0;
1793
1794	ret = smu_get_driver_allowed_feature_mask(smu);
1795	if (ret)
1796		return ret;
1797
1798	ret = smu_smc_hw_setup(smu);
1799	if (ret) {
1800		dev_err(adev->dev, "Failed to setup smc hw!\n");
1801		return ret;
1802	}
1803
1804	/*
1805	 * Move maximum sustainable clock retrieving here considering
1806	 * 1. It is not needed on resume(from S3).
1807	 * 2. DAL settings come between .hw_init and .late_init of SMU.
1808	 *    And DAL needs to know the maximum sustainable clocks. Thus
1809	 *    it cannot be put in .late_init().
1810	 */
1811	ret = smu_init_max_sustainable_clocks(smu);
1812	if (ret) {
1813		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1814		return ret;
1815	}
1816
1817	adev->pm.dpm_enabled = true;
1818
1819	dev_info(adev->dev, "SMU is initialized successfully!\n");
1820
1821	return 0;
1822}
1823
1824static int smu_disable_dpms(struct smu_context *smu)
1825{
1826	struct amdgpu_device *adev = smu->adev;
1827	int ret = 0;
1828	bool use_baco = !smu->is_apu &&
1829		((amdgpu_in_reset(adev) &&
1830		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1831		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1832
1833	/*
1834	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others)
1835	 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1836	 */
1837	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1838	case IP_VERSION(13, 0, 0):
1839	case IP_VERSION(13, 0, 7):
1840	case IP_VERSION(13, 0, 10):
1841		return 0;
1842	default:
1843		break;
1844	}
1845
1846	/*
1847	 * For custom pptable uploading, skip the DPM features
1848	 * disable process on Navi1x ASICs.
1849	 *   - The gfx related features are under the control of
1850	 *     RLC on those ASICs. RLC reinitialization would be
1851	 *     needed to reenable them, which would cost considerably
1852	 *     more effort.
1853	 *
1854	 *   - SMU firmware can handle the DPM reenablement
1855	 *     properly.
1856	 */
1857	if (smu->uploading_custom_pp_table) {
1858		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1859		case IP_VERSION(11, 0, 0):
1860		case IP_VERSION(11, 0, 5):
1861		case IP_VERSION(11, 0, 9):
1862		case IP_VERSION(11, 0, 7):
1863		case IP_VERSION(11, 0, 11):
1864		case IP_VERSION(11, 5, 0):
1865		case IP_VERSION(11, 0, 12):
1866		case IP_VERSION(11, 0, 13):
1867			return 0;
1868		default:
1869			break;
1870		}
1871	}
1872
1873	/*
1874	 * For Sienna_Cichlid, PMFW will handle the feature disablement properly
1875	 * on BACO entry. Driver involvement is unnecessary.
1876	 */
1877	if (use_baco) {
1878		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1879		case IP_VERSION(11, 0, 7):
1880		case IP_VERSION(11, 0, 0):
1881		case IP_VERSION(11, 0, 5):
1882		case IP_VERSION(11, 0, 9):
1883		case IP_VERSION(13, 0, 7):
1884			return 0;
1885		default:
1886			break;
1887		}
1888	}
1889
1890	/*
1891	 * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the feature disablement
1892	 * properly for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1893	 */
1894	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
1895		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1896		case IP_VERSION(13, 0, 4):
1897		case IP_VERSION(13, 0, 11):
1898		case IP_VERSION(14, 0, 0):
1899		case IP_VERSION(14, 0, 1):
1900			return 0;
1901		default:
1902			break;
1903		}
1904	}
1905
1906	/*
1907	 * For gpu reset, runpm and hibernation through BACO,
1908	 * BACO feature has to be kept enabled.
1909	 */
1910	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1911		ret = smu_disable_all_features_with_exception(smu,
1912							      SMU_FEATURE_BACO_BIT);
1913		if (ret)
1914			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1915	} else {
1916		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1917		if (!adev->scpm_enabled) {
1918			ret = smu_system_features_control(smu, false);
1919			if (ret)
1920				dev_err(adev->dev, "Failed to disable smu features.\n");
1921		}
1922	}
1923
1924	/* Notify SMU that RLC is going to be off and stop RLC/SMU interaction,
1925	 * otherwise SMU will hang while interacting with RLC if RLC is halted.
1926	 * This is a workaround for the Vangogh ASIC SMU hang issue.
1927	 */
1928	ret = smu_notify_rlc_state(smu, false);
1929	if (ret) {
1930		dev_err(adev->dev, "Failed to notify RLC state!\n");
1931		return ret;
1932	}
1933
1934	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1935	    !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1936	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1937		adev->gfx.rlc.funcs->stop(adev);
1938
1939	return ret;
1940}
1941
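/*
 * Common teardown shared by .hw_fini and .suspend: quiesce wbrf and any
 * pending work items, disable thermal alerts, then disable DPM features.
 */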
1942static int smu_smc_hw_cleanup(struct smu_context *smu)
1943{
1944	struct amdgpu_device *adev = smu->adev;
1945	int ret = 0;
1946
1947	smu_wbrf_fini(smu);
1948
1949	cancel_work_sync(&smu->throttling_logging_work);
1950	cancel_work_sync(&smu->interrupt_work);
1951
1952	ret = smu_disable_thermal_alert(smu);
1953	if (ret) {
1954		dev_err(adev->dev, "Failed to disable thermal alert!\n");
1955		return ret;
1956	}
1957
1958	cancel_delayed_work_sync(&smu->swctf_delayed_work);
1959
1960	ret = smu_disable_dpms(smu);
1961	if (ret) {
1962		dev_err(adev->dev, "Failed to disable dpm features!\n");
1963		return ret;
1964	}
1965
1966	return 0;
1967}
1968
1969static int smu_reset_mp1_state(struct smu_context *smu)
1970{
1971	struct amdgpu_device *adev = smu->adev;
1972	int ret = 0;
1973
1974	if (!adev->in_runpm && !adev->in_suspend &&
1975	    !amdgpu_in_reset(adev) &&
1976	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
1977	    !amdgpu_device_has_display_hardware(adev))
1978		ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
1979
1980	return ret;
1981}
1982
1983static int smu_hw_fini(void *handle)
1984{
1985	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1986	struct smu_context *smu = adev->powerplay.pp_handle;
1987	int ret;
1988
1989	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1990		return 0;
1991
1992	smu_dpm_set_vcn_enable(smu, false);
1993	smu_dpm_set_jpeg_enable(smu, false);
1994	smu_dpm_set_vpe_enable(smu, false);
1995	smu_dpm_set_umsch_mm_enable(smu, false);
1996
1997	adev->vcn.cur_state = AMD_PG_STATE_GATE;
1998	adev->jpeg.cur_state = AMD_PG_STATE_GATE;
1999
2000	if (!smu->pm_enabled)
2001		return 0;
2002
2003	adev->pm.dpm_enabled = false;
2004
2005	ret = smu_smc_hw_cleanup(smu);
2006	if (ret)
2007		return ret;
2008
2009	ret = smu_reset_mp1_state(smu);
2010	if (ret)
2011		return ret;
2012
2013	return 0;
2014}
2015
2016static void smu_late_fini(void *handle)
2017{
2018	struct amdgpu_device *adev = handle;
2019	struct smu_context *smu = adev->powerplay.pp_handle;
2020
2021	kfree(smu);
2022}
2023
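/*
 * Full SMU reinit: tear the hardware state down, then run hw_init and
 * late_init again (mirrors the normal bring-up sequence).
 */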
2024static int smu_reset(struct smu_context *smu)
2025{
2026	struct amdgpu_device *adev = smu->adev;
2027	int ret;
2028
2029	ret = smu_hw_fini(adev);
2030	if (ret)
2031		return ret;
2032
2033	ret = smu_hw_init(adev);
2034	if (ret)
2035		return ret;
2036
2037	ret = smu_late_init(adev);
2038	if (ret)
2039		return ret;
2040
2041	return 0;
2042}
2043
2044static int smu_suspend(void *handle)
2045{
2046	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2047	struct smu_context *smu = adev->powerplay.pp_handle;
2048	int ret;
2049	uint64_t count;
2050
2051	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2052		return 0;
2053
2054	if (!smu->pm_enabled)
2055		return 0;
2056
2057	adev->pm.dpm_enabled = false;
2058
2059	ret = smu_smc_hw_cleanup(smu);
2060	if (ret)
2061		return ret;
2062
2063	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2064
2065	smu_set_gfx_cgpg(smu, false);
2066
2067	/*
2068	 * PMFW resets the entry count when the device is suspended, so we save
2069	 * the last value here to restore on resume and keep the count consistent.
2070	 */
2071	ret = smu_get_entrycount_gfxoff(smu, &count);
2072	if (!ret)
2073		adev->gfx.gfx_off_entrycount = count;
2074
2075	return 0;
2076}
2077
2078static int smu_resume(void *handle)
2079{
2080	int ret;
2081	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2082	struct smu_context *smu = adev->powerplay.pp_handle;
2083
2084	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2085		return 0;
2086
2087	if (!smu->pm_enabled)
2088		return 0;
2089
2090	dev_info(adev->dev, "SMU is resuming...\n");
2091
2092	ret = smu_start_smc_engine(smu);
2093	if (ret) {
2094		dev_err(adev->dev, "SMC engine did not come up correctly!\n");
2095		return ret;
2096	}
2097
2098	ret = smu_smc_hw_setup(smu);
2099	if (ret) {
2100		dev_err(adev->dev, "Failed to setup smc hw!\n");
2101		return ret;
2102	}
2103
2104	ret = smu_set_gfx_imu_enable(smu);
2105	if (ret)
2106		return ret;
2107
2108	smu_set_gfx_cgpg(smu, true);
2109
2110	smu->disable_uclk_switch = 0;
2111
2112	adev->pm.dpm_enabled = true;
2113
2114	dev_info(adev->dev, "SMU is resumed successfully!\n");
2115
2116	return 0;
2117}
2118
2119static int smu_display_configuration_change(void *handle,
2120					    const struct amd_pp_display_configuration *display_config)
2121{
2122	struct smu_context *smu = handle;
2123
2124	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2125		return -EOPNOTSUPP;
2126
2127	if (!display_config)
2128		return -EINVAL;
2129
2130	smu_set_min_dcef_deep_sleep(smu,
2131				    display_config->min_dcef_deep_sleep_set_clk / 100);
2132
2133	return 0;
2134}
2135
2136static int smu_set_clockgating_state(void *handle,
2137				     enum amd_clockgating_state state)
2138{
2139	return 0;
2140}
2141
2142static int smu_set_powergating_state(void *handle,
2143				     enum amd_powergating_state state)
2144{
2145	return 0;
2146}
2147
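/*
 * Entering a profiling ("UMD pstate") level pins clocks for stable
 * measurements: on entry the current level is saved and GPO, GFX ULV and
 * deep sleep are disabled; on exit they are restored in reverse order.
 */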
2148static int smu_enable_umd_pstate(void *handle,
2149		      enum amd_dpm_forced_level *level)
2150{
2151	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2152					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2153					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2154					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2155
2156	struct smu_context *smu = handle;
2157	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2158
2159	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2160		return -EINVAL;
2161
2162	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2163		/* Enter UMD pstate: save current level, disable gfx cg */
2164		if (*level & profile_mode_mask) {
2165			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2166			smu_gpo_control(smu, false);
2167			smu_gfx_ulv_control(smu, false);
2168			smu_deep_sleep_control(smu, false);
2169			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2170		}
2171	} else {
2172		/* Exit UMD pstate: restore level, enable gfx cg */
2173		if (!(*level & profile_mode_mask)) {
2174			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2175				*level = smu_dpm_ctx->saved_dpm_level;
2176			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2177			smu_deep_sleep_control(smu, true);
2178			smu_gfx_ulv_control(smu, true);
2179			smu_gpo_control(smu, true);
2180		}
2181	}
2182
2183	return 0;
2184}
2185
2186static int smu_bump_power_profile_mode(struct smu_context *smu,
2187					   long *param,
2188					   uint32_t param_size)
2189{
2190	int ret = 0;
2191
2192	if (smu->ppt_funcs->set_power_profile_mode)
2193		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2194
2195	return ret;
2196}
2197
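/*
 * Re-evaluate the dynamic power state: propagate display config changes,
 * apply the clock adjust rules, switch the performance level if it has
 * changed, and re-select the highest-priority workload profile.
 */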
2198static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2199				   enum amd_dpm_forced_level level,
2200				   bool skip_display_settings)
2201{
2202	int ret = 0;
2203	int index = 0;
2204	long workload;
2205	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2206
2207	if (!skip_display_settings) {
2208		ret = smu_display_config_changed(smu);
2209		if (ret) {
2210			dev_err(smu->adev->dev, "Failed to change display config!\n");
2211			return ret;
2212		}
2213	}
2214
2215	ret = smu_apply_clocks_adjust_rules(smu);
2216	if (ret) {
2217		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!\n");
2218		return ret;
2219	}
2220
2221	if (!skip_display_settings) {
2222		ret = smu_notify_smc_display_config(smu);
2223		if (ret) {
2224			dev_err(smu->adev->dev, "Failed to notify smc display config!\n");
2225			return ret;
2226		}
2227	}
2228
2229	if (smu_dpm_ctx->dpm_level != level) {
2230		ret = smu_asic_set_performance_level(smu, level);
2231		if (ret) {
2232			dev_err(smu->adev->dev, "Failed to set performance level!\n");
2233			return ret;
2234		}
2235
2236		/* update the saved copy */
2237		smu_dpm_ctx->dpm_level = level;
2238	}
2239
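	/*
	 * Pick the highest-priority workload still requested: fls() returns
	 * the most significant set bit (1-based) of workload_mask, which
	 * indexes into workload_setting[].
	 */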
2240	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2241		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2242		index = fls(smu->workload_mask);
2243		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2244		workload = smu->workload_setting[index];
2245
2246		if (smu->power_profile_mode != workload)
2247			smu_bump_power_profile_mode(smu, &workload, 0);
2248	}
2249
2250	return ret;
2251}
2252
2253static int smu_handle_task(struct smu_context *smu,
2254			   enum amd_dpm_forced_level level,
2255			   enum amd_pp_task task_id)
2256{
2257	int ret = 0;
2258
2259	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2260		return -EOPNOTSUPP;
2261
2262	switch (task_id) {
2263	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2264		ret = smu_pre_display_config_changed(smu);
2265		if (ret)
2266			return ret;
2267		ret = smu_adjust_power_state_dynamic(smu, level, false);
2268		break;
2269	case AMD_PP_TASK_COMPLETE_INIT:
2270	case AMD_PP_TASK_READJUST_POWER_STATE:
2271		ret = smu_adjust_power_state_dynamic(smu, level, true);
2272		break;
2273	default:
2274		break;
2275	}
2276
2277	return ret;
2278}
2279
2280static int smu_handle_dpm_task(void *handle,
2281			       enum amd_pp_task task_id,
2282			       enum amd_pm_state_type *user_state)
2283{
2284	struct smu_context *smu = handle;
2285	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2286
2287	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2288}
2290
2291static int smu_switch_power_profile(void *handle,
2292				    enum PP_SMC_POWER_PROFILE type,
2293				    bool en)
2294{
2295	struct smu_context *smu = handle;
2296	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2297	long workload;
2298	uint32_t index;
2299
2300	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2301		return -EOPNOTSUPP;
2302
2303	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
2304		return -EINVAL;
2305
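	/*
	 * Set or clear this profile's bit, then fall back to the
	 * highest-priority profile that remains requested via
	 * fls(workload_mask).
	 */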
2306	if (!en) {
2307		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
2308		index = fls(smu->workload_mask);
2309		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2310		workload = smu->workload_setting[index];
2311	} else {
2312		smu->workload_mask |= (1 << smu->workload_prority[type]);
2313		index = fls(smu->workload_mask);
2314		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2315		workload = smu->workload_setting[index];
2316	}
2317
2318	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2319		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2320		smu_bump_power_profile_mode(smu, &workload, 0);
2321
2322	return 0;
2323}
2324
2325static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2326{
2327	struct smu_context *smu = handle;
2328	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2329
2330	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2331		return -EOPNOTSUPP;
2332
2333	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2334		return -EINVAL;
2335
2336	return smu_dpm_ctx->dpm_level;
2337}
2338
2339static int smu_force_performance_level(void *handle,
2340				       enum amd_dpm_forced_level level)
2341{
2342	struct smu_context *smu = handle;
2343	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2344	int ret = 0;
2345
2346	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2347		return -EOPNOTSUPP;
2348
2349	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2350		return -EINVAL;
2351
2352	ret = smu_enable_umd_pstate(smu, &level);
2353	if (ret)
2354		return ret;
2355
2356	ret = smu_handle_task(smu, level,
2357			      AMD_PP_TASK_READJUST_POWER_STATE);
2358
2359	/* reset user dpm clock state */
2360	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2361		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2362		smu->user_dpm_profile.clk_dependency = 0;
2363	}
2364
2365	return ret;
2366}
2367
2368static int smu_set_display_count(void *handle, uint32_t count)
2369{
2370	struct smu_context *smu = handle;
2371
2372	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2373		return -EOPNOTSUPP;
2374
2375	return smu_init_display_count(smu, count);
2376}
2377
2378static int smu_force_smuclk_levels(struct smu_context *smu,
2379			 enum smu_clk_type clk_type,
2380			 uint32_t mask)
2381{
2382	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2383	int ret = 0;
2384
2385	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2386		return -EOPNOTSUPP;
2387
2388	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2389		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2390		return -EINVAL;
2391	}
2392
2393	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2394		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2395		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2396			smu->user_dpm_profile.clk_mask[clk_type] = mask;
2397			smu_set_user_clk_dependencies(smu, clk_type);
2398		}
2399	}
2400
2401	return ret;
2402}
2403
2404static int smu_force_ppclk_levels(void *handle,
2405				  enum pp_clock_type type,
2406				  uint32_t mask)
2407{
2408	struct smu_context *smu = handle;
2409	enum smu_clk_type clk_type;
2410
2411	switch (type) {
2412	case PP_SCLK:
2413		clk_type = SMU_SCLK; break;
2414	case PP_MCLK:
2415		clk_type = SMU_MCLK; break;
2416	case PP_PCIE:
2417		clk_type = SMU_PCIE; break;
2418	case PP_SOCCLK:
2419		clk_type = SMU_SOCCLK; break;
2420	case PP_FCLK:
2421		clk_type = SMU_FCLK; break;
2422	case PP_DCEFCLK:
2423		clk_type = SMU_DCEFCLK; break;
2424	case PP_VCLK:
2425		clk_type = SMU_VCLK; break;
2426	case PP_VCLK1:
2427		clk_type = SMU_VCLK1; break;
2428	case PP_DCLK:
2429		clk_type = SMU_DCLK; break;
2430	case PP_DCLK1:
2431		clk_type = SMU_DCLK1; break;
2432	case OD_SCLK:
2433		clk_type = SMU_OD_SCLK; break;
2434	case OD_MCLK:
2435		clk_type = SMU_OD_MCLK; break;
2436	case OD_VDDC_CURVE:
2437		clk_type = SMU_OD_VDDC_CURVE; break;
2438	case OD_RANGE:
2439		clk_type = SMU_OD_RANGE; break;
2440	default:
2441		return -EINVAL;
2442	}
2443
2444	return smu_force_smuclk_levels(smu, clk_type, mask);
2445}
2446
2447/*
2448 * On system suspend or reset, the dpm_enabled flag
2449 * will be cleared, so that those SMU services which
2450 * are not supported will be gated.
2451 * However, setting the mp1 state should still be granted
2452 * even with dpm_enabled cleared.
2453 */
2454static int smu_set_mp1_state(void *handle,
2455			     enum pp_mp1_state mp1_state)
2456{
2457	struct smu_context *smu = handle;
2458	int ret = 0;
2459
2460	if (!smu->pm_enabled)
2461		return -EOPNOTSUPP;
2462
2463	if (smu->ppt_funcs &&
2464	    smu->ppt_funcs->set_mp1_state)
2465		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2466
2467	return ret;
2468}
2469
2470static int smu_set_df_cstate(void *handle,
2471			     enum pp_df_cstate state)
2472{
2473	struct smu_context *smu = handle;
2474	int ret = 0;
2475
2476	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2477		return -EOPNOTSUPP;
2478
2479	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2480		return 0;
2481
2482	ret = smu->ppt_funcs->set_df_cstate(smu, state);
2483	if (ret)
2484		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2485
2486	return ret;
2487}
2488
2489int smu_write_watermarks_table(struct smu_context *smu)
2490{
2491	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2492		return -EOPNOTSUPP;
2493
2494	return smu_set_watermarks_table(smu, NULL);
2495}
2496
2497static int smu_set_watermarks_for_clock_ranges(void *handle,
2498					       struct pp_smu_wm_range_sets *clock_ranges)
2499{
2500	struct smu_context *smu = handle;
2501
2502	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2503		return -EOPNOTSUPP;
2504
2505	if (smu->disable_watermark)
2506		return 0;
2507
2508	return smu_set_watermarks_table(smu, clock_ranges);
2509}
2510
2511int smu_set_ac_dc(struct smu_context *smu)
2512{
2513	int ret = 0;
2514
2515	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2516		return -EOPNOTSUPP;
2517
2518	/* controlled by firmware */
2519	if (smu->dc_controlled_by_gpio)
2520		return 0;
2521
2522	ret = smu_set_power_source(smu,
2523				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2524				   SMU_POWER_SOURCE_DC);
2525	if (ret)
2526		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2527			smu->adev->pm.ac_power ? "AC" : "DC");
2528
2529	return ret;
2530}
2531
2532const struct amd_ip_funcs smu_ip_funcs = {
2533	.name = "smu",
2534	.early_init = smu_early_init,
2535	.late_init = smu_late_init,
2536	.sw_init = smu_sw_init,
2537	.sw_fini = smu_sw_fini,
2538	.hw_init = smu_hw_init,
2539	.hw_fini = smu_hw_fini,
2540	.late_fini = smu_late_fini,
2541	.suspend = smu_suspend,
2542	.resume = smu_resume,
2543	.is_idle = NULL,
2544	.check_soft_reset = NULL,
2545	.wait_for_idle = NULL,
2546	.soft_reset = NULL,
2547	.set_clockgating_state = smu_set_clockgating_state,
2548	.set_powergating_state = smu_set_powergating_state,
2549};
2550
2551const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2552	.type = AMD_IP_BLOCK_TYPE_SMC,
2553	.major = 11,
2554	.minor = 0,
2555	.rev = 0,
2556	.funcs = &smu_ip_funcs,
2557};
2558
2559const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2560	.type = AMD_IP_BLOCK_TYPE_SMC,
2561	.major = 12,
2562	.minor = 0,
2563	.rev = 0,
2564	.funcs = &smu_ip_funcs,
2565};
2566
2567const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2568	.type = AMD_IP_BLOCK_TYPE_SMC,
2569	.major = 13,
2570	.minor = 0,
2571	.rev = 0,
2572	.funcs = &smu_ip_funcs,
2573};
2574
2575const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2576	.type = AMD_IP_BLOCK_TYPE_SMC,
2577	.major = 14,
2578	.minor = 0,
2579	.rev = 0,
2580	.funcs = &smu_ip_funcs,
2581};
2582
2583static int smu_load_microcode(void *handle)
2584{
2585	struct smu_context *smu = handle;
2586	struct amdgpu_device *adev = smu->adev;
2587	int ret = 0;
2588
2589	if (!smu->pm_enabled)
2590		return -EOPNOTSUPP;
2591
2592	/* This should be used for non-PSP loading */
2593	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2594		return 0;
2595
2596	if (smu->ppt_funcs->load_microcode) {
2597		ret = smu->ppt_funcs->load_microcode(smu);
2598		if (ret) {
2599			dev_err(adev->dev, "Load microcode failed\n");
2600			return ret;
2601		}
2602	}
2603
2604	if (smu->ppt_funcs->check_fw_status) {
2605		ret = smu->ppt_funcs->check_fw_status(smu);
2606		if (ret) {
2607			dev_err(adev->dev, "SMC is not ready\n");
2608			return ret;
2609		}
2610	}
2611
2612	return ret;
2613}
2614
2615static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2616{
2617	int ret = 0;
2618
2619	if (smu->ppt_funcs->set_gfx_cgpg)
2620		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2621
2622	return ret;
2623}
2624
2625static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2626{
2627	struct smu_context *smu = handle;
2628	int ret = 0;
2629
2630	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2631		return -EOPNOTSUPP;
2632
2633	if (!smu->ppt_funcs->set_fan_speed_rpm)
2634		return -EOPNOTSUPP;
2635
2636	if (speed == U32_MAX)
2637		return -EINVAL;
2638
2639	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2640	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2641		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2642		smu->user_dpm_profile.fan_speed_rpm = speed;
2643
2644		/* Override custom PWM setting as they cannot co-exist */
2645		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2646		smu->user_dpm_profile.fan_speed_pwm = 0;
2647	}
2648
2649	return ret;
2650}
2651
2652/**
2653 * smu_get_power_limit - Request one of the SMU power limits
2654 *
2655 * @handle: pointer to smu context
2656 * @limit: requested limit is written back to this variable
2657 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
2658 * @pp_power_type: &pp_power_type selecting the type of power limit
2659 *
2660 * Return: 0 on success, <0 on error
2661 */
2662int smu_get_power_limit(void *handle,
2663			uint32_t *limit,
2664			enum pp_power_limit_level pp_limit_level,
2665			enum pp_power_type pp_power_type)
2666{
2667	struct smu_context *smu = handle;
2668	struct amdgpu_device *adev = smu->adev;
2669	enum smu_ppt_limit_level limit_level;
2670	uint32_t limit_type;
2671	int ret = 0;
2672
2673	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2674		return -EOPNOTSUPP;
2675
2676	switch (pp_power_type) {
2677	case PP_PWR_TYPE_SUSTAINED:
2678		limit_type = SMU_DEFAULT_PPT_LIMIT;
2679		break;
2680	case PP_PWR_TYPE_FAST:
2681		limit_type = SMU_FAST_PPT_LIMIT;
2682		break;
2683	default:
2684		return -EOPNOTSUPP;
2685	}
2686
2687	switch (pp_limit_level) {
2688	case PP_PWR_LIMIT_CURRENT:
2689		limit_level = SMU_PPT_LIMIT_CURRENT;
2690		break;
2691	case PP_PWR_LIMIT_DEFAULT:
2692		limit_level = SMU_PPT_LIMIT_DEFAULT;
2693		break;
2694	case PP_PWR_LIMIT_MAX:
2695		limit_level = SMU_PPT_LIMIT_MAX;
2696		break;
2697	case PP_PWR_LIMIT_MIN:
2698		limit_level = SMU_PPT_LIMIT_MIN;
2699		break;
2700	default:
2701		return -EOPNOTSUPP;
2702	}
2703
2704	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2705		if (smu->ppt_funcs->get_ppt_limit)
2706			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2707	} else {
2708		switch (limit_level) {
2709		case SMU_PPT_LIMIT_CURRENT:
2710			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2711			case IP_VERSION(13, 0, 2):
2712			case IP_VERSION(13, 0, 6):
2713			case IP_VERSION(11, 0, 7):
2714			case IP_VERSION(11, 0, 11):
2715			case IP_VERSION(11, 0, 12):
2716			case IP_VERSION(11, 0, 13):
2717				ret = smu_get_asic_power_limits(smu,
2718								&smu->current_power_limit,
2719								NULL, NULL, NULL);
2720				break;
2721			default:
2722				break;
2723			}
2724			*limit = smu->current_power_limit;
2725			break;
2726		case SMU_PPT_LIMIT_DEFAULT:
2727			*limit = smu->default_power_limit;
2728			break;
2729		case SMU_PPT_LIMIT_MAX:
2730			*limit = smu->max_power_limit;
2731			break;
2732		case SMU_PPT_LIMIT_MIN:
2733			*limit = smu->min_power_limit;
2734			break;
2735		default:
2736			return -EINVAL;
2737		}
2738	}
2739
2740	return ret;
2741}
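
/*
 * Illustrative usage (hypothetical caller, not part of this file):
 *
 *	uint32_t limit;
 *
 *	if (!smu_get_power_limit(smu, &limit, PP_PWR_LIMIT_CURRENT,
 *				 PP_PWR_TYPE_SUSTAINED))
 *		dev_info(smu->adev->dev, "current sustained limit: %u\n",
 *			 limit);
 */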
2742
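/*
 * The upper 8 bits of @limit encode the limit type (see the
 * SMU_*_PPT_LIMIT values) and the lower 24 bits the limit itself, so a
 * hypothetical caller would pass (SMU_FAST_PPT_LIMIT << 24) | value.
 */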
2743static int smu_set_power_limit(void *handle, uint32_t limit)
2744{
2745	struct smu_context *smu = handle;
2746	uint32_t limit_type = limit >> 24;
2747	int ret = 0;
2748
2749	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2750		return -EOPNOTSUPP;
2751
2752	limit &= (1 << 24) - 1;
2753	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2754		if (smu->ppt_funcs->set_power_limit)
2755			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2756
2757	if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2758		dev_err(smu->adev->dev,
2759			"New power limit (%d) is out of range [%d,%d]\n",
2760			limit, smu->min_power_limit, smu->max_power_limit);
2761		return -EINVAL;
2762	}
2763
2764	if (!limit)
2765		limit = smu->current_power_limit;
2766
2767	if (smu->ppt_funcs->set_power_limit) {
2768		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2769		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2770			smu->user_dpm_profile.power_limit = limit;
2771	}
2772
2773	return ret;
2774}
2775
2776static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2777{
2778	int ret = 0;
2779
2780	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2781		return -EOPNOTSUPP;
2782
2783	if (smu->ppt_funcs->print_clk_levels)
2784		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2785
2786	return ret;
2787}
2788
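/*
 * Map the PP-layer clock/OD identifiers onto SMU clock types;
 * SMU_CLK_COUNT doubles as the sentinel for unknown types.
 */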
2789static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2790{
2791	enum smu_clk_type clk_type;
2792
2793	switch (type) {
2794	case PP_SCLK:
2795		clk_type = SMU_SCLK; break;
2796	case PP_MCLK:
2797		clk_type = SMU_MCLK; break;
2798	case PP_PCIE:
2799		clk_type = SMU_PCIE; break;
2800	case PP_SOCCLK:
2801		clk_type = SMU_SOCCLK; break;
2802	case PP_FCLK:
2803		clk_type = SMU_FCLK; break;
2804	case PP_DCEFCLK:
2805		clk_type = SMU_DCEFCLK; break;
2806	case PP_VCLK:
2807		clk_type = SMU_VCLK; break;
2808	case PP_VCLK1:
2809		clk_type = SMU_VCLK1; break;
2810	case PP_DCLK:
2811		clk_type = SMU_DCLK; break;
2812	case PP_DCLK1:
2813		clk_type = SMU_DCLK1; break;
2814	case OD_SCLK:
2815		clk_type = SMU_OD_SCLK; break;
2816	case OD_MCLK:
2817		clk_type = SMU_OD_MCLK; break;
2818	case OD_VDDC_CURVE:
2819		clk_type = SMU_OD_VDDC_CURVE; break;
2820	case OD_RANGE:
2821		clk_type = SMU_OD_RANGE; break;
2822	case OD_VDDGFX_OFFSET:
2823		clk_type = SMU_OD_VDDGFX_OFFSET; break;
2824	case OD_CCLK:
2825		clk_type = SMU_OD_CCLK; break;
2826	case OD_FAN_CURVE:
2827		clk_type = SMU_OD_FAN_CURVE; break;
2828	case OD_ACOUSTIC_LIMIT:
2829		clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2830	case OD_ACOUSTIC_TARGET:
2831		clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2832	case OD_FAN_TARGET_TEMPERATURE:
2833		clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2834	case OD_FAN_MINIMUM_PWM:
2835		clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2836	default:
2837		clk_type = SMU_CLK_COUNT; break;
2838	}
2839
2840	return clk_type;
2841}
2842
2843static int smu_print_ppclk_levels(void *handle,
2844				  enum pp_clock_type type,
2845				  char *buf)
2846{
2847	struct smu_context *smu = handle;
2848	enum smu_clk_type clk_type;
2849
2850	clk_type = smu_convert_to_smuclk(type);
2851	if (clk_type == SMU_CLK_COUNT)
2852		return -EINVAL;
2853
2854	return smu_print_smuclk_levels(smu, clk_type, buf);
2855}
2856
2857static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2858{
2859	struct smu_context *smu = handle;
2860	enum smu_clk_type clk_type;
2861
2862	clk_type = smu_convert_to_smuclk(type);
2863	if (clk_type == SMU_CLK_COUNT)
2864		return -EINVAL;
2865
2866	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2867		return -EOPNOTSUPP;
2868
2869	if (!smu->ppt_funcs->emit_clk_levels)
2870		return -ENOENT;
2871
2872	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2873}
2875
2876static int smu_od_edit_dpm_table(void *handle,
2877				 enum PP_OD_DPM_TABLE_COMMAND type,
2878				 long *input, uint32_t size)
2879{
2880	struct smu_context *smu = handle;
2881	int ret = 0;
2882
2883	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2884		return -EOPNOTSUPP;
2885
2886	if (smu->ppt_funcs->od_edit_dpm_table)
2887		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2889
2890	return ret;
2891}
2892
2893static int smu_read_sensor(void *handle,
2894			   int sensor,
2895			   void *data,
2896			   int *size_arg)
2897{
2898	struct smu_context *smu = handle;
2899	struct smu_umd_pstate_table *pstate_table =
2900				&smu->pstate_table;
2901	int ret = 0;
2902	uint32_t *size, size_val;
2903
2904	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2905		return -EOPNOTSUPP;
2906
2907	if (!data || !size_arg)
2908		return -EINVAL;
2909
2910	size_val = *size_arg;
2911	size = &size_val;
2912
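	/*
	 * Give the ASIC-specific handler the first shot; fall through to the
	 * generic pstate/feature-mask sensors below only if it declines.
	 */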
2913	if (smu->ppt_funcs->read_sensor)
2914		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2915			goto out;
2916
2917	switch (sensor) {
2918	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2919		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2920		*size = 4;
2921		break;
2922	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2923		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2924		*size = 4;
2925		break;
2926	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
2927		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
2928		*size = 4;
2929		break;
2930	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
2931		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
2932		*size = 4;
2933		break;
2934	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2935		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2936		*size = 8;
2937		break;
2938	case AMDGPU_PP_SENSOR_UVD_POWER:
2939		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2940		*size = 4;
2941		break;
2942	case AMDGPU_PP_SENSOR_VCE_POWER:
2943		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2944		*size = 4;
2945		break;
2946	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2947		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2948		*size = 4;
2949		break;
2950	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2951		*(uint32_t *)data = 0;
2952		*size = 4;
2953		break;
2954	default:
2955		*size = 0;
2956		ret = -EOPNOTSUPP;
2957		break;
2958	}
2959
2960out:
2961	/* copy the (possibly updated) size back through the int-sized out param */
2962	*size_arg = size_val;
2963
2964	return ret;
2965}
2966
2967static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
2968{
2969	int ret = -EOPNOTSUPP;
2970	struct smu_context *smu = handle;
2971
2972	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
2973		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
2974
2975	return ret;
2976}
2977
2978static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
2979{
2980	int ret = -EOPNOTSUPP;
2981	struct smu_context *smu = handle;
2982
2983	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
2984		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
2985
2986	return ret;
2987}
2988
2989static int smu_get_power_profile_mode(void *handle, char *buf)
2990{
2991	struct smu_context *smu = handle;
2992
2993	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2994	    !smu->ppt_funcs->get_power_profile_mode)
2995		return -EOPNOTSUPP;
2996	if (!buf)
2997		return -EINVAL;
2998
2999	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3000}
3001
3002static int smu_set_power_profile_mode(void *handle,
3003				      long *param,
3004				      uint32_t param_size)
3005{
3006	struct smu_context *smu = handle;
3007
3008	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3009	    !smu->ppt_funcs->set_power_profile_mode)
3010		return -EOPNOTSUPP;
3011
3012	return smu_bump_power_profile_mode(smu, param, param_size);
3013}
3014
3015static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3016{
3017	struct smu_context *smu = handle;
3018
3019	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3020		return -EOPNOTSUPP;
3021
3022	if (!smu->ppt_funcs->get_fan_control_mode)
3023		return -EOPNOTSUPP;
3024
3025	if (!fan_mode)
3026		return -EINVAL;
3027
3028	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3029
3030	return 0;
3031}
3032
3033static int smu_set_fan_control_mode(void *handle, u32 value)
3034{
3035	struct smu_context *smu = handle;
3036	int ret = 0;
3037
3038	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3039		return -EOPNOTSUPP;
3040
3041	if (!smu->ppt_funcs->set_fan_control_mode)
3042		return -EOPNOTSUPP;
3043
3044	if (value == U32_MAX)
3045		return -EINVAL;
3046
3047	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3048	if (ret)
3049		goto out;
3050
3051	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3052		smu->user_dpm_profile.fan_mode = value;
3053
3054		/* reset user dpm fan speed */
3055		if (value != AMD_FAN_CTRL_MANUAL) {
3056			smu->user_dpm_profile.fan_speed_pwm = 0;
3057			smu->user_dpm_profile.fan_speed_rpm = 0;
3058			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3059		}
3060	}
3061
3062out:
3063	return ret;
3064}
3065
3066static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3067{
3068	struct smu_context *smu = handle;
3069	int ret = 0;
3070
3071	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3072		return -EOPNOTSUPP;
3073
3074	if (!smu->ppt_funcs->get_fan_speed_pwm)
3075		return -EOPNOTSUPP;
3076
3077	if (!speed)
3078		return -EINVAL;
3079
3080	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3081
3082	return ret;
3083}
3084
3085static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3086{
3087	struct smu_context *smu = handle;
3088	int ret = 0;
3089
3090	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3091		return -EOPNOTSUPP;
3092
3093	if (!smu->ppt_funcs->set_fan_speed_pwm)
3094		return -EOPNOTSUPP;
3095
3096	if (speed == U32_MAX)
3097		return -EINVAL;
3098
3099	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3100	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3101		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3102		smu->user_dpm_profile.fan_speed_pwm = speed;
3103
3104		/* Override custom RPM setting as they cannot co-exist */
3105		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3106		smu->user_dpm_profile.fan_speed_rpm = 0;
3107	}
3108
3109	return ret;
3110}
3111
3112static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3113{
3114	struct smu_context *smu = handle;
3115	int ret = 0;
3116
3117	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3118		return -EOPNOTSUPP;
3119
3120	if (!smu->ppt_funcs->get_fan_speed_rpm)
3121		return -EOPNOTSUPP;
3122
3123	if (!speed)
3124		return -EINVAL;
3125
3126	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3127
3128	return ret;
3129}
3130
3131static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3132{
3133	struct smu_context *smu = handle;
3134
3135	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3136		return -EOPNOTSUPP;
3137
3138	return smu_set_min_dcef_deep_sleep(smu, clk);
3139}
3140
3141static int smu_get_clock_by_type_with_latency(void *handle,
3142					      enum amd_pp_clock_type type,
3143					      struct pp_clock_levels_with_latency *clocks)
3144{
3145	struct smu_context *smu = handle;
3146	enum smu_clk_type clk_type;
3147	int ret = 0;
3148
3149	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3150		return -EOPNOTSUPP;
3151
3152	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3153		switch (type) {
3154		case amd_pp_sys_clock:
3155			clk_type = SMU_GFXCLK;
3156			break;
3157		case amd_pp_mem_clock:
3158			clk_type = SMU_MCLK;
3159			break;
3160		case amd_pp_dcef_clock:
3161			clk_type = SMU_DCEFCLK;
3162			break;
3163		case amd_pp_disp_clock:
3164			clk_type = SMU_DISPCLK;
3165			break;
3166		default:
3167			dev_err(smu->adev->dev, "Invalid clock type!\n");
3168			return -EINVAL;
3169		}
3170
3171		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3172	}
3173
3174	return ret;
3175}
3176
3177static int smu_display_clock_voltage_request(void *handle,
3178					     struct pp_display_clock_request *clock_req)
3179{
3180	struct smu_context *smu = handle;
3181	int ret = 0;
3182
3183	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3184		return -EOPNOTSUPP;
3185
3186	if (smu->ppt_funcs->display_clock_voltage_request)
3187		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3188
3189	return ret;
3190}
3191
3192
3193static int smu_display_disable_memory_clock_switch(void *handle,
3194						   bool disable_memory_clock_switch)
3195{
3196	struct smu_context *smu = handle;
3197	int ret = -EINVAL;
3198
3199	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3200		return -EOPNOTSUPP;
3201
3202	if (smu->ppt_funcs->display_disable_memory_clock_switch)
3203		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3204
3205	return ret;
3206}
3207
3208static int smu_set_xgmi_pstate(void *handle,
3209			       uint32_t pstate)
3210{
3211	struct smu_context *smu = handle;
3212	int ret = 0;
3213
3214	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3215		return -EOPNOTSUPP;
3216
3217	if (smu->ppt_funcs->set_xgmi_pstate)
3218		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3219
3220	if (ret)
3221		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3222
3223	return ret;
3224}
3225
3226static bool smu_get_baco_capability(void *handle)
3227{
3228	struct smu_context *smu = handle;
3229
3230	if (!smu->pm_enabled)
3231		return false;
3232
3233	if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
3234		return false;
3235
3236	return smu->ppt_funcs->baco_is_support(smu);
3237}
3238
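/*
 * Set the BACO state: @state == 1 requests BACO entry via baco_enter(),
 * @state == 0 requests exit via baco_exit(); anything else is rejected.
 */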
3239static int smu_baco_set_state(void *handle, int state)
3240{
3241	struct smu_context *smu = handle;
3242	int ret = 0;
3243
3244	if (!smu->pm_enabled)
3245		return -EOPNOTSUPP;
3246
3247	if (state == 0) {
3248		if (smu->ppt_funcs->baco_exit)
3249			ret = smu->ppt_funcs->baco_exit(smu);
3250	} else if (state == 1) {
3251		if (smu->ppt_funcs->baco_enter)
3252			ret = smu->ppt_funcs->baco_enter(smu);
3253	} else {
3254		return -EINVAL;
3255	}
3256
3257	if (ret)
3258		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3259			state ? "enter" : "exit");
3260
3261	return ret;
3262}
3263
3264bool smu_mode1_reset_is_support(struct smu_context *smu)
3265{
3266	bool ret = false;
3267
3268	if (!smu->pm_enabled)
3269		return false;
3270
3271	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3272		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3273
3274	return ret;
3275}
3276
3277bool smu_mode2_reset_is_support(struct smu_context *smu)
3278{
3279	bool ret = false;
3280
3281	if (!smu->pm_enabled)
3282		return false;
3283
3284	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
3285		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
3286
3287	return ret;
3288}
3289
3290int smu_mode1_reset(struct smu_context *smu)
3291{
3292	int ret = 0;
3293
3294	if (!smu->pm_enabled)
3295		return -EOPNOTSUPP;
3296
3297	if (smu->ppt_funcs->mode1_reset)
3298		ret = smu->ppt_funcs->mode1_reset(smu);
3299
3300	return ret;
3301}
3302
3303static int smu_mode2_reset(void *handle)
3304{
3305	struct smu_context *smu = handle;
3306	int ret = 0;
3307
3308	if (!smu->pm_enabled)
3309		return -EOPNOTSUPP;
3310
3311	if (smu->ppt_funcs->mode2_reset)
3312		ret = smu->ppt_funcs->mode2_reset(smu);
3313
3314	if (ret)
3315		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3316
3317	return ret;
3318}
3319
3320static int smu_enable_gfx_features(void *handle)
3321{
3322	struct smu_context *smu = handle;
3323	int ret = 0;
3324
3325	if (!smu->pm_enabled)
3326		return -EOPNOTSUPP;
3327
3328	if (smu->ppt_funcs->enable_gfx_features)
3329		ret = smu->ppt_funcs->enable_gfx_features(smu);
3330
3331	if (ret)
3332		dev_err(smu->adev->dev, "enable gfx features failed!\n");
3333
3334	return ret;
3335}
3336
3337static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3338						struct pp_smu_nv_clock_table *max_clocks)
3339{
3340	struct smu_context *smu = handle;
3341	int ret = 0;
3342
3343	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3344		return -EOPNOTSUPP;
3345
3346	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3347		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3348
3349	return ret;
3350}
3351
3352static int smu_get_uclk_dpm_states(void *handle,
3353				   unsigned int *clock_values_in_khz,
3354				   unsigned int *num_states)
3355{
3356	struct smu_context *smu = handle;
3357	int ret = 0;
3358
3359	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3360		return -EOPNOTSUPP;
3361
3362	if (smu->ppt_funcs->get_uclk_dpm_states)
3363		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3364
3365	return ret;
3366}
3367
3368static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3369{
3370	struct smu_context *smu = handle;
3371	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3372
3373	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3374		return -EOPNOTSUPP;
3375
3376	if (smu->ppt_funcs->get_current_power_state)
3377		pm_state = smu->ppt_funcs->get_current_power_state(smu);
3378
3379	return pm_state;
3380}
3381
3382static int smu_get_dpm_clock_table(void *handle,
3383				   struct dpm_clocks *clock_table)
3384{
3385	struct smu_context *smu = handle;
3386	int ret = 0;
3387
3388	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3389		return -EOPNOTSUPP;
3390
3391	if (smu->ppt_funcs->get_dpm_clock_table)
3392		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3393
3394	return ret;
3395}
3396
3397static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3398{
3399	struct smu_context *smu = handle;
3400
3401	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3402		return -EOPNOTSUPP;
3403
3404	if (!smu->ppt_funcs->get_gpu_metrics)
3405		return -EOPNOTSUPP;
3406
3407	return smu->ppt_funcs->get_gpu_metrics(smu, table);
3408}
3409
3410static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3411				      size_t size)
3412{
3413	struct smu_context *smu = handle;
3414
3415	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3416		return -EOPNOTSUPP;
3417
3418	if (!smu->ppt_funcs->get_pm_metrics)
3419		return -EOPNOTSUPP;
3420
3421	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3422}
3423
3424static int smu_enable_mgpu_fan_boost(void *handle)
3425{
3426	struct smu_context *smu = handle;
3427	int ret = 0;
3428
3429	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3430		return -EOPNOTSUPP;
3431
3432	if (smu->ppt_funcs->enable_mgpu_fan_boost)
3433		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3434
3435	return ret;
3436}
3437
3438static int smu_gfx_state_change_set(void *handle,
3439				    uint32_t state)
3440{
3441	struct smu_context *smu = handle;
3442	int ret = 0;
3443
3444	if (smu->ppt_funcs->gfx_state_change_set)
3445		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3446
3447	return ret;
3448}
3449
3450int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3451{
3452	int ret = 0;
3453
3454	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3455		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3456
3457	return ret;
3458}
3459
3460int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3461{
3462	int ret = -EOPNOTSUPP;
3463
3464	if (smu->ppt_funcs &&
3465		smu->ppt_funcs->get_ecc_info)
3466		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3467
3468	return ret;
3469}
3471
3472static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3473{
3474	struct smu_context *smu = handle;
3475	struct smu_table_context *smu_table = &smu->smu_table;
3476	struct smu_table *memory_pool = &smu_table->memory_pool;
3477
3478	if (!addr || !size)
3479		return -EINVAL;
3480
3481	*addr = NULL;
3482	*size = 0;
3483	if (memory_pool->bo) {
3484		*addr = memory_pool->cpu_addr;
3485		*size = memory_pool->size;
3486	}
3487
3488	return 0;
3489}
3490
3491int smu_set_xgmi_plpd_mode(struct smu_context *smu,
3492			   enum pp_xgmi_plpd_mode mode)
3493{
3494	int ret = -EOPNOTSUPP;
3495
3496	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3497		return ret;
3498
3499	/* PLPD policy is not supported if it's NONE */
3500	if (smu->plpd_mode == XGMI_PLPD_NONE)
3501		return ret;
3502
3503	if (smu->plpd_mode == mode)
3504		return 0;
3505
3506	if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
3507		ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
3508
3509	if (!ret)
3510		smu->plpd_mode = mode;
3511
3512	return ret;
3513}
3514
3515static const struct amd_pm_funcs swsmu_pm_funcs = {
3516	/* export for sysfs */
3517	.set_fan_control_mode    = smu_set_fan_control_mode,
3518	.get_fan_control_mode    = smu_get_fan_control_mode,
3519	.set_fan_speed_pwm   = smu_set_fan_speed_pwm,
3520	.get_fan_speed_pwm   = smu_get_fan_speed_pwm,
3521	.force_clock_level       = smu_force_ppclk_levels,
3522	.print_clock_levels      = smu_print_ppclk_levels,
3523	.emit_clock_levels       = smu_emit_ppclk_levels,
3524	.force_performance_level = smu_force_performance_level,
3525	.read_sensor             = smu_read_sensor,
3526	.get_apu_thermal_limit       = smu_get_apu_thermal_limit,
3527	.set_apu_thermal_limit       = smu_set_apu_thermal_limit,
3528	.get_performance_level   = smu_get_performance_level,
3529	.get_current_power_state = smu_get_current_power_state,
3530	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
3531	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
3532	.get_pp_num_states       = smu_get_power_num_states,
3533	.get_pp_table            = smu_sys_get_pp_table,
3534	.set_pp_table            = smu_sys_set_pp_table,
3535	.switch_power_profile    = smu_switch_power_profile,
3536	/* export to amdgpu */
3537	.dispatch_tasks          = smu_handle_dpm_task,
3538	.load_firmware           = smu_load_microcode,
3539	.set_powergating_by_smu  = smu_dpm_set_power_gate,
3540	.set_power_limit         = smu_set_power_limit,
3541	.get_power_limit         = smu_get_power_limit,
3542	.get_power_profile_mode  = smu_get_power_profile_mode,
3543	.set_power_profile_mode  = smu_set_power_profile_mode,
3544	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
3545	.set_mp1_state           = smu_set_mp1_state,
3546	.gfx_state_change_set    = smu_gfx_state_change_set,
3547	/* export to DC */
3548	.get_sclk                         = smu_get_sclk,
3549	.get_mclk                         = smu_get_mclk,
3550	.display_configuration_change     = smu_display_configuration_change,
3551	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
3552	.display_clock_voltage_request    = smu_display_clock_voltage_request,
3553	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
3554	.set_active_display_count         = smu_set_display_count,
3555	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
3556	.get_asic_baco_capability         = smu_get_baco_capability,
3557	.set_asic_baco_state              = smu_baco_set_state,
3558	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
3559	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
3560	.asic_reset_mode_2                = smu_mode2_reset,
3561	.asic_reset_enable_gfx_features   = smu_enable_gfx_features,
3562	.set_df_cstate                    = smu_set_df_cstate,
3563	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
3564	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
3565	.get_pm_metrics                   = smu_sys_get_pm_metrics,
3566	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
3567	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3568	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
3569	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
3570	.get_dpm_clock_table              = smu_get_dpm_clock_table,
3571	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
3572};
3573
3574int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3575		       uint64_t event_arg)
3576{
3577	int ret = -EINVAL;
3578
3579	if (smu->ppt_funcs->wait_for_event)
3580		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3581
3582	return ret;
3583}
3584
3585int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3586{
3588	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3589		return -EOPNOTSUPP;
3590
3591	/* Confirm the allocated buffer is of the correct size */
3592	if (size != smu->stb_context.stb_buf_size)
3593		return -EINVAL;
3594
3595	/*
3596	 * No need to lock the smu mutex as we access STB directly through MMIO
3597	 * and do not go through the SMU messaging route (for now at least).
3598	 * For register access, rely on the implementation's internal locking.
3599	 */
3600	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3601}
3602
3603#if defined(CONFIG_DEBUG_FS)
3604
3605static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3606{
3607	struct amdgpu_device *adev = filp->f_inode->i_private;
3608	struct smu_context *smu = adev->powerplay.pp_handle;
3609	unsigned char *buf;
3610	int r;
3611
3612	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3613	if (!buf)
3614		return -ENOMEM;
3615
3616	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3617	if (r)
3618		goto out;
3619
3620	filp->private_data = buf;
3621
3622	return 0;
3623
3624out:
3625	kvfree(buf);
3626	return r;
3627}
3628
3629static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3630				loff_t *pos)
3631{
3632	struct amdgpu_device *adev = filp->f_inode->i_private;
3633	struct smu_context *smu = adev->powerplay.pp_handle;
3634
3636	if (!filp->private_data)
3637		return -EINVAL;
3638
3639	return simple_read_from_buffer(buf,
3640				       size,
3641				       pos, filp->private_data,
3642				       smu->stb_context.stb_buf_size);
3643}
3644
3645static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3646{
3647	kvfree(filp->private_data);
3648	filp->private_data = NULL;
3649
3650	return 0;
3651}
3652
3653/*
3654 * We have to define not only the read method but also
3655 * open and release, because .read returns at most PAGE_SIZE
3656 * of data per call and so is invoked multiple times.
3657 * We allocate the STB buffer in .open and release it
3658 * in .release.
3659 */
3660static const struct file_operations smu_stb_debugfs_fops = {
3661	.owner = THIS_MODULE,
3662	.open = smu_stb_debugfs_open,
3663	.read = smu_stb_debugfs_read,
3664	.release = smu_stb_debugfs_release,
3665	.llseek = default_llseek,
3666};
3667
3668#endif
3669
3670void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
3671{
3672#if defined(CONFIG_DEBUG_FS)
3673
3674	struct smu_context *smu = adev->powerplay.pp_handle;
3675
3676	if (!smu || !smu->stb_context.stb_buf_size)
3677		return;
3678
3679	debugfs_create_file_size("amdgpu_smu_stb_dump",
3680			    S_IRUSR,
3681			    adev_to_drm(adev)->primary->debugfs_root,
3682			    adev,
3683			    &smu_stb_debugfs_fops,
3684			    smu->stb_context.stb_buf_size);
3685#endif
3686}
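
/*
 * Illustrative userspace usage (the exact debugfs path is an assumption
 * based on the standard DRM layout):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_smu_stb_dump > stb.bin
 *
 * which snapshots the STB buffer via the open/read/release hooks above.
 */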
3687
3688int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3689{
3690	int ret = 0;
3691
3692	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3693		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3694
3695	return ret;
3696}
3697
3698int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3699{
3700	int ret = 0;
3701
3702	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3703		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3704
3705	return ret;
3706}
3707
3708int smu_send_rma_reason(struct smu_context *smu)
3709{
3710	int ret = 0;
3711
3712	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
3713		ret = smu->ppt_funcs->send_rma_reason(smu);
3714
3715	return ret;
3716}