   1/*
   2 * Copyright 2020 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#define SWSMU_CODE_LAYER_L2
  25
  26#include "amdgpu.h"
  27#include "amdgpu_smu.h"
  28#include "smu_v13_0.h"
  29#include "smu13_driver_if_yellow_carp.h"
  30#include "yellow_carp_ppt.h"
  31#include "smu_v13_0_1_ppsmc.h"
  32#include "smu_v13_0_1_pmfw.h"
  33#include "smu_cmn.h"
  34
  35/*
  36 * DO NOT use these for err/warn/info/debug messages.
  37 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
  38 * They are more MGPU friendly.
  39 */
  40#undef pr_err
  41#undef pr_warn
  42#undef pr_info
  43#undef pr_debug
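
/*
 * The dev_* helpers prefix each message with the specific device, e.g.
 * dev_err(smu->adev->dev, "..."), so output on systems with more than
 * one GPU can be attributed to the right instance.
 */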
  44
  45#define regSMUIO_GFX_MISC_CNTL  						0x00c5
  46#define regSMUIO_GFX_MISC_CNTL_BASE_IDX					0
  47#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK		0x00000006L
  48#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT          0x1L
  49
  50#define SMU_13_0_8_UMD_PSTATE_GFXCLK                   533
  51#define SMU_13_0_8_UMD_PSTATE_SOCCLK                   533
  52#define SMU_13_0_8_UMD_PSTATE_FCLK                     800
  53
  54#define SMU_13_0_1_UMD_PSTATE_GFXCLK					700
  55#define SMU_13_0_1_UMD_PSTATE_SOCCLK		              678
  56#define SMU_13_0_1_UMD_PSTATE_FCLK			          1800
  57
  58#define FEATURE_MASK(feature) (1ULL << feature)
  59#define SMC_DPM_FEATURE ( \
  60	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
  61	FEATURE_MASK(FEATURE_VCN_DPM_BIT)	 | \
  62	FEATURE_MASK(FEATURE_FCLK_DPM_BIT)	 | \
  63	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)	 | \
  64	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)	 | \
  65	FEATURE_MASK(FEATURE_LCLK_DPM_BIT)	 | \
  66	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)	 | \
  67	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
  68	FEATURE_MASK(FEATURE_GFX_DPM_BIT))
  69
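/*
 * Each MSG_MAP() entry below ties a driver-level SMU message to the PPSMC
 * opcode understood by this ASIC's PMFW; the trailing 1 is the validity
 * flag that MSG_MAP() stores in the mapping (see smu_cmn.h for its exact
 * semantics).
 */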
  70static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = {
  71	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			1),
  72	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,		1),
  73	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,		1),
  74	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			1),
  75	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,			1),
  76	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		1),
  77	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			1),
  78	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			1),
  79	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		1),
  80	MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,      1),
  81	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	1),
  82	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		1),
  83	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	1),
  84	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	1),
  85	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		1),
  86	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	1),
  87	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	1),
  88	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		1),
  89	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,		1),
  90	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		1),
  91	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		1),
  92	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		1),
  93	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	1),
  94	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		1),
  95	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,		1),
  96	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	1),
  97	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,		1),
  98	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,			1),
  99	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		1),
 100	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	1),
 101};
 102
 103static struct cmn2asic_mapping yellow_carp_feature_mask_map[SMU_FEATURE_COUNT] = {
 104	FEA_MAP(CCLK_DPM),
 105	FEA_MAP(FAN_CONTROLLER),
 106	FEA_MAP(PPT),
 107	FEA_MAP(TDC),
 108	FEA_MAP(THERMAL),
 109	FEA_MAP(ULV),
 110	FEA_MAP(VCN_DPM),
 111	FEA_MAP_REVERSE(FCLK),
 112	FEA_MAP_REVERSE(SOCCLK),
 113	FEA_MAP(LCLK_DPM),
 114	FEA_MAP(SHUBCLK_DPM),
 115	FEA_MAP(DCFCLK_DPM),
 116	FEA_MAP_HALF_REVERSE(GFX),
 117	FEA_MAP(DS_GFXCLK),
 118	FEA_MAP(DS_SOCCLK),
 119	FEA_MAP(DS_LCLK),
 120	FEA_MAP(DS_DCFCLK),
 121	FEA_MAP(DS_FCLK),
 122	FEA_MAP(DS_MP1CLK),
 123	FEA_MAP(DS_MP0CLK),
 124	FEA_MAP(GFX_DEM),
 125	FEA_MAP(PSI),
 126	FEA_MAP(PROCHOT),
 127	FEA_MAP(CPUOFF),
 128	FEA_MAP(STAPM),
 129	FEA_MAP(S0I3),
 130	FEA_MAP(PERF_LIMIT),
 131	FEA_MAP(CORE_DLDO),
 132	FEA_MAP(RSMU_LOW_POWER),
 133	FEA_MAP(SMN_LOW_POWER),
 134	FEA_MAP(THM_LOW_POWER),
 135	FEA_MAP(SMUIO_LOW_POWER),
 136	FEA_MAP(MP1_LOW_POWER),
 137	FEA_MAP(DS_VCN),
 138	FEA_MAP(CPPC),
 139	FEA_MAP(DF_CSTATES),
 140	FEA_MAP(MSMU_LOW_POWER),
 141	FEA_MAP(ATHUB_PG),
 142};
 143
 144static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
 145	TAB_MAP_VALID(WATERMARKS),
 146	TAB_MAP_VALID(SMU_METRICS),
 147	TAB_MAP_VALID(CUSTOM_DPM),
 148	TAB_MAP_VALID(DPMCLOCKS),
 149};
  150
 151static int yellow_carp_init_smc_tables(struct smu_context *smu)
 152{
 153	struct smu_table_context *smu_table = &smu->smu_table;
 154	struct smu_table *tables = smu_table->tables;
 155
 156	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
 157		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 158	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
 159		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 160	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
 161		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 162
 163	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
 164	if (!smu_table->clocks_table)
 165		goto err0_out;
 166
 167	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 168	if (!smu_table->metrics_table)
 169		goto err1_out;
 170	smu_table->metrics_time = 0;
 171
 172	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
 173	if (!smu_table->watermarks_table)
 174		goto err2_out;
 175
 176	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
 177	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 178	if (!smu_table->gpu_metrics_table)
 179		goto err3_out;
 180
 181	return 0;
 182
 183err3_out:
 184	kfree(smu_table->watermarks_table);
 185err2_out:
 186	kfree(smu_table->metrics_table);
 187err1_out:
 188	kfree(smu_table->clocks_table);
 189err0_out:
 190	return -ENOMEM;
 191}
 192
 193static int yellow_carp_fini_smc_tables(struct smu_context *smu)
 194{
 195	struct smu_table_context *smu_table = &smu->smu_table;
 196
 197	kfree(smu_table->clocks_table);
 198	smu_table->clocks_table = NULL;
 199
 200	kfree(smu_table->metrics_table);
 201	smu_table->metrics_table = NULL;
 202
 203	kfree(smu_table->watermarks_table);
 204	smu_table->watermarks_table = NULL;
 205
 206	kfree(smu_table->gpu_metrics_table);
 207	smu_table->gpu_metrics_table = NULL;
 208
 209	return 0;
 210}
 211
 212static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
 213{
 214	struct amdgpu_device *adev = smu->adev;
 215	int ret = 0;
 216
 217	if (!en && !adev->in_s0ix)
 218		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
 219
 220	return ret;
 221}
 222
 223static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 224{
 225	int ret = 0;
 226
 227	/* vcn dpm on is a prerequisite for vcn power gate messages */
 228	if (enable)
 229		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
 230						      0, NULL);
 231	else
 232		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
 233						      0, NULL);
 234
 235	return ret;
 236}
 237
 238static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 239{
 240	int ret = 0;
 241
 242	if (enable)
 243		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg,
 244						      0, NULL);
 245	else
 246		ret = smu_cmn_send_smc_msg_with_param(smu,
 247						      SMU_MSG_PowerDownJpeg, 0,
 248						      NULL);
 249
 250	return ret;
 251}
  252
 254static bool yellow_carp_is_dpm_running(struct smu_context *smu)
 255{
 256	int ret = 0;
 257	uint64_t feature_enabled;
 258
 259	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
 260
 261	if (ret)
 262		return false;
 263
 264	return !!(feature_enabled & SMC_DPM_FEATURE);
 265}
 266
 267static int yellow_carp_post_smu_init(struct smu_context *smu)
 268{
 269	struct amdgpu_device *adev = smu->adev;
 270	int ret = 0;
 271
  272	/* the AllowGfxOff message will be sent after this EnableGfxOff message on Yellow Carp */
 273	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
 274	if (ret)
 275		dev_err(adev->dev, "Failed to Enable GfxOff!\n");
 276	return ret;
 277}
 278
 279static int yellow_carp_mode_reset(struct smu_context *smu, int type)
 280{
 281	int ret = 0;
 282
 283	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
 284	if (ret)
 285		dev_err(smu->adev->dev, "Failed to mode reset!\n");
 286
 287	return ret;
 288}
 289
 290static int yellow_carp_mode2_reset(struct smu_context *smu)
 291{
 292	return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2);
 293}
  294
 296static void yellow_carp_get_ss_power_percent(SmuMetrics_t *metrics,
 297					uint32_t *apu_percent, uint32_t *dgpu_percent)
 298{
 299	uint32_t apu_boost = 0;
 300	uint32_t dgpu_boost = 0;
 301	uint16_t apu_limit = 0;
 302	uint16_t dgpu_limit = 0;
 303	uint16_t apu_power = 0;
 304	uint16_t dgpu_power = 0;
 305
  306	/* APU and dGPU power values are reported in milliwatts,
  307	 * while the STAPM power limits are in watts */
 308	apu_power = metrics->ApuPower/1000;
 309	apu_limit = metrics->StapmOpnLimit;
 310	if (apu_power > apu_limit && apu_limit != 0)
 311		apu_boost =  ((apu_power - apu_limit) * 100) / apu_limit;
 312	apu_boost = (apu_boost > 100) ? 100 : apu_boost;
 313
 314	dgpu_power = metrics->dGpuPower/1000;
 315	if (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)
 316		dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOpnLimit;
 317	if (dgpu_power > dgpu_limit && dgpu_limit != 0)
 318		dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit;
 319	dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost;
 320
 321	if (dgpu_boost >= apu_boost)
 322		apu_boost = 0;
 323	else
 324		dgpu_boost = 0;
 325
 326	*apu_percent = apu_boost;
 327	*dgpu_percent = dgpu_boost;
 328
 329}
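
/*
 * Illustrative example of the boost split above (made-up numbers): with
 * ApuPower = 18000 mW and StapmOpnLimit = 15 W, apu_power = 18 and
 * apu_boost = ((18 - 15) * 100) / 15 = 20 (%).  Whichever of apu_boost
 * or dgpu_boost is smaller is then forced to 0, so only the larger
 * share is reported.
 */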
 330
 331static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
 332							MetricsMember_t member,
 333							uint32_t *value)
 334{
 335	struct smu_table_context *smu_table = &smu->smu_table;
 336
 337	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 338	int ret = 0;
 339	uint32_t apu_percent = 0;
 340	uint32_t dgpu_percent = 0;
 341
 342	ret = smu_cmn_get_metrics_table(smu, NULL, false);
 343	if (ret)
 344		return ret;
 345
 346	switch (member) {
 347	case METRICS_AVERAGE_GFXCLK:
 348		*value = metrics->GfxclkFrequency;
 349		break;
 350	case METRICS_AVERAGE_SOCCLK:
 351		*value = metrics->SocclkFrequency;
 352		break;
 353	case METRICS_AVERAGE_VCLK:
 354		*value = metrics->VclkFrequency;
 355		break;
 356	case METRICS_AVERAGE_DCLK:
 357		*value = metrics->DclkFrequency;
 358		break;
 359	case METRICS_AVERAGE_UCLK:
 360		*value = metrics->MemclkFrequency;
 361		break;
 362	case METRICS_AVERAGE_GFXACTIVITY:
 363		*value = metrics->GfxActivity / 100;
 364		break;
 365	case METRICS_AVERAGE_VCNACTIVITY:
 366		*value = metrics->UvdActivity;
 367		break;
 368	case METRICS_CURR_SOCKETPOWER:
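		/*
		 * CurrentSocketPower is presumably in milliwatts like the
		 * ApuPower/dGpuPower fields noted above; shifting left by 8
		 * and dividing by 1000 rescales it to watts in 8.8 fixed
		 * point (units of 1/256 W).
		 */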
 369		*value = (metrics->CurrentSocketPower << 8) / 1000;
 370		break;
 371	case METRICS_TEMPERATURE_EDGE:
 372		*value = metrics->GfxTemperature / 100 *
 373		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 374		break;
 375	case METRICS_TEMPERATURE_HOTSPOT:
 376		*value = metrics->SocTemperature / 100 *
 377		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 378		break;
 379	case METRICS_THROTTLER_STATUS:
 380		*value = metrics->ThrottlerStatus;
 381		break;
 382	case METRICS_VOLTAGE_VDDGFX:
 383		*value = metrics->Voltage[0];
 384		break;
 385	case METRICS_VOLTAGE_VDDSOC:
 386		*value = metrics->Voltage[1];
 387		break;
 388	case METRICS_SS_APU_SHARE:
 389		/* return the percentage of APU power boost
 390		 * with respect to APU's power limit.
 391		 */
 392		yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
 393		*value = apu_percent;
 394		break;
 395	case METRICS_SS_DGPU_SHARE:
 396		/* return the percentage of dGPU power boost
 397		 * with respect to dGPU's power limit.
 398		 */
 399		yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
 400		*value = dgpu_percent;
 401		break;
 402	default:
 403		*value = UINT_MAX;
 404		break;
 405	}
 406
 407	return ret;
 408}
 409
 410static int yellow_carp_read_sensor(struct smu_context *smu,
 411					enum amd_pp_sensors sensor,
 412					void *data, uint32_t *size)
 413{
 414	int ret = 0;
 415
 416	if (!data || !size)
 417		return -EINVAL;
 418
 419	switch (sensor) {
 420	case AMDGPU_PP_SENSOR_GPU_LOAD:
 421		ret = yellow_carp_get_smu_metrics_data(smu,
 422								METRICS_AVERAGE_GFXACTIVITY,
 423								(uint32_t *)data);
 424		*size = 4;
 425		break;
 426	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
 427		ret = yellow_carp_get_smu_metrics_data(smu,
 428								METRICS_CURR_SOCKETPOWER,
 429								(uint32_t *)data);
 430		*size = 4;
 431		break;
 432	case AMDGPU_PP_SENSOR_EDGE_TEMP:
 433		ret = yellow_carp_get_smu_metrics_data(smu,
 434								METRICS_TEMPERATURE_EDGE,
 435								(uint32_t *)data);
 436		*size = 4;
 437		break;
 438	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
 439		ret = yellow_carp_get_smu_metrics_data(smu,
 440								METRICS_TEMPERATURE_HOTSPOT,
 441								(uint32_t *)data);
 442		*size = 4;
 443		break;
 444	case AMDGPU_PP_SENSOR_GFX_MCLK:
 445		ret = yellow_carp_get_smu_metrics_data(smu,
 446								METRICS_AVERAGE_UCLK,
 447								(uint32_t *)data);
 448		*(uint32_t *)data *= 100;
 449		*size = 4;
 450		break;
 451	case AMDGPU_PP_SENSOR_GFX_SCLK:
 452		ret = yellow_carp_get_smu_metrics_data(smu,
 453								METRICS_AVERAGE_GFXCLK,
 454								(uint32_t *)data);
 455		*(uint32_t *)data *= 100;
 456		*size = 4;
 457		break;
 458	case AMDGPU_PP_SENSOR_VDDGFX:
 459		ret = yellow_carp_get_smu_metrics_data(smu,
 460								METRICS_VOLTAGE_VDDGFX,
 461								(uint32_t *)data);
 462		*size = 4;
 463		break;
 464	case AMDGPU_PP_SENSOR_VDDNB:
 465		ret = yellow_carp_get_smu_metrics_data(smu,
 466								METRICS_VOLTAGE_VDDSOC,
 467								(uint32_t *)data);
 468		*size = 4;
 469		break;
 470	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
 471		ret = yellow_carp_get_smu_metrics_data(smu,
 472						       METRICS_SS_APU_SHARE,
 473						       (uint32_t *)data);
 474		*size = 4;
 475		break;
 476	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
 477		ret = yellow_carp_get_smu_metrics_data(smu,
 478						       METRICS_SS_DGPU_SHARE,
 479						       (uint32_t *)data);
 480		*size = 4;
 481		break;
 482	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
 483	default:
 484		ret = -EOPNOTSUPP;
 485		break;
 486	}
 487
 488	return ret;
 489}
 490
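/*
 * Copy the watermark ranges provided by display code into the firmware
 * Watermarks_t layout: reader sets land in the WM_DCFCLK rows and writer
 * sets in the WM_SOCCLK rows.  The table is pushed to the SMU only while
 * it is marked as existing but not yet loaded in smu->watermarks_bitmap.
 */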
 491static int yellow_carp_set_watermarks_table(struct smu_context *smu,
 492				struct pp_smu_wm_range_sets *clock_ranges)
 493{
 494	int i;
 495	int ret = 0;
 496	Watermarks_t *table = smu->smu_table.watermarks_table;
 497
 498	if (!table || !clock_ranges)
 499		return -EINVAL;
 500
 501	if (clock_ranges) {
 502		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
 503			clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
 504			return -EINVAL;
 505
 506		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
 507			table->WatermarkRow[WM_DCFCLK][i].MinClock =
 508				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
 509			table->WatermarkRow[WM_DCFCLK][i].MaxClock =
 510				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
 511			table->WatermarkRow[WM_DCFCLK][i].MinMclk =
 512				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
 513			table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
 514				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
 515
 516			table->WatermarkRow[WM_DCFCLK][i].WmSetting =
 517				clock_ranges->reader_wm_sets[i].wm_inst;
 518		}
 519
 520		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
 521			table->WatermarkRow[WM_SOCCLK][i].MinClock =
 522				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
 523			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
 524				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
 525			table->WatermarkRow[WM_SOCCLK][i].MinMclk =
 526				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
 527			table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
 528				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
 529
 530			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
 531				clock_ranges->writer_wm_sets[i].wm_inst;
 532		}
 533
 534		smu->watermarks_bitmap |= WATERMARKS_EXIST;
 535	}
 536
 537	/* pass data to smu controller */
 538	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
 539	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
 540		ret = smu_cmn_write_watermarks_table(smu);
 541		if (ret) {
 542			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
 543			return ret;
 544		}
 545		smu->watermarks_bitmap |= WATERMARKS_LOADED;
 546	}
 547
 548	return 0;
 549}
 550
 551static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
 552						void **table)
 553{
 554	struct smu_table_context *smu_table = &smu->smu_table;
 555	struct gpu_metrics_v2_1 *gpu_metrics =
 556		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
 557	SmuMetrics_t metrics;
 558	int ret = 0;
 559
 560	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
 561	if (ret)
 562		return ret;
 563
 564	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
 565
 566	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
 567	gpu_metrics->temperature_soc = metrics.SocTemperature;
 568	memcpy(&gpu_metrics->temperature_core[0],
 569		&metrics.CoreTemperature[0],
 570		sizeof(uint16_t) * 8);
 571	gpu_metrics->temperature_l3[0] = metrics.L3Temperature;
 572
 573	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
 574	gpu_metrics->average_mm_activity = metrics.UvdActivity;
 575
 576	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
 577	gpu_metrics->average_gfx_power = metrics.Power[0];
 578	gpu_metrics->average_soc_power = metrics.Power[1];
 579	memcpy(&gpu_metrics->average_core_power[0],
 580		&metrics.CorePower[0],
 581		sizeof(uint16_t) * 8);
 582
 583	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
 584	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
 585	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
 586	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
 587	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
 588	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
 589
 590	memcpy(&gpu_metrics->current_coreclk[0],
 591		&metrics.CoreFrequency[0],
 592		sizeof(uint16_t) * 8);
 593	gpu_metrics->current_l3clk[0] = metrics.L3Frequency;
 594
 595	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
 596
 597	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 598
 599	*table = (void *)gpu_metrics;
 600
 601	return sizeof(struct gpu_metrics_v2_1);
 602}
 603
  604/**
  605 * yellow_carp_get_gfxoff_status - get gfxoff status
  606 *
  607 * @smu: smu_context pointer
  608 *
  609 * Query the current gfxoff status from the SMUIO register.
  610 *
  611 * Returns 0 = GFXOFF (default).
  612 * Returns 1 = transition out of GFX state.
  613 * Returns 2 = not in GFXOFF.
  614 * Returns 3 = transition into GFXOFF.
  615 */
 616static uint32_t yellow_carp_get_gfxoff_status(struct smu_context *smu)
 617{
 618	uint32_t reg;
 619	uint32_t gfxoff_status = 0;
 620	struct amdgpu_device *adev = smu->adev;
 621
 622	reg = RREG32_SOC15(SMUIO, 0, regSMUIO_GFX_MISC_CNTL);
 623	gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
 624		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
 625
 626	return gfxoff_status;
 627}
 628
 629static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
 630{
 631	struct smu_table_context *smu_table = &smu->smu_table;
 632
 633	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
 634}
 635
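/*
 * Handler for the overdrive sclk table.  A rough sketch of the
 * pp_od_clk_voltage sysfs sequence this implements (values are only
 * examples), assuming the manual performance level is selected first:
 *   "s 0 600"  - request a hard minimum sclk of 600 MHz
 *   "s 1 1000" - request a soft maximum sclk of 1000 MHz
 *   "c"        - commit both limits to the SMU
 *   "r"        - restore the default min/max
 */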
 636static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
 637					long input[], uint32_t size)
 638{
 639	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
 640	int ret = 0;
 641
 642	/* Only allowed in manual mode */
 643	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 644		return -EINVAL;
 645
 646	switch (type) {
 647	case PP_OD_EDIT_SCLK_VDDC_TABLE:
 648		if (size != 2) {
 649			dev_err(smu->adev->dev, "Input parameter number not correct\n");
 650			return -EINVAL;
 651		}
 652
 653		if (input[0] == 0) {
 654			if (input[1] < smu->gfx_default_hard_min_freq) {
 655				dev_warn(smu->adev->dev,
 656					"Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
 657					input[1], smu->gfx_default_hard_min_freq);
 658				return -EINVAL;
 659			}
 660			smu->gfx_actual_hard_min_freq = input[1];
 661		} else if (input[0] == 1) {
 662			if (input[1] > smu->gfx_default_soft_max_freq) {
 663				dev_warn(smu->adev->dev,
 664					"Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
 665					input[1], smu->gfx_default_soft_max_freq);
 666				return -EINVAL;
 667			}
 668			smu->gfx_actual_soft_max_freq = input[1];
 669		} else {
 670			return -EINVAL;
 671		}
 672		break;
 673	case PP_OD_RESTORE_DEFAULT_TABLE:
 674		if (size != 0) {
 675			dev_err(smu->adev->dev, "Input parameter number not correct\n");
 676			return -EINVAL;
 677		} else {
 678			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
 679			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
 680		}
 681		break;
 682	case PP_OD_COMMIT_DPM_TABLE:
 683		if (size != 0) {
 684			dev_err(smu->adev->dev, "Input parameter number not correct\n");
 685			return -EINVAL;
 686		} else {
 687			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
 688				dev_err(smu->adev->dev,
 689					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
 690					smu->gfx_actual_hard_min_freq,
 691					smu->gfx_actual_soft_max_freq);
 692				return -EINVAL;
 693			}
 694
 695			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
 696									smu->gfx_actual_hard_min_freq, NULL);
 697			if (ret) {
 698				dev_err(smu->adev->dev, "Set hard min sclk failed!");
 699				return ret;
 700			}
 701
 702			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
 703									smu->gfx_actual_soft_max_freq, NULL);
 704			if (ret) {
 705				dev_err(smu->adev->dev, "Set soft max sclk failed!");
 706				return ret;
 707			}
 708		}
 709		break;
 710	default:
 711		return -ENOSYS;
 712	}
 713
 714	return ret;
 715}
 716
 717static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
 718						enum smu_clk_type clk_type,
 719						uint32_t *value)
 720{
 721	MetricsMember_t member_type;
 722
 723	switch (clk_type) {
 724	case SMU_SOCCLK:
 725		member_type = METRICS_AVERAGE_SOCCLK;
 726		break;
 727	case SMU_VCLK:
  728		member_type = METRICS_AVERAGE_VCLK;
 729		break;
 730	case SMU_DCLK:
 731		member_type = METRICS_AVERAGE_DCLK;
 732		break;
 733	case SMU_MCLK:
 734		member_type = METRICS_AVERAGE_UCLK;
 735		break;
 736	case SMU_FCLK:
 737		return smu_cmn_send_smc_msg_with_param(smu,
 738				SMU_MSG_GetFclkFrequency, 0, value);
  739	case SMU_GFXCLK:
  740	case SMU_SCLK:
  741		return smu_cmn_send_smc_msg_with_param(smu,
  742				SMU_MSG_GetGfxclkFrequency, 0, value);
 744	default:
 745		return -EINVAL;
 746	}
 747
 748	return yellow_carp_get_smu_metrics_data(smu, member_type, value);
 749}
 750
 751static int yellow_carp_get_dpm_level_count(struct smu_context *smu,
 752						enum smu_clk_type clk_type,
 753						uint32_t *count)
 754{
 755	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 756
 757	switch (clk_type) {
 758	case SMU_SOCCLK:
 759		*count = clk_table->NumSocClkLevelsEnabled;
 760		break;
 761	case SMU_VCLK:
 762		*count = clk_table->VcnClkLevelsEnabled;
 763		break;
 764	case SMU_DCLK:
 765		*count = clk_table->VcnClkLevelsEnabled;
 766		break;
 767	case SMU_MCLK:
 768		*count = clk_table->NumDfPstatesEnabled;
 769		break;
 770	case SMU_FCLK:
 771		*count = clk_table->NumDfPstatesEnabled;
 772		break;
 773	default:
 774		break;
 775	}
 776
 777	return 0;
 778}
 779
 780static int yellow_carp_get_dpm_freq_by_index(struct smu_context *smu,
 781						enum smu_clk_type clk_type,
 782						uint32_t dpm_level,
 783						uint32_t *freq)
 784{
 785	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 786
 787	if (!clk_table || clk_type >= SMU_CLK_COUNT)
 788		return -EINVAL;
 789
 790	switch (clk_type) {
 791	case SMU_SOCCLK:
 792		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
 793			return -EINVAL;
 794		*freq = clk_table->SocClocks[dpm_level];
 795		break;
 796	case SMU_VCLK:
 797		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
 798			return -EINVAL;
 799		*freq = clk_table->VClocks[dpm_level];
 800		break;
 801	case SMU_DCLK:
 802		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
 803			return -EINVAL;
 804		*freq = clk_table->DClocks[dpm_level];
 805		break;
 806	case SMU_UCLK:
 807	case SMU_MCLK:
 808		if (dpm_level >= clk_table->NumDfPstatesEnabled)
 809			return -EINVAL;
 810		*freq = clk_table->DfPstateTable[dpm_level].MemClk;
 811		break;
 812	case SMU_FCLK:
 813		if (dpm_level >= clk_table->NumDfPstatesEnabled)
 814			return -EINVAL;
 815		*freq = clk_table->DfPstateTable[dpm_level].FClk;
 816		break;
 817	default:
 818		return -EINVAL;
 819	}
 820
 821	return 0;
 822}
 823
 824static bool yellow_carp_clk_dpm_is_enabled(struct smu_context *smu,
 825						enum smu_clk_type clk_type)
 826{
 827	enum smu_feature_mask feature_id = 0;
 828
 829	switch (clk_type) {
 830	case SMU_MCLK:
 831	case SMU_UCLK:
 832	case SMU_FCLK:
 833		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
 834		break;
 835	case SMU_GFXCLK:
 836	case SMU_SCLK:
 837		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
 838		break;
 839	case SMU_SOCCLK:
 840		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
 841		break;
 842	case SMU_VCLK:
 843	case SMU_DCLK:
 844		feature_id = SMU_FEATURE_VCN_DPM_BIT;
 845		break;
 846	default:
 847		return true;
 848	}
 849
 850	return smu_cmn_feature_is_enabled(smu, feature_id);
 851}
 852
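/*
 * Note on ordering: as used below (and when printing levels), the DF
 * pstate table appears to be sorted from highest to lowest clock, so
 * index 0 is treated as the max for MCLK/FCLK and NumDfPstatesEnabled-1
 * as the min, while the SOCCLK and VCLK/DCLK levels run low to high.
 */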
 853static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu,
 854							enum smu_clk_type clk_type,
 855							uint32_t *min,
 856							uint32_t *max)
 857{
 858	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 859	uint32_t clock_limit;
 860	uint32_t max_dpm_level, min_dpm_level;
 861	int ret = 0;
 862
 863	if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) {
 864		switch (clk_type) {
 865		case SMU_MCLK:
 866		case SMU_UCLK:
 867			clock_limit = smu->smu_table.boot_values.uclk;
 868			break;
 869		case SMU_FCLK:
 870			clock_limit = smu->smu_table.boot_values.fclk;
 871			break;
 872		case SMU_GFXCLK:
 873		case SMU_SCLK:
 874			clock_limit = smu->smu_table.boot_values.gfxclk;
 875			break;
 876		case SMU_SOCCLK:
 877			clock_limit = smu->smu_table.boot_values.socclk;
 878			break;
 879		case SMU_VCLK:
 880			clock_limit = smu->smu_table.boot_values.vclk;
 881			break;
 882		case SMU_DCLK:
 883			clock_limit = smu->smu_table.boot_values.dclk;
 884			break;
 885		default:
 886			clock_limit = 0;
 887			break;
 888		}
 889
  890		/* clock in MHz units */
 891		if (min)
 892			*min = clock_limit / 100;
 893		if (max)
 894			*max = clock_limit / 100;
 895
 896		return 0;
 897	}
 898
 899	if (max) {
 900		switch (clk_type) {
 901		case SMU_GFXCLK:
 902		case SMU_SCLK:
 903			*max = clk_table->MaxGfxClk;
 904			break;
 905		case SMU_MCLK:
 906		case SMU_UCLK:
 907		case SMU_FCLK:
 908			max_dpm_level = 0;
 909			break;
 910		case SMU_SOCCLK:
 911			max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
 912			break;
 913		case SMU_VCLK:
 914		case SMU_DCLK:
 915			max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
 916			break;
 917		default:
 918			ret = -EINVAL;
 919			goto failed;
 920		}
 921
 922		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
 923			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
 924			if (ret)
 925				goto failed;
 926		}
 927	}
 928
 929	if (min) {
 930		switch (clk_type) {
 931		case SMU_GFXCLK:
 932		case SMU_SCLK:
 933			*min = clk_table->MinGfxClk;
 934			break;
 935		case SMU_MCLK:
 936		case SMU_UCLK:
 937		case SMU_FCLK:
 938			min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
 939			break;
 940		case SMU_SOCCLK:
 941			min_dpm_level = 0;
 942			break;
 943		case SMU_VCLK:
 944		case SMU_DCLK:
 945			min_dpm_level = 0;
 946			break;
 947		default:
 948			ret = -EINVAL;
 949			goto failed;
 950		}
 951
 952		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
 953			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
 954			if (ret)
 955				goto failed;
 956		}
 957	}
 958
 959failed:
 960	return ret;
 961}
 962
 963static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
 964							enum smu_clk_type clk_type,
 965							uint32_t min,
 966							uint32_t max)
 967{
 968	enum smu_message_type msg_set_min, msg_set_max;
 969	uint32_t min_clk = min;
 970	uint32_t max_clk = max;
 971
 972	int ret = 0;
 973
 974	if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type))
 975		return -EINVAL;
 976
 977	switch (clk_type) {
 978	case SMU_GFXCLK:
 979	case SMU_SCLK:
 980		msg_set_min = SMU_MSG_SetHardMinGfxClk;
 981		msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
 982		break;
 983	case SMU_FCLK:
 984		msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
 985		msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
 986		break;
 987	case SMU_SOCCLK:
 988		msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
 989		msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
 990		break;
 991	case SMU_VCLK:
 992	case SMU_DCLK:
 993		msg_set_min = SMU_MSG_SetHardMinVcn;
 994		msg_set_max = SMU_MSG_SetSoftMaxVcn;
 995		break;
 996	default:
 997		return -EINVAL;
 998	}
 999
1000	if (clk_type == SMU_VCLK) {
1001		min_clk = min << SMU_13_VCLK_SHIFT;
1002		max_clk = max << SMU_13_VCLK_SHIFT;
1003	}
1004
1005	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL);
1006
1007	if (ret)
1008		goto out;
1009
1010	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max_clk, NULL);
1011	if (ret)
1012		goto out;
1013
1014out:
1015	return ret;
1016}
1017
1018static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
1019					enum smu_clk_type clk_type)
1020{
1021	uint32_t clk_limit = 0;
1022	struct amdgpu_device *adev = smu->adev;
1023
1024	switch (clk_type) {
1025	case SMU_GFXCLK:
1026	case SMU_SCLK:
1027		if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
1028			clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK;
1029		if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
1030			(amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
1031			clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK;
1032		break;
1033	case SMU_SOCCLK:
1034		if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
1035			clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK;
1036		if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
1037			(amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
1038			clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK;
1039		break;
1040	case SMU_FCLK:
1041		if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
1042			clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK;
1043		if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
1044			(amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
1045			clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK;
1046		break;
1047	default:
1048		break;
1049	}
1050
1051	return clk_limit;
1052}
1053
1054static int yellow_carp_print_clk_levels(struct smu_context *smu,
1055				enum smu_clk_type clk_type, char *buf)
1056{
1057	int i, idx, size = 0, ret = 0;
1058	uint32_t cur_value = 0, value = 0, count = 0;
1059	uint32_t min, max;
1060	uint32_t clk_limit = 0;
1061
1062	smu_cmn_get_sysfs_buf(&buf, &size);
1063
1064	switch (clk_type) {
1065	case SMU_OD_SCLK:
1066		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
1067		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
1068		(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
1069		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
1070		(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
1071		break;
1072	case SMU_OD_RANGE:
1073		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1074		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
1075						smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
1076		break;
1077	case SMU_SOCCLK:
1078	case SMU_VCLK:
1079	case SMU_DCLK:
1080	case SMU_MCLK:
1081	case SMU_FCLK:
1082		ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
1083		if (ret)
1084			goto print_clk_out;
1085
1086		ret = yellow_carp_get_dpm_level_count(smu, clk_type, &count);
1087		if (ret)
1088			goto print_clk_out;
1089
1090		for (i = 0; i < count; i++) {
1091			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
1092			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
1093			if (ret)
1094				goto print_clk_out;
1095
1096			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
1097					cur_value == value ? "*" : "");
1098		}
1099		break;
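	/*
	 * gfxclk is fine-grained rather than a fixed DPM table, so present
	 * a synthetic three-entry view: the current hard min, a middle
	 * entry (the UMD pstate default, or the current clock when it sits
	 * between min and max), and the current soft max.
	 */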
1100	case SMU_GFXCLK:
1101	case SMU_SCLK:
1102		clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type);
1103		ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
1104		if (ret)
1105			goto print_clk_out;
1106		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
1107		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
1108		if (cur_value  == max)
1109			i = 2;
1110		else if (cur_value == min)
1111			i = 0;
1112		else
1113			i = 1;
1114		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
1115				i == 0 ? "*" : "");
1116		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
1117				i == 1 ? cur_value : clk_limit,
1118				i == 1 ? "*" : "");
1119		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
1120				i == 2 ? "*" : "");
1121		break;
1122	default:
1123		break;
1124	}
1125
1126print_clk_out:
1127	return size;
1128}
1129
1130static int yellow_carp_force_clk_levels(struct smu_context *smu,
1131				enum smu_clk_type clk_type, uint32_t mask)
1132{
1133	uint32_t soft_min_level = 0, soft_max_level = 0;
1134	uint32_t min_freq = 0, max_freq = 0;
1135	int ret = 0;
1136
1137	soft_min_level = mask ? (ffs(mask) - 1) : 0;
1138	soft_max_level = mask ? (fls(mask) - 1) : 0;
1139
1140	switch (clk_type) {
1141	case SMU_SOCCLK:
1142	case SMU_FCLK:
1143	case SMU_VCLK:
1144	case SMU_DCLK:
1145		ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
1146		if (ret)
1147			goto force_level_out;
1148
1149		ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
1150		if (ret)
1151			goto force_level_out;
1152
1153		ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1154		if (ret)
1155			goto force_level_out;
1156		break;
1157	default:
1158		ret = -EINVAL;
1159		break;
1160	}
1161
1162force_level_out:
1163	return ret;
1164}
1165
1166static int yellow_carp_get_dpm_profile_freq(struct smu_context *smu,
1167					enum amd_dpm_forced_level level,
1168					enum smu_clk_type clk_type,
1169					uint32_t *min_clk,
1170					uint32_t *max_clk)
1171{
1172	int ret = 0;
1173	uint32_t clk_limit = 0;
1174
1175	clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type);
1176
1177	switch (clk_type) {
1178	case SMU_GFXCLK:
1179	case SMU_SCLK:
1180		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1181			yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
1182		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
1183			yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
1184		break;
1185	case SMU_SOCCLK:
1186		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1187			yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
1188		break;
1189	case SMU_FCLK:
1190		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1191			yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
1192		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
1193			yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL);
1194		break;
1195	case SMU_VCLK:
1196		yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit);
1197		break;
1198	case SMU_DCLK:
1199		yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit);
1200		break;
1201	default:
1202		ret = -EINVAL;
1203		break;
1204	}
1205	*min_clk = *max_clk = clk_limit;
1206	return ret;
1207}
1208
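/*
 * Map a forced performance level onto per-clock min/max requests:
 * HIGH pins every clock to its highest level, LOW to its lowest,
 * AUTO restores the full hardware range, and the PROFILE_* levels use
 * the UMD pstate defaults resolved by yellow_carp_get_dpm_profile_freq().
 * MANUAL and PROFILE_EXIT leave the current limits untouched.
 */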
1209static int yellow_carp_set_performance_level(struct smu_context *smu,
1210						enum amd_dpm_forced_level level)
1211{
1212	struct amdgpu_device *adev = smu->adev;
1213	uint32_t sclk_min = 0, sclk_max = 0;
1214	uint32_t fclk_min = 0, fclk_max = 0;
1215	uint32_t socclk_min = 0, socclk_max = 0;
1216	uint32_t vclk_min = 0, vclk_max = 0;
1217	uint32_t dclk_min = 0, dclk_max = 0;
1218
1219	int ret = 0;
1220
1221	switch (level) {
1222	case AMD_DPM_FORCED_LEVEL_HIGH:
1223		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
1224		yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
1225		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
1226		yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max);
1227		yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max);
1228		sclk_min = sclk_max;
1229		fclk_min = fclk_max;
1230		socclk_min = socclk_max;
1231		vclk_min = vclk_max;
1232		dclk_min = dclk_max;
1233		break;
1234	case AMD_DPM_FORCED_LEVEL_LOW:
1235		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
1236		yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
1237		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
1238		yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL);
1239		yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL);
1240		sclk_max = sclk_min;
1241		fclk_max = fclk_min;
1242		socclk_max = socclk_min;
1243		vclk_max = vclk_min;
1244		dclk_max = dclk_min;
1245		break;
1246	case AMD_DPM_FORCED_LEVEL_AUTO:
1247		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
1248		yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
1249		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
1250		yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max);
1251		yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max);
1252		break;
1253	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1254	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1255	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1256	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1257		yellow_carp_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max);
1258		yellow_carp_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max);
1259		yellow_carp_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max);
1260		yellow_carp_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max);
1261		yellow_carp_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max);
1262		break;
1263	case AMD_DPM_FORCED_LEVEL_MANUAL:
1264	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1265		return 0;
1266	default:
1267		dev_err(adev->dev, "Invalid performance level %d\n", level);
1268		return -EINVAL;
1269	}
1270
1271	if (sclk_min && sclk_max) {
1272		ret = yellow_carp_set_soft_freq_limited_range(smu,
1273							    SMU_SCLK,
1274							    sclk_min,
1275							    sclk_max);
1276		if (ret)
1277			return ret;
1278
1279		smu->gfx_actual_hard_min_freq = sclk_min;
1280		smu->gfx_actual_soft_max_freq = sclk_max;
1281	}
1282
1283	if (fclk_min && fclk_max) {
1284		ret = yellow_carp_set_soft_freq_limited_range(smu,
1285							    SMU_FCLK,
1286							    fclk_min,
1287							    fclk_max);
1288		if (ret)
1289			return ret;
1290	}
1291
1292	if (socclk_min && socclk_max) {
1293		ret = yellow_carp_set_soft_freq_limited_range(smu,
1294							    SMU_SOCCLK,
1295							    socclk_min,
1296							    socclk_max);
1297		if (ret)
1298			return ret;
1299	}
1300
1301	if (vclk_min && vclk_max) {
1302		ret = yellow_carp_set_soft_freq_limited_range(smu,
1303							      SMU_VCLK,
1304							      vclk_min,
1305							      vclk_max);
1306		if (ret)
1307			return ret;
1308	}
1309
1310	if (dclk_min && dclk_max) {
1311		ret = yellow_carp_set_soft_freq_limited_range(smu,
1312							      SMU_DCLK,
1313							      dclk_min,
1314							      dclk_max);
1315		if (ret)
1316			return ret;
1317	}
1318
1319	return ret;
1320}
1321
1322static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
1323{
1324	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
1325
1326	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
1327	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
1328	smu->gfx_actual_hard_min_freq = 0;
1329	smu->gfx_actual_soft_max_freq = 0;
1330
1331	return 0;
1332}
1333
1334static const struct pptable_funcs yellow_carp_ppt_funcs = {
1335	.check_fw_status = smu_v13_0_check_fw_status,
1336	.check_fw_version = smu_v13_0_check_fw_version,
1337	.init_smc_tables = yellow_carp_init_smc_tables,
1338	.fini_smc_tables = yellow_carp_fini_smc_tables,
1339	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
1340	.system_features_control = yellow_carp_system_features_control,
1341	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
1342	.send_smc_msg = smu_cmn_send_smc_msg,
1343	.dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
1344	.dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
1345	.set_default_dpm_table = yellow_carp_set_default_dpm_tables,
1346	.read_sensor = yellow_carp_read_sensor,
1347	.is_dpm_running = yellow_carp_is_dpm_running,
1348	.set_watermarks_table = yellow_carp_set_watermarks_table,
1349	.get_gpu_metrics = yellow_carp_get_gpu_metrics,
1350	.get_enabled_mask = smu_cmn_get_enabled_mask,
1351	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
1352	.set_driver_table_location = smu_v13_0_set_driver_table_location,
1353	.gfx_off_control = smu_v13_0_gfx_off_control,
1354	.get_gfx_off_status = yellow_carp_get_gfxoff_status,
1355	.post_init = yellow_carp_post_smu_init,
1356	.mode2_reset = yellow_carp_mode2_reset,
1357	.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
1358	.od_edit_dpm_table = yellow_carp_od_edit_dpm_table,
1359	.print_clk_levels = yellow_carp_print_clk_levels,
1360	.force_clk_levels = yellow_carp_force_clk_levels,
1361	.set_performance_level = yellow_carp_set_performance_level,
1362	.set_fine_grain_gfx_freq_parameters = yellow_carp_set_fine_grain_gfx_freq_parameters,
1363};
1364
1365void yellow_carp_set_ppt_funcs(struct smu_context *smu)
1366{
1367	smu->ppt_funcs = &yellow_carp_ppt_funcs;
1368	smu->message_map = yellow_carp_message_map;
1369	smu->feature_map = yellow_carp_feature_mask_map;
1370	smu->table_map = yellow_carp_table_map;
1371	smu->is_apu = true;
1372	smu->smc_driver_if_version = SMU13_YELLOW_CARP_DRIVER_IF_VERSION;
1373	smu_v13_0_set_smu_mailbox_registers(smu);
1374}