/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

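	/* Either bound may be NULL, but at least one must be supplied */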
	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

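	/*
	 * Query only the bound we need. The SMU reports the clock in MHz,
	 * while the legacy powerplay interface expects 10 kHz units, hence
	 * the "* 100" on return.
	 */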
	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

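	/*
	 * vcn_gated is 1 while the block is power gated. The XOR is non-zero
	 * exactly when the block is already in the requested state, so the
	 * redundant SMU message can be skipped.
	 */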
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce).
 *    The callers guarantee this path is race condition free.
 * 2. Or it gets called on a user request to set power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret && ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report only the default one */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

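	/*
	 * The pptable blob starts with an ATOM common table header whose
	 * usStructureSize must match the upload size for the table to be
	 * considered well formed.
	 */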
	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
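	/* PM is administratively disabled when the amdgpu_dpm module parameter is 0 */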
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

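	/*
	 * A single driver-table BO, sized for the largest SMU table, is
	 * shared by all table transfers to and from the SMU.
	 */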
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use. Its location is reported to
 * the SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

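	/* 0x40000 bytes = 256 KiB of VRAM backing the SMU dummy reads */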
	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, these actions (and relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (from vbios) into the dpm tables context,
	 * e.g. gfxclk, memclk and dcefclk, and enable the DPM feature for
	 * each type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
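	/* Example: a Gen4 x16 capable link yields pcie_gen = 3 and pcie_width = 6 */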
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
		    likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
			if (ret) {
				dev_err(adev->dev, "Failed to enable gfx imu!\n");
				return ret;
			}
		}

		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
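	/*
	 * BACO (Bus Active, Chip Off) is used for gpu reset, runtime PM
	 * and hibernation when the ASIC supports it.
	 */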
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
	 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
	 */
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		return 0;
	default:
		break;
	}

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     efforts.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if (use_baco) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(13, 0, 7):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
	 * for the gpu reset case. Driver involvement is unnecessary.
	 */
	if (amdgpu_in_reset(adev)) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(13, 0, 4):
		case IP_VERSION(13, 0, 11):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
		if (!adev->scpm_enabled) {
			ret = smu_system_features_control(smu, false);
			if (ret)
				dev_err(adev->dev, "Failed to disable smu features.\n");
		}
	}

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static void smu_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;
	uint64_t count;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	/*
	 * PMFW resets the entrycount when the device is suspended, so we save
	 * the last value here to be restored on resume, keeping it consistent
	 */
	ret = smu_get_entrycount_gfxoff(smu, &count);
	if (!ret)
		adev->gfx.gfx_off_entrycount = count;

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_gpo_control(smu, false);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param,
				       uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
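		/*
		 * fls() returns the 1-based index of the highest set bit:
		 * the highest-priority workload currently requested wins.
		 */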
1815 index = fls(smu->workload_mask);
1816 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1817 workload = smu->workload_setting[index];
1818
1819 if (smu->power_profile_mode != workload)
1820 smu_bump_power_profile_mode(smu, &workload, 0);
1821 }
1822
1823 return ret;
1824}
1825
1826static int smu_handle_task(struct smu_context *smu,
1827 enum amd_dpm_forced_level level,
1828 enum amd_pp_task task_id)
1829{
1830 int ret = 0;
1831
1832 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1833 return -EOPNOTSUPP;
1834
1835 switch (task_id) {
1836 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1837 ret = smu_pre_display_config_changed(smu);
1838 if (ret)
1839 return ret;
1840 ret = smu_adjust_power_state_dynamic(smu, level, false);
1841 break;
1842 case AMD_PP_TASK_COMPLETE_INIT:
1843 case AMD_PP_TASK_READJUST_POWER_STATE:
1844 ret = smu_adjust_power_state_dynamic(smu, level, true);
1845 break;
1846 default:
1847 break;
1848 }
1849
1850 return ret;
1851}
1852
1853static int smu_handle_dpm_task(void *handle,
1854 enum amd_pp_task task_id,
1855 enum amd_pm_state_type *user_state)
1856{
1857 struct smu_context *smu = handle;
1858 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1859
1860 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
1861
1862}
1863
1864static int smu_switch_power_profile(void *handle,
1865 enum PP_SMC_POWER_PROFILE type,
1866 bool en)
1867{
1868 struct smu_context *smu = handle;
1869 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1870 long workload;
1871 uint32_t index;
1872
1873 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1874 return -EOPNOTSUPP;
1875
1876 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1877 return -EINVAL;
1878
	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, &workload, 0);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	return smu_dpm_ctx->dpm_level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_init_display_count(smu, count);
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

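/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * mask passed down to smu_force_smuclk_levels() is a bitmask of DPM level
 * indices, so restricting SCLK to levels 1 and 2 would look roughly like:
 *
 *	uint32_t mask = BIT(1) | BIT(2);
 *	int ret = smu_force_smuclk_levels(smu, SMU_SCLK, mask);
 *
 * assuming a valid smu_context whose dpm_level is
 * AMD_DPM_FORCED_LEVEL_MANUAL, as the check above requires.
 */
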
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * the SMU services which are no longer available get gated. However,
 * the mp1 state setting should still be granted even with dpm_enabled
 * cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	return ret;
}

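/*
 * Illustrative sketch (hypothetical caller): during a GPU reset the
 * amdgpu core can still request an MP1 state transition through the
 * amd_pm_funcs table even though dpm_enabled has been cleared, e.g.:
 *
 *	ret = smu_set_mp1_state(adev->powerplay.pp_handle,
 *				PP_MP1_STATE_UNLOAD);
 *
 * which is why this entry point checks only pm_enabled above.
 */
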
static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_watermarks_table(smu, NULL);
}

static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	return smu_set_watermarks_table(smu, clock_ranges);
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.late_fini = smu_late_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

static int smu_load_microcode(void *handle)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	/* This should be used for non-PSP loading */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		return 0;

	if (smu->ppt_funcs->load_microcode) {
		ret = smu->ppt_funcs->load_microcode(smu);
		if (ret) {
			dev_err(adev->dev, "Load microcode failed\n");
			return ret;
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	return ret;
}

static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	return ret;
}

static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = speed;

		/* Override custom PWM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = 0;
	}

	return ret;
}

/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which limit of the power to return
 * @pp_power_type: &pp_power_type type of power
 * Return: 0 on success, <0 on error
 *
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	enum smu_ppt_limit_level limit_level;
	uint32_t limit_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (pp_power_type) {
	case PP_PWR_TYPE_SUSTAINED:
		limit_type = SMU_DEFAULT_PPT_LIMIT;
		break;
	case PP_PWR_TYPE_FAST:
		limit_type = SMU_FAST_PPT_LIMIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		limit_level = SMU_PPT_LIMIT_CURRENT;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		limit_level = SMU_PPT_LIMIT_DEFAULT;
		break;
	case PP_PWR_LIMIT_MAX:
		limit_level = SMU_PPT_LIMIT_MAX;
		break;
	case PP_PWR_LIMIT_MIN:
	default:
		return -EOPNOTSUPP;
	}

	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
		if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			switch (adev->ip_versions[MP1_HWIP][0]) {
			case IP_VERSION(13, 0, 2):
			case IP_VERSION(11, 0, 7):
			case IP_VERSION(11, 0, 11):
			case IP_VERSION(11, 0, 12):
			case IP_VERSION(11, 0, 13):
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL,
								NULL);
				break;
			default:
				break;
			}
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*limit = smu->default_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		default:
			break;
		}
	}

	return ret;
}

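/*
 * Illustrative sketch (hypothetical caller): querying the current
 * sustained power limit through the entry point above:
 *
 *	uint32_t limit;
 *	int ret = smu_get_power_limit(smu, &limit,
 *				      PP_PWR_LIMIT_CURRENT,
 *				      PP_PWR_TYPE_SUSTAINED);
 *
 * On success, limit holds the value the PPT code cached in
 * smu->current_power_limit.
 */
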
static int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	limit &= (1 << 24) - 1;
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit)
			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}

	return ret;
}

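/*
 * Illustrative sketch: smu_set_power_limit() packs the limit type into
 * the top byte of its argument (limit >> 24 above, masked off with
 * (1 << 24) - 1), so requesting a fast PPT limit of, say, 200 (the
 * value is purely an example, in whatever unit the PPT code expects)
 * would be encoded by a hypothetical caller as:
 *
 *	uint32_t input = (SMU_FAST_PPT_LIMIT << 24) | 200;
 *	ret = smu_set_power_limit(smu, input);
 */
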
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	return ret;
}

static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	default:
		clk_type = SMU_CLK_COUNT; break;
	}

	return clk_type;
}

static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->emit_clk_levels)
		return -ENOENT;

	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
}

static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	return ret;
}

static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

	size_val = *size_arg;
	size = &size_val;

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	/* copy the (possibly updated) uint32_t size back to the caller's int */
	*size_arg = size_val;

	return ret;
}

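/*
 * Illustrative sketch (hypothetical caller): a sensor read through the
 * entry point above always passes the buffer size by reference, e.g.:
 *
 *	uint32_t value;
 *	int size = sizeof(value);
 *	ret = smu_read_sensor(smu, AMDGPU_PP_SENSOR_VCN_POWER_STATE,
 *			      &value, &size);
 *
 * so that size can be updated to the number of bytes actually written.
 */
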
static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	return smu_bump_power_profile_mode(smu, param, param_size);
}

static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

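/*
 * Illustrative sketch: the fan interfaces are meant to be used in
 * pairs; a hypothetical caller would first switch to manual control
 * and then program a duty cycle (128 here is just an example value,
 * roughly half of the 0-255 PWM range):
 *
 *	ret = smu_set_fan_control_mode(smu, AMD_FAN_CTRL_MANUAL);
 *	if (!ret)
 *		ret = smu_set_fan_speed_pwm(smu, 128);
 *
 * Leaving manual mode resets the cached user fan speeds, as done above.
 */
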
static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override custom RPM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}

static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

static int smu_get_baco_capability(void *handle, bool *cap)
{
	struct smu_context *smu = handle;

	*cap = false;

	if (!smu->pm_enabled)
		return 0;

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		*cap = smu->ppt_funcs->baco_is_support(smu);

	return 0;
}

static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			(state) ? "enter" : "exit");

	return ret;
}

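/*
 * Illustrative sketch (hypothetical caller): a BACO cycle through the
 * entry points above checks for support first:
 *
 *	bool cap;
 *	smu_get_baco_capability(smu, &cap);
 *	if (cap) {
 *		smu_baco_set_state(smu, 1);	(enter BACO)
 *		smu_baco_set_state(smu, 0);	(exit BACO)
 *	}
 */
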
bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

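/*
 * Illustrative sketch: amdgpu core code does not call the smu_*
 * helpers above directly; it dispatches through this table, e.g. a
 * hypothetical caller reading the fan mode:
 *
 *	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
 *	u32 mode;
 *	int ret = funcs->get_fan_control_mode(adev->powerplay.pp_handle,
 *					      &mode);
 */
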
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock smu mutex as we access STB directly through MMIO
	 * and not going through SMU messaging route (for now at least).
	 * For registers access rely on implementation internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

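/*
 * Illustrative sketch (hypothetical caller): the buffer handed to
 * smu_stb_collect_info() must match the advertised STB size exactly:
 *
 *	size_t len = smu->stb_context.stb_buf_size;
 *	void *buf = kvmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		ret = smu_stb_collect_info(smu, buf, len);
 *
 * which is exactly the pattern the debugfs open handler below follows.
 */
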
#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and
 * release, because .read returns at most PAGE_SIZE of data per call
 * and so is invoked multiple times. We allocate the STB buffer in
 * .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max,
								  false);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
		    !adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * Don't power on VCN/JPEG when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				       bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

static int smu_set_mall_enable(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->ppt_funcs->set_mall_enable)
		return 0;

	ret = smu->ppt_funcs->set_mall_enable(smu);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    in which case the caller guarantees the calls are race free.
 * 2. Or it gets called on a user request to set power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced in
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

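/*
 * Illustrative sketch (hypothetical caller): other IP blocks reach the
 * helper above through the set_powergating_by_smu entry in the
 * amd_pm_funcs table, e.g. ungating VCN:
 *
 *	ret = smu_dpm_set_power_gate(adev->powerplay.pp_handle,
 *				     AMD_IP_BLOCK_TYPE_VCN, false);
 */
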
/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else {
		/* Add clk dependencies here, if any */
		return;
	}
}

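/*
 * Worked example of the dependency rules above: after a user forces
 * MCLK, clk_dependency becomes BIT(SMU_FCLK) | BIT(SMU_SOCCLK), so a
 * later profile restore skips any saved FCLK/SOCCLK masks; a
 * subsequent attempt to record an FCLK or SOCCLK dependency is also
 * ignored, because the MCLK dependency takes precedence.
 */
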
/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
							      smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret && ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret && ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

523
524 /* Restore user customized OD settings */
525 if (smu->user_dpm_profile.user_od) {
526 if (smu->ppt_funcs->restore_user_od_settings) {
527 ret = smu->ppt_funcs->restore_user_od_settings(smu);
528 if (ret)
529 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
530 }
531 }
532
533 /* Disable restore flag */
534 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
535}
536
537static int smu_get_power_num_states(void *handle,
538 struct pp_states_info *state_info)
539{
540 if (!state_info)
541 return -EINVAL;
542
	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
	    amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
		kfree(smu_table->hardcode_pptable);
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

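/*
 * Illustrative sketch (hypothetical userspace usage): a custom pptable
 * upload arrives through the pp_table sysfs node, roughly:
 *
 *	# cat my_pptable.bin > /sys/class/drm/card0/device/pp_table
 *
 * which lands in smu_sys_set_pp_table() above with buf/size and
 * triggers the smu_reset() so the new table takes effect.
 */
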
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, the allowed-feature-masks setting (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		smu_v14_0_2_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->smu_baco.maco_support = false;
	smu->user_dpm_profile.fan_mode = -1;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		vcn_gate = atomic_read(&power_gate->vcn_gated);
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		ret = smu_dpm_set_vcn_enable(smu, true);
		if (ret)
			return ret;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		smu_dpm_set_vcn_enable(smu, !vcn_gate);

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

853 /*
854 * Explicitly notify PMFW the power mode the system in. Since
855 * the PMFW may boot the ASIC with a different mode.
856 * For those supporting ACDC switch via gpio, PMFW will
857 * handle the switch automatically. Driver involvement
858 * is unnecessary.
859 */
860 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
861 smu_set_ac_dc(smu);
862
863 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
864 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
865 return 0;
866
867 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
868 ret = smu_set_default_od_settings(smu);
869 if (ret) {
870 dev_err(adev->dev, "Failed to setup default OD settings!\n");
871 return ret;
872 }
873 }
874
875 ret = smu_populate_umd_state_clk(smu);
876 if (ret) {
877 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
878 return ret;
879 }
880
881 ret = smu_get_asic_power_limits(smu,
882 &smu->current_power_limit,
883 &smu->default_power_limit,
884 &smu->max_power_limit,
885 &smu->min_power_limit);
886 if (ret) {
887 dev_err(adev->dev, "Failed to get asic power limits!\n");
888 return ret;
889 }
890
891 if (!amdgpu_sriov_vf(adev))
892 smu_get_unique_id(smu);
893
894 smu_get_fan_parameters(smu);
895
896 smu_handle_task(smu,
897 smu->smu_dpm.dpm_level,
898 AMD_PP_TASK_COMPLETE_INIT);
899
900 ret = smu_apply_default_config_table_settings(smu);
901 if (ret && (ret != -EOPNOTSUPP)) {
902 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
903 return ret;
904 }
905
906 smu_restore_dpm_user_profile(smu);
907
908 return 0;
909}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If any table is restricted to VRAM, keep the shared
		 * driver table in VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
		    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

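		/* the tool table was already allocated separately above */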
		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * The pool is reserved for SMC use. Its location is passed to the SMC
 * via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
		&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
		&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and other contexts needed to fill it.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
		&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown is performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *policy;

	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW puts PLPD into the default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
	} else {
		policy_ctxt = dpm_ctxt->dpm_policies;
		if (policy_ctxt)
			policy_ctxt->policy_mask &=
				~BIT(PP_PM_POLICY_XGMI_PLPD);
	}
}

static bool smu_is_workload_profile_available(struct smu_context *smu,
					      u32 profile)
{
	if (profile >= PP_SMC_POWER_PROFILE_COUNT)
		return false;
	return smu->workload_map && smu->workload_map[profile].valid_mapping;
}

static void smu_init_power_profile(struct smu_context *smu)
{
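	/*
	 * Pick FULLSCREEN3D as the initial profile when the ASIC exposes a
	 * valid mapping for it; otherwise (and always on APUs) fall back to
	 * the bootup default.
	 */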
	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
		if (smu->is_apu ||
		    !smu_is_workload_profile_available(
			    smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
			smu->power_profile_mode =
				PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
		else
			smu->power_profile_mode =
				PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	}
	smu_power_profile_mode_get(smu, smu->power_profile_mode);
}

static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;

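	/* VCN, JPEG, VPE and UMSCH start out power gated */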
	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

	smu_init_power_profile(smu);
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	if (smu->custom_profile_params) {
		kfree(smu->custom_profile_params);
		smu->custom_profile_params = NULL;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
		&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to do some sorting to eliminate those holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Retry once if PMFW was busy */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls the relevant amdgpu function in response to a wbrf event
 * notification from the kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * The event flood is over; consume the latest exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Checks via the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
			      acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if the wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifiband exclusion ranges may already be in place before
	 * our driver loads. Schedule a pass to make sure the driver is
	 * aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for transferring the PPTable
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy the pptable bo in vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow when wbrf is supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and the relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initial values (from vbios) in the dpm tables context, such as
	 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
	 * type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
		pcie_gen = 4;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 4 corresponds to GEN1 to GEN5
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
		pcie_width = 7;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set the min deep sleep dce fclk with the bootup value from vbios
	 * via the SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret) {
		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
		return ret;
	}

	/* Init wbrf support. Properly set up the notifier */
	ret = smu_wbrf_init(smu);
	if (ret)
		dev_err(adev->dev, "Error during wbrf init call\n");

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	smu->smc_fw_state = SMU_FW_INIT;

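	/* Without PSP front-door loading, pre-SMU11 parts need the driver to load the SMC firmware */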
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send the GetDriverIfVersion msg to check that the return value
	 * matches the DRIVER_IF_VERSION in the smc header.
	 */
	return smu_check_fw_version(smu);
}

static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	/*
	 * Check whether wbrf is supported. This needs to be done
	 * before SMU setup starts since part of the SMU configuration
	 * relies on this.
	 */
	smu_wbrf_support_check(smu);

	if (smu->is_apu) {
		ret = smu_set_gfx_imu_enable(smu);
		if (ret)
			return ret;
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_dpm_set_vpe_enable(smu, true);
		smu_dpm_set_umsch_mm_enable(smu, true);
		smu_set_mall_enable(smu);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Retrieve the maximum sustainable clocks here, considering that:
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU, and
	 *    DAL needs to know the maximum sustainable clocks. Thus it
	 *    cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
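	/* BACO is used for BACO-capable resets and for runpm/hibernation on BACO-capable dGPUs */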
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For the ASICs below, PMFW will handle the DPM features (disablement
	 * or others) properly on suspend/reset/unload. Driver involvement
	 * may cause some unexpected issues.
	 */
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		return 0;
	default:
		break;
	}

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - The gfx related features are under the control of
	 *     RLC on those ASICs. RLC reinitialization would be
	 *     needed to re-enable them, which costs much more
	 *     effort.
	 *
	 *   - SMU firmware can handle the DPM re-enablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid and the other ASICs below, PMFW will handle the
	 * feature disablement properly on BACO entry. Driver involvement is
	 * unnecessary.
	 */
	if (use_baco) {
		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(13, 0, 7):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For GFX11 and subsequent APUs, PMFW will handle the feature
	 * disablement properly for gpu reset and S0i3 cases. Driver
	 * involvement is unnecessary.
	 */
	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * the BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		/* The DisableAllSmuFeatures message is not permitted with SCPM enabled */
		if (!adev->scpm_enabled) {
			ret = smu_system_features_control(smu, false);
			if (ret)
				dev_err(adev->dev, "Failed to disable smu features.\n");
		}
	}

	/*
	 * Notify SMU that RLC is going to be off and stop the RLC/SMU
	 * interaction; otherwise SMU will hang while interacting with RLC
	 * after RLC is halted. This is a workaround for the SMU hang issue
	 * on the Vangogh ASIC.
	 */
	ret = smu_notify_rlc_state(smu, false);
	if (ret) {
		dev_err(adev->dev, "Fail to notify rlc status!\n");
		return ret;
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
	    !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	smu_wbrf_fini(smu);

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable thermal alert!\n");
		return ret;
	}

	cancel_delayed_work_sync(&smu->swctf_delayed_work);

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_reset_mp1_state(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

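	/*
	 * On SMU 13.0.10 parts without display hardware, tell MP1 the driver
	 * is being unloaded; skip the runpm, suspend and reset paths.
	 */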
	if ((!adev->in_runpm) && (!adev->in_suspend) &&
	    (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
	    IP_VERSION(13, 0, 10) &&
	    !amdgpu_device_has_display_hardware(adev))
		ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);

	return ret;
}

static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);
	smu_dpm_set_vpe_enable(smu, false);
	smu_dpm_set_umsch_mm_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	ret = smu_reset_mp1_state(smu);
	if (ret)
		return ret;

	return 0;
}

static void smu_late_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ip_block *ip_block;
	int ret;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
	if (!ip_block)
		return -EINVAL;

	ret = smu_hw_fini(ip_block);
	if (ret)
		return ret;

	ret = smu_hw_init(ip_block);
	if (ret)
		return ret;

	ret = smu_late_init(ip_block);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;
	uint64_t count;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	/*
	 * PMFW resets the gfxoff entry count when the device is suspended,
	 * so save the last value to be restored on resume, keeping it
	 * consistent.
	 */
	ret = smu_get_entrycount_gfxoff(smu, &count);
	if (!ret)
		adev->gfx.gfx_off_entrycount = count;

	/* clear this on suspend so it will get reprogrammed on resume */
	smu->workload_mask = 0;

	return 0;
}

static int smu_resume(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	ret = smu_set_gfx_imu_enable(smu);
	if (ret)
		return ret;

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_gpo_control(smu, false);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *custom_params,
				       u32 custom_params_max_idx)
{
	u32 workload_mask = 0;
	int i, ret = 0;

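	/* Build the workload mask from the per-profile refcounts */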
	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		if (smu->workload_refcount[i])
			workload_mask |= 1 << i;
	}

	if (smu->workload_mask == workload_mask)
		return 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
							     custom_params,
							     custom_params_max_idx);

	if (!ret)
		smu->workload_mask = workload_mask;

	return ret;
}

static void smu_power_profile_mode_get(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode)
{
	smu->workload_refcount[profile_mode]++;
}

static void smu_power_profile_mode_put(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode)
{
	if (smu->workload_refcount[profile_mode])
		smu->workload_refcount[profile_mode]--;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

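	/* Outside of manual and deterministic modes, re-apply the accumulated workload profile mask */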
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, NULL, 0);

	return ret;
}

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}

static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool enable)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

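	/*
	 * Adjust the profile refcount and re-apply the accumulated workload
	 * mask; roll the refcount change back if applying the mask fails.
	 */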
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		if (enable)
			smu_power_profile_mode_get(smu, type);
		else
			smu_power_profile_mode_put(smu, type);
		ret = smu_bump_power_profile_mode(smu, NULL, 0);
		if (ret) {
			if (enable)
				smu_power_profile_mode_put(smu, type);
			else
				smu_power_profile_mode_get(smu, type);
			return ret;
		}
	}

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	return smu_dpm_ctx->dpm_level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_init_display_count(smu, count);
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_VCLK1:
		clk_type = SMU_VCLK1; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case PP_DCLK1:
		clk_type = SMU_DCLK1; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * SMU services which are not supported in that state are gated.
 * However, the mp1 state setting should still be allowed even with
 * dpm_enabled cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	return ret;
}

static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_watermarks_table(smu, NULL);
}

static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	return smu_set_watermarks_table(smu, clock_ranges);
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.late_fini = smu_late_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 14,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

static int smu_load_microcode(void *handle)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	/* This should be used for non-PSP loading */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		return 0;

	if (smu->ppt_funcs->load_microcode) {
		ret = smu->ppt_funcs->load_microcode(smu);
		if (ret) {
			dev_err(adev->dev, "Load microcode failed\n");
			return ret;
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	return ret;
}

static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	return ret;
}

static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = speed;

		/* Override any custom PWM setting as the two cannot coexist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = 0;
	}

	return ret;
}

/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which limit level to return
 * @pp_power_type: &pp_power_type selecting the type of power limit
 * Return: 0 on success, <0 on error
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	enum smu_ppt_limit_level limit_level;
	uint32_t limit_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (pp_power_type) {
	case PP_PWR_TYPE_SUSTAINED:
		limit_type = SMU_DEFAULT_PPT_LIMIT;
		break;
	case PP_PWR_TYPE_FAST:
		limit_type = SMU_FAST_PPT_LIMIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		limit_level = SMU_PPT_LIMIT_CURRENT;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		limit_level = SMU_PPT_LIMIT_DEFAULT;
		break;
	case PP_PWR_LIMIT_MAX:
		limit_level = SMU_PPT_LIMIT_MAX;
		break;
	case PP_PWR_LIMIT_MIN:
		limit_level = SMU_PPT_LIMIT_MIN;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
		if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
			case IP_VERSION(13, 0, 2):
			case IP_VERSION(13, 0, 6):
			case IP_VERSION(13, 0, 14):
			case IP_VERSION(11, 0, 7):
			case IP_VERSION(11, 0, 11):
			case IP_VERSION(11, 0, 12):
			case IP_VERSION(11, 0, 13):
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL, NULL, NULL);
				break;
			default:
				break;
			}
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*limit = smu->default_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		case SMU_PPT_LIMIT_MIN:
			*limit = smu->min_power_limit;
			break;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
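	/* Bits 31:24 of @limit encode the PPT limit type; bits 23:0 carry the limit value */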
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	limit &= (1 << 24) - 1;
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit)
			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

	if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is out of range [%d,%d]\n",
			limit, smu->min_power_limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}

	return ret;
}
2855
2856static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2857{
2858 int ret = 0;
2859
2860 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2861 return -EOPNOTSUPP;
2862
2863 if (smu->ppt_funcs->print_clk_levels)
2864 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2865
2866 return ret;
2867}
2868
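/*
 * Map a powerplay pp_clock_type onto the internal smu_clk_type
 * namespace. SMU_CLK_COUNT is returned for types that have no SMU
 * equivalent and is treated as invalid by the callers below.
 */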
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_VCLK1:
		clk_type = SMU_VCLK1; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case PP_DCLK1:
		clk_type = SMU_DCLK1; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	case OD_FAN_CURVE:
		clk_type = SMU_OD_FAN_CURVE; break;
	case OD_ACOUSTIC_LIMIT:
		clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
	case OD_ACOUSTIC_TARGET:
		clk_type = SMU_OD_ACOUSTIC_TARGET; break;
	case OD_FAN_TARGET_TEMPERATURE:
		clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
	case OD_FAN_MINIMUM_PWM:
		clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
	case OD_FAN_ZERO_RPM_ENABLE:
		clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
	case OD_FAN_ZERO_RPM_STOP_TEMP:
		clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
	default:
		clk_type = SMU_CLK_COUNT; break;
	}

	return clk_type;
}

static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

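/*
 * Offset-based variant of smu_print_ppclk_levels(); returns -ENOENT
 * when the ASIC backend does not implement emit_clk_levels, which
 * callers can presumably use as a cue to fall back to the print path.
 */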
static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->emit_clk_levels)
		return -ENOENT;

	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
}

static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	return ret;
}

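/*
 * Service the sensor through the ASIC-specific read_sensor callback
 * first; anything it does not recognize falls through to the generic
 * handling below, which answers from cached pstate and feature state.
 */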
static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

	size_val = *size_arg;
	size = &size_val;

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	/* copy the (possibly updated) uint32_t size back to the caller's int */
	*size_arg = size_val;

	return ret;
}

static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{
	int ret = -EOPNOTSUPP;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{
	int ret = -EOPNOTSUPP;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

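/*
 * The requested profile mode arrives in param[param_size]; for
 * PP_SMC_POWER_PROFILE_CUSTOM the preceding param entries carry the
 * custom heuristic settings forwarded to the ASIC backend.
 */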
static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;
	bool custom = false;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
		custom = true;
		/* clear frontend mask so custom changes propagate */
		smu->workload_mask = 0;
	}

	if ((param[param_size] != smu->power_profile_mode) || custom) {
		/* clear the old user preference */
		smu_power_profile_mode_put(smu, smu->power_profile_mode);
		/* set the new user preference */
		smu_power_profile_mode_get(smu, param[param_size]);
		ret = smu_bump_power_profile_mode(smu,
						  custom ? param : NULL,
						  custom ? param_size : 0);
		if (ret)
			smu_power_profile_mode_put(smu, param[param_size]);
		else
			/* store the user's preference */
			smu->power_profile_mode = param[param_size];
	}

	return ret;
}

static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

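/*
 * Switching away from AMD_FAN_CTRL_MANUAL invalidates any fan speed
 * the user programmed earlier, so the cached PWM/RPM values and their
 * user-profile flags are cleared below.
 */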
static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override any custom RPM setting, as the two cannot coexist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}

static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

static int smu_get_baco_capability(void *handle)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled)
		return false;

	if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
		return false;

	return smu->ppt_funcs->get_bamaco_support(smu);
}

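/* state semantics: 1 requests BACO entry, 0 requests BACO exit */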
static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			(state) ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_enable_gfx_features(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_gfx_features)
		ret = smu->ppt_funcs->enable_gfx_features(smu);

	if (ret)
		dev_err(smu->adev->dev, "enable gfx features failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
				      size_t size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

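/*
 * Emit one "<level> : <description>" line per supported policy level
 * into sysbuf, marking the currently active level with a trailing '*'.
 */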
static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
				 size_t *size)
{
	size_t offset = *size;
	int level;

	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
		if (level == policy->current_level)
			offset += sysfs_emit_at(sysbuf, offset,
						"%d : %s*\n", level,
						policy->desc->get_desc(policy, level));
		else
			offset += sysfs_emit_at(sysbuf, offset,
						"%d : %s\n", level,
						policy->desc->get_desc(policy, level));
	}

	*size = offset;
}

ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}

struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int i;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!policy_ctxt)
		return NULL;

	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
		if (policy_ctxt->policies[i].policy_type == p_type)
			return &policy_ctxt->policies[i];
	}

	return NULL;
}

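/*
 * Example (hypothetical caller): select level 0 of the XGMI per-link
 * power-down policy, assuming the ASIC exposes PP_PM_POLICY_XGMI_PLPD:
 *
 *	int err = smu_set_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD, 0);
 */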
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy *dpm_policy = NULL;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int ret = -EOPNOTSUPP;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return ret;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);

	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
		return ret;

	if (dpm_policy->current_level == level)
		return 0;

	ret = dpm_policy->set_policy(smu, level);

	if (!ret)
		dpm_policy->current_level = level;

	return ret;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock the smu mutex, as we access the STB directly
	 * through MMIO rather than via the SMU messaging route (for now
	 * at least). Register access relies on the implementation's
	 * internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and
 * release: .read returns at most PAGE_SIZE of data per call and so
 * may be invoked multiple times, so we allocate the STB buffer once
 * in .open and free it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

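/*
 * Expose the STB (Smart Trace Buffer) dump to userspace. With debugfs
 * mounted in the usual location, the file typically shows up as
 * /sys/kernel/debug/dri/<minor>/amdgpu_smu_stb_dump.
 */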
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}