/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id,
			   bool lock_needed);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_percent(void *handle, u32 speed);
static int smu_set_fan_control_mode(struct smu_context *smu, int value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;
	int size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}
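
/*
 * Illustrative sketch (added commentary, not part of the driver): a
 * kernel-side caller could query the hardware DPM window for GFXCLK and
 * then soft-limit it. The clamp value below is a made-up number in MHz.
 *
 *	uint32_t min_mhz, max_mhz;
 *
 *	if (!smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_mhz, &max_mhz)) {
 *		// e.g. cap the upper bound 100 MHz below the hardware max
 *		smu_set_soft_freq_range(smu, SMU_GFXCLK,
 *					min_mhz, max_mhz - 100);
 *	}
 */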

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}
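
/*
 * Note on units (added commentary): smu_get_dpm_freq_range() reports clocks
 * in MHz, while the legacy amd_pm_funcs get_mclk/get_sclk callbacks are
 * expected to return 10 kHz units, hence the "* 100" above. Worked example,
 * assuming a 1200 MHz UCLK ceiling:
 *
 *	smu_get_mclk(smu, false);	// 1200 MHz -> returns 120000 (10 kHz)
 */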

static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    in which case the caller guarantees it is free of races.
 * 2. Or it is called on a user request to set
 *    power_dpm_force_performance_level. In that case, smu->mutex protection
 *    is already enforced by the parent API smu_force_performance_level
 *    in the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}
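
/*
 * Illustrative call path (added commentary): an IP block typically reaches
 * smu_dpm_set_power_gate() through the amdgpu dpm helper rather than
 * directly, e.g. to ungate VCN before starting decode work:
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, false);
 *
 * which resolves to this function via adev->powerplay.pp_funcs.
 */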

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}
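
/*
 * Worked example (added commentary): after the user forces SMU_MCLK levels,
 * clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK), i.e. FCLK and SOCCLK are
 * treated as driven by MCLK. smu_restore_dpm_user_profile() below then skips
 * re-forcing those two clocks, since their levels follow MCLK anyway.
 */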

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk types and force the saved user clk
			 * configs; skip if a clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
			return;
		}

		if (smu->user_dpm_profile.fan_speed_percent) {
			ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
			if (ret)
				dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
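
/*
 * Illustrative user-space trigger (added commentary): the set path above is
 * reached when a custom powerplay table is written to the pp_table sysfs
 * node, e.g. (path is an example, assuming card0):
 *
 *	# cat custom_pptable.bin > /sys/class/drm/card0/device/pp_table
 *
 * The write is rejected with -EIO when the size embedded in the ATOM header
 * does not match the number of bytes written.
 */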

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}
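
/*
 * Layout note with a worked example (added commentary): the ASIC backend
 * fills allowed_feature_mask as an array of SMU_FEATURE_MAX/32 32-bit
 * words. E.g., if SMU_FEATURE_MAX were 64, that is two words; a mask of
 * { 0x00000005, 0x00000001 } would mark feature bits 0, 2 and 32 as
 * allowed once OR-ed into the feature->allowed bitmap above.
 */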

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	case CHIP_VANGOGH:
		vangogh_set_ppt_funcs(smu);
		break;
	case CHIP_YELLOW_CARP:
		yellow_carp_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);
	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}


static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	if (adev->asic_type == CHIP_YELLOW_CARP)
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
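
/*
 * Design note (added commentary): rather than pinning one VRAM buffer per
 * SMC table, a single driver_table bo is sized to the largest table and
 * reused as the staging area for every driver<->SMU table transfer; only
 * the tool table (PMSTATUSLOG) keeps a dedicated allocation because tools
 * may sample it at any time.
 */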

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is for SMC use. The SMC is notified of its location
 * via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and other context data it carries.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);

	mutex_unlock(&smu->mutex);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret = 0;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		/* this is needed specifically */
		if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
		    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
			ret = smu_system_features_control(smu, true);
		return ret;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy the pptable bo in vram to the SMC with messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
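	/*
	 * Worked example (added commentary): on a Gen4-capable x16 part,
	 * the code below selects pcie_gen = 3 and pcie_width = 6, which a
	 * ppt backend would pack per the layout above as 0x00000306 in the
	 * message argument (LCLK DPM level 0 assumed).
	 */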
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	/*
	 * Set initial values (from vbios) in the dpm tables context, such as
	 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
	 * type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret)
		return ret;

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send the GetDriverIfVersion message to check that the return value
	 * matches the DRIVER_IF_VERSION in the SMC header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move the maximum sustainable clock retrieval here considering:
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 * - As the gfx related features are under control of
	 *   RLC on those ASICs, RLC reinitialization would be
	 *   needed to re-enable them. That would cost much more
	 *   effort.
	 *
	 * - SMU firmware can handle the DPM re-enablement
	 *   properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_BEIGE_GOBY))
		return smu_disable_all_features_with_exception(smu,
							       true,
							       SMU_FEATURE_COUNT);

	/*
	 * For Sienna Cichlid and Navi1x, the PMFW handles the feature
	 * disablement properly on BACO entry. Driver involvement is unnecessary.
	 */
	if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
	     ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) &&
	    use_baco)
		return smu_disable_all_features_with_exception(smu,
							       true,
							       SMU_FEATURE_BACO_BIT);

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      false,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
	}

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	amdgpu_gfx_off_ctrl(smu->adev, false);

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	amdgpu_gfx_off_ctrl(smu->adev, true);

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	/* skip CGPG when in S0ix */
	if (smu->is_apu && !adev->in_s0ix)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			smu_gpo_control(smu, false);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param,
				       uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_bump_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}
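
/*
 * Worked example of the workload selection above (added commentary):
 * with workload_mask = 0b100100 (COMPUTE and POWERSAVING both requested,
 * priorities 5 and 2 per smu_sw_init()), fls() returns 6, so index = 5 and
 * workload_setting[5], i.e. PP_SMC_POWER_PROFILE_COMPUTE, wins as the
 * highest-priority profile.
 */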

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id,
			   bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
}

static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, &workload, 0);

	mutex_unlock(&smu->mutex);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}
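
/*
 * Illustrative sysfs path (added commentary): the mask argument originates
 * from user writes to the pp_dpm_* nodes parsed in amdgpu_pm.c, e.g.
 * (assuming card0):
 *
 *	# echo "1 2" > /sys/class/drm/card0/device/pp_dpm_sclk
 *
 * arrives here as type = PP_SCLK with mask = BIT(1) | BIT(2), forcing
 * SMU_SCLK to DPM levels 1 and 2 only while in manual mode.
 */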
1942
1943/*
1944 * On system suspending or resetting, the dpm_enabled
1945 * flag will be cleared. So that those SMU services which
1946 * are not supported will be gated.
1947 * However, the mp1 state setting should still be granted
1948 * even if the dpm_enabled cleared.
1949 */
1950static int smu_set_mp1_state(void *handle,
1951 enum pp_mp1_state mp1_state)
1952{
1953 struct smu_context *smu = handle;
1954 int ret = 0;
1955
1956 if (!smu->pm_enabled)
1957 return -EOPNOTSUPP;
1958
1959 mutex_lock(&smu->mutex);
1960
1961 if (smu->ppt_funcs &&
1962 smu->ppt_funcs->set_mp1_state)
1963 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
1964
1965 mutex_unlock(&smu->mutex);
1966
1967 return ret;
1968}
1969
1970static int smu_set_df_cstate(void *handle,
1971 enum pp_df_cstate state)
1972{
1973 struct smu_context *smu = handle;
1974 int ret = 0;
1975
1976 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1977 return -EOPNOTSUPP;
1978
1979 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1980 return 0;
1981
1982 mutex_lock(&smu->mutex);
1983
1984 ret = smu->ppt_funcs->set_df_cstate(smu, state);
1985 if (ret)
1986 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
1987
1988 mutex_unlock(&smu->mutex);
1989
1990 return ret;
1991}
1992
1993int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1994{
1995 int ret = 0;
1996
1997 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1998 return -EOPNOTSUPP;
1999
2000 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2001 return 0;
2002
2003 mutex_lock(&smu->mutex);
2004
2005 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2006 if (ret)
2007 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
2008
2009 mutex_unlock(&smu->mutex);
2010
2011 return ret;
2012}
2013
2014int smu_write_watermarks_table(struct smu_context *smu)
2015{
2016 int ret = 0;
2017
2018 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2019 return -EOPNOTSUPP;
2020
2021 mutex_lock(&smu->mutex);
2022
2023 ret = smu_set_watermarks_table(smu, NULL);
2024
2025 mutex_unlock(&smu->mutex);
2026
2027 return ret;
2028}
2029
2030static int smu_set_watermarks_for_clock_ranges(void *handle,
2031 struct pp_smu_wm_range_sets *clock_ranges)
2032{
2033 struct smu_context *smu = handle;
2034 int ret = 0;
2035
2036 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2037 return -EOPNOTSUPP;
2038
2039 if (smu->disable_watermark)
2040 return 0;
2041
2042 mutex_lock(&smu->mutex);
2043
2044 ret = smu_set_watermarks_table(smu, clock_ranges);
2045
2046 mutex_unlock(&smu->mutex);
2047
2048 return ret;
2049}
2050
2051int smu_set_ac_dc(struct smu_context *smu)
2052{
2053 int ret = 0;
2054
2055 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2056 return -EOPNOTSUPP;
2057
2058 /* controlled by firmware */
2059 if (smu->dc_controlled_by_gpio)
2060 return 0;
2061
2062 mutex_lock(&smu->mutex);
2063 ret = smu_set_power_source(smu,
2064 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2065 SMU_POWER_SOURCE_DC);
2066 if (ret)
2067 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2068 smu->adev->pm.ac_power ? "AC" : "DC");
2069 mutex_unlock(&smu->mutex);
2070
2071 return ret;
2072}
2073
2074const struct amd_ip_funcs smu_ip_funcs = {
2075 .name = "smu",
2076 .early_init = smu_early_init,
2077 .late_init = smu_late_init,
2078 .sw_init = smu_sw_init,
2079 .sw_fini = smu_sw_fini,
2080 .hw_init = smu_hw_init,
2081 .hw_fini = smu_hw_fini,
2082 .suspend = smu_suspend,
2083 .resume = smu_resume,
2084 .is_idle = NULL,
2085 .check_soft_reset = NULL,
2086 .wait_for_idle = NULL,
2087 .soft_reset = NULL,
2088 .set_clockgating_state = smu_set_clockgating_state,
2089 .set_powergating_state = smu_set_powergating_state,
2090 .enable_umd_pstate = smu_enable_umd_pstate,
2091};
2092
2093const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2094{
2095 .type = AMD_IP_BLOCK_TYPE_SMC,
2096 .major = 11,
2097 .minor = 0,
2098 .rev = 0,
2099 .funcs = &smu_ip_funcs,
2100};
2101
2102const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2103{
2104 .type = AMD_IP_BLOCK_TYPE_SMC,
2105 .major = 12,
2106 .minor = 0,
2107 .rev = 0,
2108 .funcs = &smu_ip_funcs,
2109};
2110
2111const struct amdgpu_ip_block_version smu_v13_0_ip_block =
2112{
2113 .type = AMD_IP_BLOCK_TYPE_SMC,
2114 .major = 13,
2115 .minor = 0,
2116 .rev = 0,
2117 .funcs = &smu_ip_funcs,
2118};
2119
2120static int smu_load_microcode(void *handle)
2121{
2122 struct smu_context *smu = handle;
2123 struct amdgpu_device *adev = smu->adev;
2124 int ret = 0;
2125
2126 if (!smu->pm_enabled)
2127 return -EOPNOTSUPP;
2128
2129	/* This path is only used for non-PSP firmware loading */
2130 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2131 return 0;
2132
2133 if (smu->ppt_funcs->load_microcode) {
2134 ret = smu->ppt_funcs->load_microcode(smu);
2135 if (ret) {
2136 dev_err(adev->dev, "Load microcode failed\n");
2137 return ret;
2138 }
2139 }
2140
2141 if (smu->ppt_funcs->check_fw_status) {
2142 ret = smu->ppt_funcs->check_fw_status(smu);
2143 if (ret) {
2144 dev_err(adev->dev, "SMC is not ready\n");
2145 return ret;
2146 }
2147 }
2148
2149 return ret;
2150}
2151
2152static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2153{
2154 int ret = 0;
2155
2156 mutex_lock(&smu->mutex);
2157
2158 if (smu->ppt_funcs->set_gfx_cgpg)
2159 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2160
2161 mutex_unlock(&smu->mutex);
2162
2163 return ret;
2164}
2165
2166static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2167{
2168 struct smu_context *smu = handle;
2169 u32 percent;
2170 int ret = 0;
2171
2172 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2173 return -EOPNOTSUPP;
2174
2175 mutex_lock(&smu->mutex);
2176
2177 if (smu->ppt_funcs->set_fan_speed_percent) {
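		/*
		 * The ppt layer only understands percentages here, so
		 * convert RPM to percent of fan_max_rpm; integer division
		 * rounds down, e.g. 1600 RPM on a 3200 RPM fan yields 50.
		 */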
2178 percent = speed * 100 / smu->fan_max_rpm;
2179 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
2180 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2181 smu->user_dpm_profile.fan_speed_percent = percent;
2182 }
2183
2184 mutex_unlock(&smu->mutex);
2185
2186 return ret;
2187}
2188
2189/**
2190 * smu_get_power_limit - Request one of the SMU power limits
2191 *
2192 * @handle: pointer to smu context
2193 * @limit: the requested limit is written back through this pointer
2194 * @pp_limit_level: &pp_power_limit_level selecting which limit to return
2195 * @pp_power_type: &pp_power_type selecting the type of power limit
2196 *
2197 * Return: 0 on success, <0 on error
2198 */
2199int smu_get_power_limit(void *handle,
2200 uint32_t *limit,
2201 enum pp_power_limit_level pp_limit_level,
2202 enum pp_power_type pp_power_type)
2203{
2204 struct smu_context *smu = handle;
2205 enum smu_ppt_limit_level limit_level;
2206 uint32_t limit_type;
2207 int ret = 0;
2208
2209 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2210 return -EOPNOTSUPP;
2211
2212	switch (pp_power_type) {
2213 case PP_PWR_TYPE_SUSTAINED:
2214 limit_type = SMU_DEFAULT_PPT_LIMIT;
2215 break;
2216 case PP_PWR_TYPE_FAST:
2217 limit_type = SMU_FAST_PPT_LIMIT;
2218 break;
2219	default:
2220		return -EOPNOTSUPP;
2222 }
2223
2224	switch (pp_limit_level) {
2225 case PP_PWR_LIMIT_CURRENT:
2226 limit_level = SMU_PPT_LIMIT_CURRENT;
2227 break;
2228 case PP_PWR_LIMIT_DEFAULT:
2229 limit_level = SMU_PPT_LIMIT_DEFAULT;
2230 break;
2231 case PP_PWR_LIMIT_MAX:
2232 limit_level = SMU_PPT_LIMIT_MAX;
2233 break;
2234 case PP_PWR_LIMIT_MIN:
2235	default:
2236		return -EOPNOTSUPP;
2238 }
2239
2240 mutex_lock(&smu->mutex);
2241
2242 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2243 if (smu->ppt_funcs->get_ppt_limit)
2244 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2245 } else {
2246 switch (limit_level) {
2247 case SMU_PPT_LIMIT_CURRENT:
2248 if ((smu->adev->asic_type == CHIP_ALDEBARAN) ||
2249 (smu->adev->asic_type == CHIP_SIENNA_CICHLID) ||
2250 (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) ||
2251 (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) ||
2252 (smu->adev->asic_type == CHIP_BEIGE_GOBY))
2253 ret = smu_get_asic_power_limits(smu,
2254 &smu->current_power_limit,
2255 NULL,
2256 NULL);
2257 *limit = smu->current_power_limit;
2258 break;
2259 case SMU_PPT_LIMIT_DEFAULT:
2260 *limit = smu->default_power_limit;
2261 break;
2262 case SMU_PPT_LIMIT_MAX:
2263 *limit = smu->max_power_limit;
2264 break;
2265 default:
2266 break;
2267 }
2268 }
2269
2270 mutex_unlock(&smu->mutex);
2271
2272 return ret;
2273}
2274
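/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * read back the current sustained power limit, assuming the firmware
 * reports it in watts:
 *
 *	uint32_t limit;
 *
 *	if (!smu_get_power_limit(smu, &limit, PP_PWR_LIMIT_CURRENT,
 *				 PP_PWR_TYPE_SUSTAINED))
 *		dev_info(smu->adev->dev, "current power limit: %uW\n", limit);
 */
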
2275static int smu_set_power_limit(void *handle, uint32_t limit)
2276{
2277 struct smu_context *smu = handle;
2278 uint32_t limit_type = limit >> 24;
2279 int ret = 0;
2280
2281 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2282 return -EOPNOTSUPP;
2283
2284 mutex_lock(&smu->mutex);
2285
2286	if (limit_type != SMU_DEFAULT_PPT_LIMIT &&
2287	    smu->ppt_funcs->set_power_limit) {
2288		ret = smu->ppt_funcs->set_power_limit(smu, limit);
2289		goto out;
2290	}
2291
2292 if (limit > smu->max_power_limit) {
2293 dev_err(smu->adev->dev,
2294 "New power limit (%d) is over the max allowed %d\n",
2295 limit, smu->max_power_limit);
2296 ret = -EINVAL;
2297 goto out;
2298 }
2299
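	/* A zero limit means re-apply the current power limit */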
2300 if (!limit)
2301 limit = smu->current_power_limit;
2302
2303 if (smu->ppt_funcs->set_power_limit) {
2304 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2305 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2306 smu->user_dpm_profile.power_limit = limit;
2307 }
2308
2309out:
2310 mutex_unlock(&smu->mutex);
2311
2312 return ret;
2313}
2314
2315static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2316{
2317 int ret = 0;
2318
2319 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2320 return -EOPNOTSUPP;
2321
2322 mutex_lock(&smu->mutex);
2323
2324 if (smu->ppt_funcs->print_clk_levels)
2325 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2326
2327 mutex_unlock(&smu->mutex);
2328
2329 return ret;
2330}
2331
2332static int smu_print_ppclk_levels(void *handle,
2333 enum pp_clock_type type,
2334 char *buf)
2335{
2336 struct smu_context *smu = handle;
2337 enum smu_clk_type clk_type;
2338
2339 switch (type) {
2340 case PP_SCLK:
2341 clk_type = SMU_SCLK; break;
2342 case PP_MCLK:
2343 clk_type = SMU_MCLK; break;
2344 case PP_PCIE:
2345 clk_type = SMU_PCIE; break;
2346 case PP_SOCCLK:
2347 clk_type = SMU_SOCCLK; break;
2348 case PP_FCLK:
2349 clk_type = SMU_FCLK; break;
2350 case PP_DCEFCLK:
2351 clk_type = SMU_DCEFCLK; break;
2352 case PP_VCLK:
2353 clk_type = SMU_VCLK; break;
2354 case PP_DCLK:
2355 clk_type = SMU_DCLK; break;
2356 case OD_SCLK:
2357 clk_type = SMU_OD_SCLK; break;
2358 case OD_MCLK:
2359 clk_type = SMU_OD_MCLK; break;
2360 case OD_VDDC_CURVE:
2361 clk_type = SMU_OD_VDDC_CURVE; break;
2362 case OD_RANGE:
2363 clk_type = SMU_OD_RANGE; break;
2364 case OD_VDDGFX_OFFSET:
2365 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2366 case OD_CCLK:
2367 clk_type = SMU_OD_CCLK; break;
2368 default:
2369 return -EINVAL;
2370 }
2371
2372 return smu_print_smuclk_levels(smu, clk_type, buf);
2373}
2374
2375static int smu_od_edit_dpm_table(void *handle,
2376 enum PP_OD_DPM_TABLE_COMMAND type,
2377 long *input, uint32_t size)
2378{
2379 struct smu_context *smu = handle;
2380 int ret = 0;
2381
2382 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2383 return -EOPNOTSUPP;
2384
2385 mutex_lock(&smu->mutex);
2386
2387 if (smu->ppt_funcs->od_edit_dpm_table) {
2388 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2389 }
2390
2391 mutex_unlock(&smu->mutex);
2392
2393 return ret;
2394}
2395
2396static int smu_read_sensor(void *handle,
2397 int sensor,
2398 void *data,
2399 int *size_arg)
2400{
2401 struct smu_context *smu = handle;
2402 struct smu_umd_pstate_table *pstate_table =
2403 &smu->pstate_table;
2404 int ret = 0;
2405 uint32_t *size, size_val;
2406
2407 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2408 return -EOPNOTSUPP;
2409
2410 if (!data || !size_arg)
2411 return -EINVAL;
2412
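	/*
	 * The ppt read_sensor callbacks take a uint32_t size while this
	 * amd_pm_funcs entry receives an int, so bounce through a local.
	 */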
2413 size_val = *size_arg;
2414 size = &size_val;
2415
2416 mutex_lock(&smu->mutex);
2417
2418 if (smu->ppt_funcs->read_sensor)
2419 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2420 goto unlock;
2421
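	/* Fall back to generic handling for sensors the ppt code declined */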
2422 switch (sensor) {
2423 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2424 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2425 *size = 4;
2426 break;
2427 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2428 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2429 *size = 4;
2430 break;
2431 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2432 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
2433 *size = 8;
2434 break;
2435 case AMDGPU_PP_SENSOR_UVD_POWER:
2436 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2437 *size = 4;
2438 break;
2439 case AMDGPU_PP_SENSOR_VCE_POWER:
2440 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2441 *size = 4;
2442 break;
2443 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2444		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2445 *size = 4;
2446 break;
2447 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2448 *(uint32_t *)data = 0;
2449 *size = 4;
2450 break;
2451 default:
2452 *size = 0;
2453 ret = -EOPNOTSUPP;
2454 break;
2455 }
2456
2457unlock:
2458 mutex_unlock(&smu->mutex);
2459
2460	/* hand the sanitized uint32_t size back through the caller's int */
2461 *size_arg = size_val;
2462
2463 return ret;
2464}
2465
2466static int smu_get_power_profile_mode(void *handle, char *buf)
2467{
2468 struct smu_context *smu = handle;
2469 int ret = 0;
2470
2471 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2472 return -EOPNOTSUPP;
2473
2474 mutex_lock(&smu->mutex);
2475
2476 if (smu->ppt_funcs->get_power_profile_mode)
2477 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2478
2479 mutex_unlock(&smu->mutex);
2480
2481 return ret;
2482}
2483
2484static int smu_set_power_profile_mode(void *handle,
2485 long *param,
2486 uint32_t param_size)
2487{
2488 struct smu_context *smu = handle;
2489 int ret = 0;
2490
2491 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2492 return -EOPNOTSUPP;
2493
2494 mutex_lock(&smu->mutex);
2495
2496 smu_bump_power_profile_mode(smu, param, param_size);
2497
2498 mutex_unlock(&smu->mutex);
2499
2500 return ret;
2501}
2502
2503
2504static u32 smu_get_fan_control_mode(void *handle)
2505{
2506 struct smu_context *smu = handle;
2507 u32 ret = 0;
2508
2509 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2510 return AMD_FAN_CTRL_NONE;
2511
2512 mutex_lock(&smu->mutex);
2513
2514 if (smu->ppt_funcs->get_fan_control_mode)
2515 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2516
2517 mutex_unlock(&smu->mutex);
2518
2519 return ret;
2520}
2521
2522static int smu_set_fan_control_mode(struct smu_context *smu, int value)
2523{
2524 int ret = 0;
2525
2526 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2527 return -EOPNOTSUPP;
2528
2529 mutex_lock(&smu->mutex);
2530
2531 if (smu->ppt_funcs->set_fan_control_mode) {
2532 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2533 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2534 smu->user_dpm_profile.fan_mode = value;
2535 }
2536
2537 mutex_unlock(&smu->mutex);
2538
2539 /* reset user dpm fan speed */
2540 if (!ret && value != AMD_FAN_CTRL_MANUAL &&
2541 !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2542 smu->user_dpm_profile.fan_speed_percent = 0;
2543
2544 return ret;
2545}
2546
2547static void smu_pp_set_fan_control_mode(void *handle, u32 value)
2548{
2549 struct smu_context *smu = handle;
2550
2551 smu_set_fan_control_mode(smu, value);
2552}
2553
2554
2555static int smu_get_fan_speed_percent(void *handle, u32 *speed)
2556{
2557 struct smu_context *smu = handle;
2558 int ret = 0;
2559 uint32_t percent;
2560
2561 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2562 return -EOPNOTSUPP;
2563
2564 mutex_lock(&smu->mutex);
2565
2566 if (smu->ppt_funcs->get_fan_speed_percent) {
2567 ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
2568 if (!ret) {
2569 *speed = percent > 100 ? 100 : percent;
2570 }
2571 }
2572
2573 mutex_unlock(&smu->mutex);
2574
2575
2576 return ret;
2577}
2578
2579static int smu_set_fan_speed_percent(void *handle, u32 speed)
2580{
2581 struct smu_context *smu = handle;
2582 int ret = 0;
2583
2584 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2585 return -EOPNOTSUPP;
2586
2587 mutex_lock(&smu->mutex);
2588
2589 if (smu->ppt_funcs->set_fan_speed_percent) {
2590 if (speed > 100)
2591 speed = 100;
2592 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2593 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2594 smu->user_dpm_profile.fan_speed_percent = speed;
2595 }
2596
2597 mutex_unlock(&smu->mutex);
2598
2599 return ret;
2600}
2601
2602static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
2603{
2604 struct smu_context *smu = handle;
2605 int ret = 0;
2606 u32 percent;
2607
2608 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2609 return -EOPNOTSUPP;
2610
2611 mutex_lock(&smu->mutex);
2612
2613 if (smu->ppt_funcs->get_fan_speed_percent) {
2614		ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
2615		if (!ret)
2616			*speed = percent * smu->fan_max_rpm / 100;
2616 }
2617
2618 mutex_unlock(&smu->mutex);
2619
2620 return ret;
2621}
2622
2623static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
2624{
2625 struct smu_context *smu = handle;
2626 int ret = 0;
2627
2628 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2629 return -EOPNOTSUPP;
2630
2631 mutex_lock(&smu->mutex);
2632
2633 ret = smu_set_min_dcef_deep_sleep(smu, clk);
2634
2635 mutex_unlock(&smu->mutex);
2636
2637 return ret;
2638}
2639
2640static int smu_get_clock_by_type_with_latency(void *handle,
2641 enum amd_pp_clock_type type,
2642 struct pp_clock_levels_with_latency *clocks)
2643{
2644 struct smu_context *smu = handle;
2645 enum smu_clk_type clk_type;
2646 int ret = 0;
2647
2648 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2649 return -EOPNOTSUPP;
2650
2651 mutex_lock(&smu->mutex);
2652
2653 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
2654 switch (type) {
2655 case amd_pp_sys_clock:
2656 clk_type = SMU_GFXCLK;
2657 break;
2658 case amd_pp_mem_clock:
2659 clk_type = SMU_MCLK;
2660 break;
2661 case amd_pp_dcef_clock:
2662 clk_type = SMU_DCEFCLK;
2663 break;
2664 case amd_pp_disp_clock:
2665 clk_type = SMU_DISPCLK;
2666 break;
2667 default:
2668 dev_err(smu->adev->dev, "Invalid clock type!\n");
2669 mutex_unlock(&smu->mutex);
2670 return -EINVAL;
2671 }
2672
2673 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2674 }
2675
2676 mutex_unlock(&smu->mutex);
2677
2678 return ret;
2679}
2680
2681static int smu_display_clock_voltage_request(void *handle,
2682 struct pp_display_clock_request *clock_req)
2683{
2684 struct smu_context *smu = handle;
2685 int ret = 0;
2686
2687 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2688 return -EOPNOTSUPP;
2689
2690 mutex_lock(&smu->mutex);
2691
2692 if (smu->ppt_funcs->display_clock_voltage_request)
2693 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2694
2695 mutex_unlock(&smu->mutex);
2696
2697 return ret;
2698}
2699
2700
2701static int smu_display_disable_memory_clock_switch(void *handle,
2702 bool disable_memory_clock_switch)
2703{
2704 struct smu_context *smu = handle;
2705 int ret = -EINVAL;
2706
2707 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2708 return -EOPNOTSUPP;
2709
2710 mutex_lock(&smu->mutex);
2711
2712 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2713 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2714
2715 mutex_unlock(&smu->mutex);
2716
2717 return ret;
2718}
2719
2720static int smu_set_xgmi_pstate(void *handle,
2721 uint32_t pstate)
2722{
2723 struct smu_context *smu = handle;
2724 int ret = 0;
2725
2726 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2727 return -EOPNOTSUPP;
2728
2729 mutex_lock(&smu->mutex);
2730
2731 if (smu->ppt_funcs->set_xgmi_pstate)
2732 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2733
2734 mutex_unlock(&smu->mutex);
2735
2736	if (ret)
2737 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2738
2739 return ret;
2740}
2741
2742static int smu_get_baco_capability(void *handle, bool *cap)
2743{
2744 struct smu_context *smu = handle;
2745 int ret = 0;
2746
2747 *cap = false;
2748
2749 if (!smu->pm_enabled)
2750 return 0;
2751
2752 mutex_lock(&smu->mutex);
2753
2754 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2755 *cap = smu->ppt_funcs->baco_is_support(smu);
2756
2757 mutex_unlock(&smu->mutex);
2758
2759 return ret;
2760}
2761
2762static int smu_baco_set_state(void *handle, int state)
2763{
2764 struct smu_context *smu = handle;
2765 int ret = 0;
2766
2767 if (!smu->pm_enabled)
2768 return -EOPNOTSUPP;
2769
2770 if (state == 0) {
2771 mutex_lock(&smu->mutex);
2772
2773 if (smu->ppt_funcs->baco_exit)
2774 ret = smu->ppt_funcs->baco_exit(smu);
2775
2776 mutex_unlock(&smu->mutex);
2777 } else if (state == 1) {
2778 mutex_lock(&smu->mutex);
2779
2780 if (smu->ppt_funcs->baco_enter)
2781 ret = smu->ppt_funcs->baco_enter(smu);
2782
2783 mutex_unlock(&smu->mutex);
2784
2785 } else {
2786 return -EINVAL;
2787 }
2788
2789 if (ret)
2790 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
2791			state ? "enter" : "exit");
2792
2793 return ret;
2794}
2795
2796bool smu_mode1_reset_is_support(struct smu_context *smu)
2797{
2798 bool ret = false;
2799
2800 if (!smu->pm_enabled)
2801 return false;
2802
2803 mutex_lock(&smu->mutex);
2804
2805 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2806 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2807
2808 mutex_unlock(&smu->mutex);
2809
2810 return ret;
2811}
2812
2813bool smu_mode2_reset_is_support(struct smu_context *smu)
2814{
2815 bool ret = false;
2816
2817 if (!smu->pm_enabled)
2818 return false;
2819
2820 mutex_lock(&smu->mutex);
2821
2822 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
2823 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
2824
2825 mutex_unlock(&smu->mutex);
2826
2827 return ret;
2828}
2829
2830int smu_mode1_reset(struct smu_context *smu)
2831{
2832 int ret = 0;
2833
2834 if (!smu->pm_enabled)
2835 return -EOPNOTSUPP;
2836
2837 mutex_lock(&smu->mutex);
2838
2839 if (smu->ppt_funcs->mode1_reset)
2840 ret = smu->ppt_funcs->mode1_reset(smu);
2841
2842 mutex_unlock(&smu->mutex);
2843
2844 return ret;
2845}
2846
2847static int smu_mode2_reset(void *handle)
2848{
2849 struct smu_context *smu = handle;
2850 int ret = 0;
2851
2852 if (!smu->pm_enabled)
2853 return -EOPNOTSUPP;
2854
2855 mutex_lock(&smu->mutex);
2856
2857 if (smu->ppt_funcs->mode2_reset)
2858 ret = smu->ppt_funcs->mode2_reset(smu);
2859
2860 mutex_unlock(&smu->mutex);
2861
2862 if (ret)
2863 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2864
2865 return ret;
2866}
2867
2868static int smu_get_max_sustainable_clocks_by_dc(void *handle,
2869 struct pp_smu_nv_clock_table *max_clocks)
2870{
2871 struct smu_context *smu = handle;
2872 int ret = 0;
2873
2874 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2875 return -EOPNOTSUPP;
2876
2877 mutex_lock(&smu->mutex);
2878
2879 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2880 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2881
2882 mutex_unlock(&smu->mutex);
2883
2884 return ret;
2885}
2886
2887static int smu_get_uclk_dpm_states(void *handle,
2888 unsigned int *clock_values_in_khz,
2889 unsigned int *num_states)
2890{
2891 struct smu_context *smu = handle;
2892 int ret = 0;
2893
2894 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2895 return -EOPNOTSUPP;
2896
2897 mutex_lock(&smu->mutex);
2898
2899 if (smu->ppt_funcs->get_uclk_dpm_states)
2900 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2901
2902 mutex_unlock(&smu->mutex);
2903
2904 return ret;
2905}
2906
2907static enum amd_pm_state_type smu_get_current_power_state(void *handle)
2908{
2909 struct smu_context *smu = handle;
2910 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2911
2912 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2913 return -EOPNOTSUPP;
2914
2915 mutex_lock(&smu->mutex);
2916
2917 if (smu->ppt_funcs->get_current_power_state)
2918 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2919
2920 mutex_unlock(&smu->mutex);
2921
2922 return pm_state;
2923}
2924
2925static int smu_get_dpm_clock_table(void *handle,
2926 struct dpm_clocks *clock_table)
2927{
2928 struct smu_context *smu = handle;
2929 int ret = 0;
2930
2931 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2932 return -EOPNOTSUPP;
2933
2934 mutex_lock(&smu->mutex);
2935
2936 if (smu->ppt_funcs->get_dpm_clock_table)
2937 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2938
2939 mutex_unlock(&smu->mutex);
2940
2941 return ret;
2942}
2943
2944static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
2945{
2946 struct smu_context *smu = handle;
2947 ssize_t size;
2948
2949 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2950 return -EOPNOTSUPP;
2951
2952 if (!smu->ppt_funcs->get_gpu_metrics)
2953 return -EOPNOTSUPP;
2954
2955 mutex_lock(&smu->mutex);
2956
2957 size = smu->ppt_funcs->get_gpu_metrics(smu, table);
2958
2959 mutex_unlock(&smu->mutex);
2960
2961 return size;
2962}
2963
2964static int smu_enable_mgpu_fan_boost(void *handle)
2965{
2966 struct smu_context *smu = handle;
2967 int ret = 0;
2968
2969 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2970 return -EOPNOTSUPP;
2971
2972 mutex_lock(&smu->mutex);
2973
2974 if (smu->ppt_funcs->enable_mgpu_fan_boost)
2975 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
2976
2977 mutex_unlock(&smu->mutex);
2978
2979 return ret;
2980}
2981
2982static int smu_gfx_state_change_set(void *handle,
2983 uint32_t state)
2984{
2985 struct smu_context *smu = handle;
2986 int ret = 0;
2987
2988 mutex_lock(&smu->mutex);
2989 if (smu->ppt_funcs->gfx_state_change_set)
2990 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
2991 mutex_unlock(&smu->mutex);
2992
2993 return ret;
2994}
2995
2996int smu_set_light_sbr(struct smu_context *smu, bool enable)
2997{
2998 int ret = 0;
2999
3000 mutex_lock(&smu->mutex);
3001 if (smu->ppt_funcs->set_light_sbr)
3002 ret = smu->ppt_funcs->set_light_sbr(smu, enable);
3003 mutex_unlock(&smu->mutex);
3004
3005 return ret;
3006}
3007
3008static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3009{
3010 struct smu_context *smu = handle;
3011 struct smu_table_context *smu_table = &smu->smu_table;
3012 struct smu_table *memory_pool = &smu_table->memory_pool;
3013
3014 if (!addr || !size)
3015 return -EINVAL;
3016
3017 *addr = NULL;
3018 *size = 0;
3019 mutex_lock(&smu->mutex);
3020 if (memory_pool->bo) {
3021 *addr = memory_pool->cpu_addr;
3022 *size = memory_pool->size;
3023 }
3024 mutex_unlock(&smu->mutex);
3025
3026 return 0;
3027}
3028
3029static const struct amd_pm_funcs swsmu_pm_funcs = {
3030 /* export for sysfs */
3031 .set_fan_control_mode = smu_pp_set_fan_control_mode,
3032 .get_fan_control_mode = smu_get_fan_control_mode,
3033 .set_fan_speed_percent = smu_set_fan_speed_percent,
3034 .get_fan_speed_percent = smu_get_fan_speed_percent,
3035 .force_clock_level = smu_force_ppclk_levels,
3036 .print_clock_levels = smu_print_ppclk_levels,
3037 .force_performance_level = smu_force_performance_level,
3038 .read_sensor = smu_read_sensor,
3039 .get_performance_level = smu_get_performance_level,
3040 .get_current_power_state = smu_get_current_power_state,
3041 .get_fan_speed_rpm = smu_get_fan_speed_rpm,
3042 .set_fan_speed_rpm = smu_set_fan_speed_rpm,
3043 .get_pp_num_states = smu_get_power_num_states,
3044 .get_pp_table = smu_sys_get_pp_table,
3045 .set_pp_table = smu_sys_set_pp_table,
3046 .switch_power_profile = smu_switch_power_profile,
3047 /* export to amdgpu */
3048 .dispatch_tasks = smu_handle_dpm_task,
3049 .load_firmware = smu_load_microcode,
3050 .set_powergating_by_smu = smu_dpm_set_power_gate,
3051 .set_power_limit = smu_set_power_limit,
3052 .get_power_limit = smu_get_power_limit,
3053 .get_power_profile_mode = smu_get_power_profile_mode,
3054 .set_power_profile_mode = smu_set_power_profile_mode,
3055 .odn_edit_dpm_table = smu_od_edit_dpm_table,
3056 .set_mp1_state = smu_set_mp1_state,
3057 .gfx_state_change_set = smu_gfx_state_change_set,
3058 /* export to DC */
3059 .get_sclk = smu_get_sclk,
3060 .get_mclk = smu_get_mclk,
3061 .display_configuration_change = smu_display_configuration_change,
3062 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
3063 .display_clock_voltage_request = smu_display_clock_voltage_request,
3064 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
3065 .set_active_display_count = smu_set_display_count,
3066 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
3067 .get_asic_baco_capability = smu_get_baco_capability,
3068 .set_asic_baco_state = smu_baco_set_state,
3069 .get_ppfeature_status = smu_sys_get_pp_feature_mask,
3070 .set_ppfeature_status = smu_sys_set_pp_feature_mask,
3071 .asic_reset_mode_2 = smu_mode2_reset,
3072 .set_df_cstate = smu_set_df_cstate,
3073 .set_xgmi_pstate = smu_set_xgmi_pstate,
3074 .get_gpu_metrics = smu_sys_get_gpu_metrics,
3075 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
3076 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3077 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
3078 .get_uclk_dpm_states = smu_get_uclk_dpm_states,
3079 .get_dpm_clock_table = smu_get_dpm_clock_table,
3080 .get_smu_prv_buf_details = smu_get_prv_buffer_details,
3081};
3082
3083int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
3084 uint64_t event_arg)
3085{
3086 int ret = -EINVAL;
3087 struct smu_context *smu = &adev->smu;
3088
3089 if (smu->ppt_funcs->wait_for_event) {
3090 mutex_lock(&smu->mutex);
3091 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3092 mutex_unlock(&smu->mutex);
3093 }
3094
3095 return ret;
3096}
22
23#define SWSMU_CODE_LAYER_L1
24
25#include <linux/firmware.h>
26#include <linux/pci.h>
27#include <linux/power_supply.h>
28#include <linux/reboot.h>
29
30#include "amdgpu.h"
31#include "amdgpu_smu.h"
32#include "smu_internal.h"
33#include "atom.h"
34#include "arcturus_ppt.h"
35#include "navi10_ppt.h"
36#include "sienna_cichlid_ppt.h"
37#include "renoir_ppt.h"
38#include "vangogh_ppt.h"
39#include "aldebaran_ppt.h"
40#include "yellow_carp_ppt.h"
41#include "cyan_skillfish_ppt.h"
42#include "smu_v13_0_0_ppt.h"
43#include "smu_v13_0_4_ppt.h"
44#include "smu_v13_0_5_ppt.h"
45#include "smu_v13_0_6_ppt.h"
46#include "smu_v13_0_7_ppt.h"
47#include "smu_v14_0_0_ppt.h"
48#include "smu_v14_0_2_ppt.h"
49#include "amd_pcie.h"
50
51/*
52 * DO NOT use these for err/warn/info/debug messages.
53 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54 * They are more MGPU friendly.
55 */
56#undef pr_err
57#undef pr_warn
58#undef pr_info
59#undef pr_debug
60
61static const struct amd_pm_funcs swsmu_pm_funcs;
62static int smu_force_smuclk_levels(struct smu_context *smu,
63 enum smu_clk_type clk_type,
64 uint32_t mask);
65static int smu_handle_task(struct smu_context *smu,
66 enum amd_dpm_forced_level level,
67 enum amd_pp_task task_id);
68static int smu_reset(struct smu_context *smu);
69static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70static int smu_set_fan_control_mode(void *handle, u32 value);
71static int smu_set_power_limit(void *handle, uint32_t limit);
72static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75static void smu_power_profile_mode_get(struct smu_context *smu,
76 enum PP_SMC_POWER_PROFILE profile_mode);
77static void smu_power_profile_mode_put(struct smu_context *smu,
78 enum PP_SMC_POWER_PROFILE profile_mode);
79
80static int smu_sys_get_pp_feature_mask(void *handle,
81 char *buf)
82{
83 struct smu_context *smu = handle;
84
85 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
86 return -EOPNOTSUPP;
87
88 return smu_get_pp_feature_mask(smu, buf);
89}
90
91static int smu_sys_set_pp_feature_mask(void *handle,
92 uint64_t new_mask)
93{
94 struct smu_context *smu = handle;
95
96 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
97 return -EOPNOTSUPP;
98
99 return smu_set_pp_feature_mask(smu, new_mask);
100}
101
102int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
103{
104 if (!smu->ppt_funcs->set_gfx_off_residency)
105 return -EINVAL;
106
107 return smu_set_gfx_off_residency(smu, value);
108}
109
110int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
111{
112 if (!smu->ppt_funcs->get_gfx_off_residency)
113 return -EINVAL;
114
115 return smu_get_gfx_off_residency(smu, value);
116}
117
118int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
119{
120 if (!smu->ppt_funcs->get_gfx_off_entrycount)
121 return -EINVAL;
122
123 return smu_get_gfx_off_entrycount(smu, value);
124}
125
126int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
127{
128 if (!smu->ppt_funcs->get_gfx_off_status)
129 return -EINVAL;
130
131 *value = smu_get_gfx_off_status(smu);
132
133 return 0;
134}
135
136int smu_set_soft_freq_range(struct smu_context *smu,
137 enum smu_clk_type clk_type,
138 uint32_t min,
139 uint32_t max)
140{
141 int ret = 0;
142
143 if (smu->ppt_funcs->set_soft_freq_limited_range)
144 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
145 clk_type,
146 min,
147 max,
148 false);
149
150 return ret;
151}
152
153int smu_get_dpm_freq_range(struct smu_context *smu,
154 enum smu_clk_type clk_type,
155 uint32_t *min,
156 uint32_t *max)
157{
158 int ret = -ENOTSUPP;
159
160 if (!min && !max)
161 return -EINVAL;
162
163 if (smu->ppt_funcs->get_dpm_ultimate_freq)
164 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
165 clk_type,
166 min,
167 max);
168
169 return ret;
170}
171
172int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
173{
174 int ret = 0;
175 struct amdgpu_device *adev = smu->adev;
176
177 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
178 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
179 if (ret)
180 dev_err(adev->dev, "Failed to enable gfx imu!\n");
181 }
182 return ret;
183}
184
185static u32 smu_get_mclk(void *handle, bool low)
186{
187 struct smu_context *smu = handle;
188 uint32_t clk_freq;
189 int ret = 0;
190
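	/*
	 * Request only the bound we need: the range minimum when @low is
	 * true, the maximum otherwise. The *100 converts MHz to the
	 * 10 kHz units the legacy powerplay interface expects.
	 */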
191 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
192 low ? &clk_freq : NULL,
193 !low ? &clk_freq : NULL);
194 if (ret)
195 return 0;
196 return clk_freq * 100;
197}
198
199static u32 smu_get_sclk(void *handle, bool low)
200{
201 struct smu_context *smu = handle;
202 uint32_t clk_freq;
203 int ret = 0;
204
205 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
206 low ? &clk_freq : NULL,
207 !low ? &clk_freq : NULL);
208 if (ret)
209 return 0;
210 return clk_freq * 100;
211}
212
213static int smu_set_gfx_imu_enable(struct smu_context *smu)
214{
215 struct amdgpu_device *adev = smu->adev;
216
217 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
218 return 0;
219
220 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
221 return 0;
222
223 return smu_set_gfx_power_up_by_imu(smu);
224}
225
226static bool is_vcn_enabled(struct amdgpu_device *adev)
227{
228 int i;
229
230 for (i = 0; i < adev->num_ip_blocks; i++) {
231 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
232 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
233 !adev->ip_blocks[i].status.valid)
234 return false;
235 }
236
237 return true;
238}
239
240static int smu_dpm_set_vcn_enable(struct smu_context *smu,
241 bool enable)
242{
243 struct smu_power_context *smu_power = &smu->smu_power;
244 struct smu_power_gate *power_gate = &smu_power->power_gate;
245 int ret = 0;
246
247 /*
248	 * Don't power on VCN/JPEG when those IP blocks are skipped.
249 */
250 if (!is_vcn_enabled(smu->adev))
251 return 0;
252
253 if (!smu->ppt_funcs->dpm_set_vcn_enable)
254 return 0;
255
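	/*
	 * vcn_gated == 1 means VCN is currently power gated; the XOR is
	 * true exactly when the request would not change that state
	 * (already enabled or already gated), so it becomes a no-op.
	 */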
256 if (atomic_read(&power_gate->vcn_gated) ^ enable)
257 return 0;
258
259 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
260 if (!ret)
261 atomic_set(&power_gate->vcn_gated, !enable);
262
263 return ret;
264}
265
266static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
267 bool enable)
268{
269 struct smu_power_context *smu_power = &smu->smu_power;
270 struct smu_power_gate *power_gate = &smu_power->power_gate;
271 int ret = 0;
272
273 if (!is_vcn_enabled(smu->adev))
274 return 0;
275
276 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
277 return 0;
278
279 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
280 return 0;
281
282 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
283 if (!ret)
284 atomic_set(&power_gate->jpeg_gated, !enable);
285
286 return ret;
287}
288
289static int smu_dpm_set_vpe_enable(struct smu_context *smu,
290 bool enable)
291{
292 struct smu_power_context *smu_power = &smu->smu_power;
293 struct smu_power_gate *power_gate = &smu_power->power_gate;
294 int ret = 0;
295
296 if (!smu->ppt_funcs->dpm_set_vpe_enable)
297 return 0;
298
299 if (atomic_read(&power_gate->vpe_gated) ^ enable)
300 return 0;
301
302 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
303 if (!ret)
304 atomic_set(&power_gate->vpe_gated, !enable);
305
306 return ret;
307}
308
309static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
310 bool enable)
311{
312 struct smu_power_context *smu_power = &smu->smu_power;
313 struct smu_power_gate *power_gate = &smu_power->power_gate;
314 int ret = 0;
315
316 if (!smu->adev->enable_umsch_mm)
317 return 0;
318
319 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
320 return 0;
321
322 if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
323 return 0;
324
325 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
326 if (!ret)
327 atomic_set(&power_gate->umsch_mm_gated, !enable);
328
329 return ret;
330}
331
332static int smu_set_mall_enable(struct smu_context *smu)
333{
334 int ret = 0;
335
336 if (!smu->ppt_funcs->set_mall_enable)
337 return 0;
338
339 ret = smu->ppt_funcs->set_mall_enable(smu);
340
341 return ret;
342}
343
344/**
345 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
346 *
347 * @handle: smu_context pointer
348 * @block_type: the IP block to power gate/ungate
349 * @gate: to power gate if true, ungate otherwise
350 *
351 * This API takes no smu->mutex lock protection because:
352 * 1. It is called by another IP block (gfx/sdma/vcn/uvd/vce), and
353 *    the caller guarantees the call is free of races.
354 * 2. Or it is called on a user request to change
355 *    power_dpm_force_performance_level, in which case smu->mutex is
356 *    already held by the parent API smu_force_performance_level.
357 */
358static int smu_dpm_set_power_gate(void *handle,
359 uint32_t block_type,
360 bool gate)
361{
362 struct smu_context *smu = handle;
363 int ret = 0;
364
365 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
366 dev_WARN(smu->adev->dev,
367 "SMU uninitialized but power %s requested for %u!\n",
368 gate ? "gate" : "ungate", block_type);
369 return -EOPNOTSUPP;
370 }
371
372 switch (block_type) {
373 /*
374 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
375 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
376 */
377 case AMD_IP_BLOCK_TYPE_UVD:
378 case AMD_IP_BLOCK_TYPE_VCN:
379 ret = smu_dpm_set_vcn_enable(smu, !gate);
380 if (ret)
381 dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
382 gate ? "gate" : "ungate");
383 break;
384 case AMD_IP_BLOCK_TYPE_GFX:
385 ret = smu_gfx_off_control(smu, gate);
386 if (ret)
387 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
388 gate ? "enable" : "disable");
389 break;
390 case AMD_IP_BLOCK_TYPE_SDMA:
391 ret = smu_powergate_sdma(smu, gate);
392 if (ret)
393 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
394 gate ? "gate" : "ungate");
395 break;
396 case AMD_IP_BLOCK_TYPE_JPEG:
397 ret = smu_dpm_set_jpeg_enable(smu, !gate);
398 if (ret)
399 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
400 gate ? "gate" : "ungate");
401 break;
402 case AMD_IP_BLOCK_TYPE_VPE:
403 ret = smu_dpm_set_vpe_enable(smu, !gate);
404 if (ret)
405 dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
406 gate ? "gate" : "ungate");
407 break;
408 default:
409 dev_err(smu->adev->dev, "Unsupported block type!\n");
410 return -EINVAL;
411 }
412
413 return ret;
414}
415
416/**
417 * smu_set_user_clk_dependencies - set user profile clock dependencies
418 *
419 * @smu: smu_context pointer
420 * @clk: enum smu_clk_type type
421 *
422 * Enable/Disable the clock dependency for the @clk type.
423 */
424static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
425{
426 if (smu->adev->in_suspend)
427 return;
428
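	/*
	 * clk_dependency marks clocks whose user-forced levels must not
	 * be restored because they are implied by @clk: e.g. forcing
	 * MCLK invalidates earlier user FCLK/SOCCLK settings, so those
	 * are skipped by smu_restore_dpm_user_profile().
	 */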
429 if (clk == SMU_MCLK) {
430 smu->user_dpm_profile.clk_dependency = 0;
431 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
432 } else if (clk == SMU_FCLK) {
433 /* MCLK takes precedence over FCLK */
434 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
435 return;
436
437 smu->user_dpm_profile.clk_dependency = 0;
438 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
439 } else if (clk == SMU_SOCCLK) {
440 /* MCLK takes precedence over SOCCLK */
441 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
442 return;
443
444 smu->user_dpm_profile.clk_dependency = 0;
445 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
446 } else
447 /* Add clk dependencies here, if any */
448 return;
449}
450
451/**
452 * smu_restore_dpm_user_profile - reinstate user dpm profile
453 *
454 * @smu: smu_context pointer
455 *
456 * Restore the saved user power configurations, including power limit,
457 * clock frequencies, fan control mode and fan speed.
458 */
459static void smu_restore_dpm_user_profile(struct smu_context *smu)
460{
461 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
462 int ret = 0;
463
464 if (!smu->adev->in_suspend)
465 return;
466
467 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
468 return;
469
470 /* Enable restore flag */
471 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
472
473 /* set the user dpm power limit */
474 if (smu->user_dpm_profile.power_limit) {
475 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
476 if (ret)
477 dev_err(smu->adev->dev, "Failed to set power limit value\n");
478 }
479
480 /* set the user dpm clock configurations */
481 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
482 enum smu_clk_type clk_type;
483
484 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
485 /*
486			 * Iterate over the smu clock types and force the saved
487			 * user clock configs; skip any clock with a dependency set.
488 */
489 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
490 smu->user_dpm_profile.clk_mask[clk_type]) {
491 ret = smu_force_smuclk_levels(smu, clk_type,
492 smu->user_dpm_profile.clk_mask[clk_type]);
493 if (ret)
494 dev_err(smu->adev->dev,
495 "Failed to set clock type = %d\n", clk_type);
496 }
497 }
498 }
499
500 /* set the user dpm fan configurations */
501 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
502 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
503 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
504		if (ret && ret != -EOPNOTSUPP) {
505 smu->user_dpm_profile.fan_speed_pwm = 0;
506 smu->user_dpm_profile.fan_speed_rpm = 0;
507 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
508 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
509 }
510
511 if (smu->user_dpm_profile.fan_speed_pwm) {
512 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
513			if (ret && ret != -EOPNOTSUPP)
514 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
515 }
516
517 if (smu->user_dpm_profile.fan_speed_rpm) {
518 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
519			if (ret && ret != -EOPNOTSUPP)
520 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
521 }
522 }
523
524 /* Restore user customized OD settings */
525 if (smu->user_dpm_profile.user_od) {
526 if (smu->ppt_funcs->restore_user_od_settings) {
527 ret = smu->ppt_funcs->restore_user_od_settings(smu);
528 if (ret)
529 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
530 }
531 }
532
533 /* Disable restore flag */
534 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
535}
536
537static int smu_get_power_num_states(void *handle,
538 struct pp_states_info *state_info)
539{
540 if (!state_info)
541 return -EINVAL;
542
543	/* power states are not supported */
544 memset(state_info, 0, sizeof(struct pp_states_info));
545 state_info->nums = 1;
546 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
547
548 return 0;
549}
550
551bool is_support_sw_smu(struct amdgpu_device *adev)
552{
553 /* vega20 is 11.0.2, but it's supported via the powerplay code */
554 if (adev->asic_type == CHIP_VEGA20)
555 return false;
556
557 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
558 amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
559 return true;
560
561 return false;
562}
563
564bool is_support_cclk_dpm(struct amdgpu_device *adev)
565{
566 struct smu_context *smu = adev->powerplay.pp_handle;
567
568 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
569 return false;
570
571 return true;
572}
573
574
575static int smu_sys_get_pp_table(void *handle,
576 char **table)
577{
578 struct smu_context *smu = handle;
579 struct smu_table_context *smu_table = &smu->smu_table;
580
581 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
582 return -EOPNOTSUPP;
583
584 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
585 return -EINVAL;
586
587 if (smu_table->hardcode_pptable)
588 *table = smu_table->hardcode_pptable;
589 else
590 *table = smu_table->power_play_table;
591
592 return smu_table->power_play_table_size;
593}
594
595static int smu_sys_set_pp_table(void *handle,
596 const char *buf,
597 size_t size)
598{
599 struct smu_context *smu = handle;
600 struct smu_table_context *smu_table = &smu->smu_table;
601 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
602 int ret = 0;
603
604 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
605 return -EOPNOTSUPP;
606
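	/*
	 * Every ATOM table begins with a common header whose
	 * usStructureSize must match the uploaded buffer size exactly.
	 */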
607 if (header->usStructureSize != size) {
608		dev_err(smu->adev->dev, "pp table size does not match!\n");
609 return -EIO;
610 }
611
612 if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
613 kfree(smu_table->hardcode_pptable);
614 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
615 if (!smu_table->hardcode_pptable)
616 return -ENOMEM;
617 }
618
619 memcpy(smu_table->hardcode_pptable, buf, size);
620 smu_table->power_play_table = smu_table->hardcode_pptable;
621 smu_table->power_play_table_size = size;
622
623 /*
624	 * A special hw_fini action (for Navi1x, the DPM disablement is
625	 * skipped) may be needed for custom pptable uploading.
626 */
627 smu->uploading_custom_pp_table = true;
628
629 ret = smu_reset(smu);
630 if (ret)
631 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
632
633 smu->uploading_custom_pp_table = false;
634
635 return ret;
636}
637
638static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
639{
640 struct smu_feature *feature = &smu->smu_feature;
641 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
642 int ret = 0;
643
644 /*
645	 * With SCPM enabled, setting the allowed feature masks (via
646	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
647	 * That means there is no way to let PMFW know the settings below.
648	 * Thus, we just assume all features are allowed in that
649	 * scenario.
650 */
651 if (smu->adev->scpm_enabled) {
652 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
653 return 0;
654 }
655
656 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
657
658 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
659 SMU_FEATURE_MAX/32);
660 if (ret)
661 return ret;
662
663 bitmap_or(feature->allowed, feature->allowed,
664 (unsigned long *)allowed_feature_mask,
665 feature->feature_num);
666
667 return ret;
668}
669
670static int smu_set_funcs(struct amdgpu_device *adev)
671{
672 struct smu_context *smu = adev->powerplay.pp_handle;
673
674 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
675 smu->od_enabled = true;
676
677 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
678 case IP_VERSION(11, 0, 0):
679 case IP_VERSION(11, 0, 5):
680 case IP_VERSION(11, 0, 9):
681 navi10_set_ppt_funcs(smu);
682 break;
683 case IP_VERSION(11, 0, 7):
684 case IP_VERSION(11, 0, 11):
685 case IP_VERSION(11, 0, 12):
686 case IP_VERSION(11, 0, 13):
687 sienna_cichlid_set_ppt_funcs(smu);
688 break;
689 case IP_VERSION(12, 0, 0):
690 case IP_VERSION(12, 0, 1):
691 renoir_set_ppt_funcs(smu);
692 break;
693 case IP_VERSION(11, 5, 0):
694 vangogh_set_ppt_funcs(smu);
695 break;
696 case IP_VERSION(13, 0, 1):
697 case IP_VERSION(13, 0, 3):
698 case IP_VERSION(13, 0, 8):
699 yellow_carp_set_ppt_funcs(smu);
700 break;
701 case IP_VERSION(13, 0, 4):
702 case IP_VERSION(13, 0, 11):
703 smu_v13_0_4_set_ppt_funcs(smu);
704 break;
705 case IP_VERSION(13, 0, 5):
706 smu_v13_0_5_set_ppt_funcs(smu);
707 break;
708 case IP_VERSION(11, 0, 8):
709 cyan_skillfish_set_ppt_funcs(smu);
710 break;
711 case IP_VERSION(11, 0, 2):
712 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
713 arcturus_set_ppt_funcs(smu);
714 /* OD is not supported on Arcturus */
715 smu->od_enabled = false;
716 break;
717 case IP_VERSION(13, 0, 2):
718 aldebaran_set_ppt_funcs(smu);
719 /* Enable pp_od_clk_voltage node */
720 smu->od_enabled = true;
721 break;
722 case IP_VERSION(13, 0, 0):
723 case IP_VERSION(13, 0, 10):
724 smu_v13_0_0_set_ppt_funcs(smu);
725 break;
726 case IP_VERSION(13, 0, 6):
727 case IP_VERSION(13, 0, 14):
728 smu_v13_0_6_set_ppt_funcs(smu);
729 /* Enable pp_od_clk_voltage node */
730 smu->od_enabled = true;
731 break;
732 case IP_VERSION(13, 0, 7):
733 smu_v13_0_7_set_ppt_funcs(smu);
734 break;
735 case IP_VERSION(14, 0, 0):
736 case IP_VERSION(14, 0, 1):
737 case IP_VERSION(14, 0, 4):
738 smu_v14_0_0_set_ppt_funcs(smu);
739 break;
740 case IP_VERSION(14, 0, 2):
741 case IP_VERSION(14, 0, 3):
742 smu_v14_0_2_set_ppt_funcs(smu);
743 break;
744 default:
745 return -EINVAL;
746 }
747
748 return 0;
749}
750
751static int smu_early_init(struct amdgpu_ip_block *ip_block)
752{
753 struct amdgpu_device *adev = ip_block->adev;
754 struct smu_context *smu;
755 int r;
756
757 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
758 if (!smu)
759 return -ENOMEM;
760
761 smu->adev = adev;
762 smu->pm_enabled = !!amdgpu_dpm;
763 smu->is_apu = false;
764 smu->smu_baco.state = SMU_BACO_STATE_NONE;
765 smu->smu_baco.platform_support = false;
766 smu->smu_baco.maco_support = false;
767 smu->user_dpm_profile.fan_mode = -1;
768 smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
769
770 mutex_init(&smu->message_lock);
771
772 adev->powerplay.pp_handle = smu;
773 adev->powerplay.pp_funcs = &swsmu_pm_funcs;
774
775 r = smu_set_funcs(adev);
776 if (r)
777 return r;
778 return smu_init_microcode(smu);
779}
780
781static int smu_set_default_dpm_table(struct smu_context *smu)
782{
783 struct amdgpu_device *adev = smu->adev;
784 struct smu_power_context *smu_power = &smu->smu_power;
785 struct smu_power_gate *power_gate = &smu_power->power_gate;
786 int vcn_gate, jpeg_gate;
787 int ret = 0;
788
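	/*
	 * Building the default DPM tables requires VCN/JPEG to be powered
	 * up: save the current gating state, ungate both blocks around
	 * the ppt call, then restore whatever state was saved.
	 */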
789 if (!smu->ppt_funcs->set_default_dpm_table)
790 return 0;
791
792 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
793 vcn_gate = atomic_read(&power_gate->vcn_gated);
794 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
795 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
796
797 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
798 ret = smu_dpm_set_vcn_enable(smu, true);
799 if (ret)
800 return ret;
801 }
802
803 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
804 ret = smu_dpm_set_jpeg_enable(smu, true);
805 if (ret)
806 goto err_out;
807 }
808
809 ret = smu->ppt_funcs->set_default_dpm_table(smu);
810 if (ret)
811 dev_err(smu->adev->dev,
812 "Failed to setup default dpm clock tables!\n");
813
814 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
815 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
816err_out:
817 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
818 smu_dpm_set_vcn_enable(smu, !vcn_gate);
819
820 return ret;
821}
822
823static int smu_apply_default_config_table_settings(struct smu_context *smu)
824{
825 struct amdgpu_device *adev = smu->adev;
826 int ret = 0;
827
828 ret = smu_get_default_config_table_settings(smu,
829 &adev->pm.config_table);
830 if (ret)
831 return ret;
832
833 return smu_set_config_table(smu, &adev->pm.config_table);
834}
835
836static int smu_late_init(struct amdgpu_ip_block *ip_block)
837{
838 struct amdgpu_device *adev = ip_block->adev;
839 struct smu_context *smu = adev->powerplay.pp_handle;
840 int ret = 0;
841
842 smu_set_fine_grain_gfx_freq_parameters(smu);
843
844 if (!smu->pm_enabled)
845 return 0;
846
847 ret = smu_post_init(smu);
848 if (ret) {
849 dev_err(adev->dev, "Failed to post smu init!\n");
850 return ret;
851 }
852
853 /*
854	 * Explicitly notify PMFW which power mode the system is in,
855	 * since PMFW may have booted the ASIC in a different mode.
856	 * For ASICs supporting the AC/DC switch via GPIO, PMFW
857	 * handles the switch automatically and driver involvement
858	 * is unnecessary.
859 */
860 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
861 smu_set_ac_dc(smu);
862
863 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
864 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
865 return 0;
866
867 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
868 ret = smu_set_default_od_settings(smu);
869 if (ret) {
870 dev_err(adev->dev, "Failed to setup default OD settings!\n");
871 return ret;
872 }
873 }
874
875 ret = smu_populate_umd_state_clk(smu);
876 if (ret) {
877 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
878 return ret;
879 }
880
881 ret = smu_get_asic_power_limits(smu,
882 &smu->current_power_limit,
883 &smu->default_power_limit,
884 &smu->max_power_limit,
885 &smu->min_power_limit);
886 if (ret) {
887 dev_err(adev->dev, "Failed to get asic power limits!\n");
888 return ret;
889 }
890
891 if (!amdgpu_sriov_vf(adev))
892 smu_get_unique_id(smu);
893
894 smu_get_fan_parameters(smu);
895
896 smu_handle_task(smu,
897 smu->smu_dpm.dpm_level,
898 AMD_PP_TASK_COMPLETE_INIT);
899
900 ret = smu_apply_default_config_table_settings(smu);
901 if (ret && (ret != -EOPNOTSUPP)) {
902 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
903 return ret;
904 }
905
906 smu_restore_dpm_user_profile(smu);
907
908 return 0;
909}
910
911static int smu_init_fb_allocations(struct smu_context *smu)
912{
913 struct amdgpu_device *adev = smu->adev;
914 struct smu_table_context *smu_table = &smu->smu_table;
915 struct smu_table *tables = smu_table->tables;
916 struct smu_table *driver_table = &(smu_table->driver_table);
917 uint32_t max_table_size = 0;
918 int ret, i;
919
920 /* VRAM allocation for tool table */
921 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
922 ret = amdgpu_bo_create_kernel(adev,
923 tables[SMU_TABLE_PMSTATUSLOG].size,
924 tables[SMU_TABLE_PMSTATUSLOG].align,
925 tables[SMU_TABLE_PMSTATUSLOG].domain,
926 &tables[SMU_TABLE_PMSTATUSLOG].bo,
927 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
928 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
929 if (ret) {
930 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
931 return ret;
932 }
933 }
934
935 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
936 /* VRAM allocation for driver table */
937 for (i = 0; i < SMU_TABLE_COUNT; i++) {
938 if (tables[i].size == 0)
939 continue;
940
941 /* If one of the tables has VRAM domain restriction, keep it in
942 * VRAM
943 */
944 if ((tables[i].domain &
945 (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
946 AMDGPU_GEM_DOMAIN_VRAM)
947 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
948
949 if (i == SMU_TABLE_PMSTATUSLOG)
950 continue;
951
952 if (max_table_size < tables[i].size)
953 max_table_size = tables[i].size;
954 }
955
956 driver_table->size = max_table_size;
957 driver_table->align = PAGE_SIZE;
958
959 ret = amdgpu_bo_create_kernel(adev,
960 driver_table->size,
961 driver_table->align,
962 driver_table->domain,
963 &driver_table->bo,
964 &driver_table->mc_address,
965 &driver_table->cpu_addr);
966 if (ret) {
967 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
968 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
969 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
970 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
971 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
972 }
973
974 return ret;
975}
976
977static int smu_fini_fb_allocations(struct smu_context *smu)
978{
979 struct smu_table_context *smu_table = &smu->smu_table;
980 struct smu_table *tables = smu_table->tables;
981 struct smu_table *driver_table = &(smu_table->driver_table);
982
983 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
984 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
985 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
986 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
987
988 amdgpu_bo_free_kernel(&driver_table->bo,
989 &driver_table->mc_address,
990 &driver_table->cpu_addr);
991
992 return 0;
993}
994
995/**
996 * smu_alloc_memory_pool - allocate a memory pool in system memory
997 *
998 * @smu: smu_context pointer
999 *
1000 * This memory pool is used by the SMC; its address is passed to the
1001 * firmware via the SetSystemVirtualDramAddr/DramLogSetDramAddr messages.
1002 *
1003 * Returns 0 on success, error on failure.
1004 */
1005static int smu_alloc_memory_pool(struct smu_context *smu)
1006{
1007 struct amdgpu_device *adev = smu->adev;
1008 struct smu_table_context *smu_table = &smu->smu_table;
1009 struct smu_table *memory_pool = &smu_table->memory_pool;
1010 uint64_t pool_size = smu->pool_size;
1011 int ret = 0;
1012
1013 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1014 return ret;
1015
1016 memory_pool->size = pool_size;
1017 memory_pool->align = PAGE_SIZE;
1018 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1019
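	/*
	 * Only the pool sizes known to PMFW are allocated below; any other
	 * value is silently ignored and the pool is left unallocated.
	 */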
1020 switch (pool_size) {
1021 case SMU_MEMORY_POOL_SIZE_256_MB:
1022 case SMU_MEMORY_POOL_SIZE_512_MB:
1023 case SMU_MEMORY_POOL_SIZE_1_GB:
1024 case SMU_MEMORY_POOL_SIZE_2_GB:
1025 ret = amdgpu_bo_create_kernel(adev,
1026 memory_pool->size,
1027 memory_pool->align,
1028 memory_pool->domain,
1029 &memory_pool->bo,
1030 &memory_pool->mc_address,
1031 &memory_pool->cpu_addr);
1032 if (ret)
1033			dev_err(adev->dev, "Memory pool allocation for dramlog failed!\n");
1034 break;
1035 default:
1036 break;
1037 }
1038
1039 return ret;
1040}
1041
1042static int smu_free_memory_pool(struct smu_context *smu)
1043{
1044 struct smu_table_context *smu_table = &smu->smu_table;
1045 struct smu_table *memory_pool = &smu_table->memory_pool;
1046
1047 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1048 return 0;
1049
1050 amdgpu_bo_free_kernel(&memory_pool->bo,
1051 &memory_pool->mc_address,
1052 &memory_pool->cpu_addr);
1053
1054 memset(memory_pool, 0, sizeof(struct smu_table));
1055
1056 return 0;
1057}
1058
1059static int smu_alloc_dummy_read_table(struct smu_context *smu)
1060{
1061 struct smu_table_context *smu_table = &smu->smu_table;
1062 struct smu_table *dummy_read_1_table =
1063 &smu_table->dummy_read_1_table;
1064 struct amdgpu_device *adev = smu->adev;
1065 int ret = 0;
1066
1067 if (!dummy_read_1_table->size)
1068 return 0;
1069
1070 ret = amdgpu_bo_create_kernel(adev,
1071 dummy_read_1_table->size,
1072 dummy_read_1_table->align,
1073 dummy_read_1_table->domain,
1074 &dummy_read_1_table->bo,
1075 &dummy_read_1_table->mc_address,
1076 &dummy_read_1_table->cpu_addr);
1077 if (ret)
1078 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1079
1080 return ret;
1081}
1082
1083static void smu_free_dummy_read_table(struct smu_context *smu)
1084{
1085 struct smu_table_context *smu_table = &smu->smu_table;
1086 struct smu_table *dummy_read_1_table =
1087 &smu_table->dummy_read_1_table;
1088
1090 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1091 &dummy_read_1_table->mc_address,
1092 &dummy_read_1_table->cpu_addr);
1093
1094 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1095}
1096
1097static int smu_smc_table_sw_init(struct smu_context *smu)
1098{
1099 int ret;
1100
1101	/*
1102	 * Create the smu_table structures and init SMC tables such as
1103	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1104	 */
1105 ret = smu_init_smc_tables(smu);
1106 if (ret) {
1107 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1108 return ret;
1109 }
1110
1111	/*
1112	 * Create the smu_power_context structure and allocate the
1113	 * smu_dpm_context of the proper size to fill it.
1114	 */
1115 ret = smu_init_power(smu);
1116 if (ret) {
1117		dev_err(smu->adev->dev, "Failed to init smu power!\n");
1118 return ret;
1119 }
1120
1121 /*
1122 * allocate vram bos to store smc table contents.
1123 */
1124 ret = smu_init_fb_allocations(smu);
1125 if (ret)
1126 return ret;
1127
1128 ret = smu_alloc_memory_pool(smu);
1129 if (ret)
1130 return ret;
1131
1132 ret = smu_alloc_dummy_read_table(smu);
1133 if (ret)
1134 return ret;
1135
1136 ret = smu_i2c_init(smu);
1137 if (ret)
1138 return ret;
1139
1140 return 0;
1141}
1142
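/* Teardown below mirrors smu_smc_table_sw_init() in reverse order. */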
1143static int smu_smc_table_sw_fini(struct smu_context *smu)
1144{
1145 int ret;
1146
1147 smu_i2c_fini(smu);
1148
1149 smu_free_dummy_read_table(smu);
1150
1151 ret = smu_free_memory_pool(smu);
1152 if (ret)
1153 return ret;
1154
1155 ret = smu_fini_fb_allocations(smu);
1156 if (ret)
1157 return ret;
1158
1159 ret = smu_fini_power(smu);
1160 if (ret) {
1161		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
1162 return ret;
1163 }
1164
1165 ret = smu_fini_smc_tables(smu);
1166 if (ret) {
1167		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
1168 return ret;
1169 }
1170
1171 return 0;
1172}
1173
1174static void smu_throttling_logging_work_fn(struct work_struct *work)
1175{
1176 struct smu_context *smu = container_of(work, struct smu_context,
1177 throttling_logging_work);
1178
1179 smu_log_thermal_throttling(smu);
1180}
1181
1182static void smu_interrupt_work_fn(struct work_struct *work)
1183{
1184 struct smu_context *smu = container_of(work, struct smu_context,
1185 interrupt_work);
1186
1187 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1188 smu->ppt_funcs->interrupt_work(smu);
1189}
1190
1191static void smu_swctf_delayed_work_handler(struct work_struct *work)
1192{
1193 struct smu_context *smu =
1194 container_of(work, struct smu_context, swctf_delayed_work.work);
1195 struct smu_temperature_range *range =
1196 &smu->thermal_range;
1197 struct amdgpu_device *adev = smu->adev;
1198 uint32_t hotspot_tmp, size;
1199
1200 /*
1201	 * If the hotspot temperature is confirmed to be below the SW CTF setting
1202	 * point after the enforced delay, nothing will be done.
1203 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1204 */
1205 if (range->software_shutdown_temp &&
1206 smu->ppt_funcs->read_sensor &&
1207 !smu->ppt_funcs->read_sensor(smu,
1208 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1209 &hotspot_tmp,
1210 &size) &&
1211 hotspot_tmp / 1000 < range->software_shutdown_temp)
1212 return;
1213
1214 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1215 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1216 orderly_poweroff(true);
1217}
1218
1219static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1220{
1221 struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1222 struct smu_dpm_policy_ctxt *policy_ctxt;
1223 struct smu_dpm_policy *policy;
1224
1225 policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1226 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1227 if (policy)
1228 policy->current_level = XGMI_PLPD_DEFAULT;
1229 return;
1230 }
1231
1232	/* PMFW puts PLPD into the default policy after enabling the feature */
1233 if (smu_feature_is_enabled(smu,
1234 SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1235 if (policy)
1236 policy->current_level = XGMI_PLPD_DEFAULT;
1237 } else {
1238 policy_ctxt = dpm_ctxt->dpm_policies;
1239 if (policy_ctxt)
1240 policy_ctxt->policy_mask &=
1241 ~BIT(PP_PM_POLICY_XGMI_PLPD);
1242 }
1243}
1244
1245static bool smu_is_workload_profile_available(struct smu_context *smu,
1246 u32 profile)
1247{
1248 if (profile >= PP_SMC_POWER_PROFILE_COUNT)
1249 return false;
1250 return smu->workload_map && smu->workload_map[profile].valid_mapping;
1251}
1252
1253static void smu_init_power_profile(struct smu_context *smu)
1254{
1255 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
1256 if (smu->is_apu ||
1257 !smu_is_workload_profile_available(
1258 smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
1259 smu->power_profile_mode =
1260 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1261 else
1262 smu->power_profile_mode =
1263 PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1264 }
1265 smu_power_profile_mode_get(smu, smu->power_profile_mode);
1266}
1267
1268static int smu_sw_init(struct amdgpu_ip_block *ip_block)
1269{
1270 struct amdgpu_device *adev = ip_block->adev;
1271 struct smu_context *smu = adev->powerplay.pp_handle;
1272 int ret;
1273
1274 smu->pool_size = adev->pm.smu_prv_buffer_size;
1275 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1276 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1277 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1278
1279 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1280 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1281 atomic64_set(&smu->throttle_int_counter, 0);
1282 smu->watermarks_bitmap = 0;
1283
1284 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
1285 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1286 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1287 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1288
1289 smu_init_power_profile(smu);
1290 smu->display_config = &adev->pm.pm_display_cfg;
1291
1292 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1293 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1294
1295 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1296 smu_swctf_delayed_work_handler);
1297
1298 ret = smu_smc_table_sw_init(smu);
1299 if (ret) {
1300 dev_err(adev->dev, "Failed to sw init smc table!\n");
1301 return ret;
1302 }
1303
1304 /* get boot_values from vbios to set revision, gfxclk, and etc. */
1305 ret = smu_get_vbios_bootup_values(smu);
1306 if (ret) {
1307 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1308 return ret;
1309 }
1310
1311 ret = smu_init_pptable_microcode(smu);
1312 if (ret) {
1313 dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1314 return ret;
1315 }
1316
1317 ret = smu_register_irq_handler(smu);
1318 if (ret) {
1319 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1320 return ret;
1321 }
1322
1323 /* If there is no way to query fan control mode, fan control is not supported */
1324 if (!smu->ppt_funcs->get_fan_control_mode)
1325 smu->adev->pm.no_fan = true;
1326
1327 return 0;
1328}
1329
1330static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
1331{
1332 struct amdgpu_device *adev = ip_block->adev;
1333 struct smu_context *smu = adev->powerplay.pp_handle;
1334 int ret;
1335
1336 ret = smu_smc_table_sw_fini(smu);
1337 if (ret) {
1338 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1339 return ret;
1340 }
1341
1342 if (smu->custom_profile_params) {
1343 kfree(smu->custom_profile_params);
1344 smu->custom_profile_params = NULL;
1345 }
1346
1347 smu_fini_microcode(smu);
1348
1349 return 0;
1350}
1351
1352static int smu_get_thermal_temperature_range(struct smu_context *smu)
1353{
1354 struct amdgpu_device *adev = smu->adev;
1355 struct smu_temperature_range *range =
1356 &smu->thermal_range;
1357 int ret = 0;
1358
1359 if (!smu->ppt_funcs->get_thermal_temperature_range)
1360 return 0;
1361
1362 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1363 if (ret)
1364 return ret;
1365
1366 adev->pm.dpm.thermal.min_temp = range->min;
1367 adev->pm.dpm.thermal.max_temp = range->max;
1368 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1369 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1370 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1371 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1372 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1373 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1374 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1375
1376 return ret;
1377}
1378
1379/**
1380 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1381 *
1382 * @smu: smu_context pointer
1383 *
1384 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1385 * Returns 0 on success, error on failure.
1386 */
1387static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1388{
1389 struct wbrf_ranges_in_out wbrf_exclusion = {0};
1390 struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1391 struct amdgpu_device *adev = smu->adev;
1392 uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1393 uint64_t start, end;
1394 int ret, i, j;
1395
1396 ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1397 if (ret) {
1398 dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1399 return ret;
1400 }
1401
1402 /*
1403 * The exclusion ranges array we got might be filled with holes and duplicate
1404 * entries. For example:
1405 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1406	 * We need to do some cleanup to eliminate those holes and duplicate entries.
1407 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1408 */
1409 for (i = 0; i < num_of_wbrf_ranges; i++) {
1410 start = wifi_bands[i].start;
1411 end = wifi_bands[i].end;
1412
1413 /* get the last valid entry to fill the intermediate hole */
1414 if (!start && !end) {
1415 for (j = num_of_wbrf_ranges - 1; j > i; j--)
1416 if (wifi_bands[j].start && wifi_bands[j].end)
1417 break;
1418
1419 /* no valid entry left */
1420 if (j <= i)
1421 break;
1422
1423 start = wifi_bands[i].start = wifi_bands[j].start;
1424 end = wifi_bands[i].end = wifi_bands[j].end;
1425 wifi_bands[j].start = 0;
1426 wifi_bands[j].end = 0;
1427 num_of_wbrf_ranges = j;
1428 }
1429
1430 /* eliminate duplicate entries */
1431 for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1432 if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1433 wifi_bands[j].start = 0;
1434 wifi_bands[j].end = 0;
1435 }
1436 }
1437 }
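	/*
	 * All valid ranges are now packed at the front of wifi_bands with the
	 * remaining slots zeroed, matching the expected output shown above.
	 */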
1438
1439 /* Send the sorted wifi_bands to PMFW */
1440 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1441	/* PMFW may be busy; wait briefly and try to set the wifi_bands again */
1442 if (unlikely(ret == -EBUSY)) {
1443 mdelay(5);
1444 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1445 }
1446
1447 return ret;
1448}
1449
1450/**
1451 * smu_wbrf_event_handler - handle notify events
1452 *
1453 * @nb: notifier block
1454 * @action: event type
1455 * @_arg: event data
1456 *
1457 * Calls relevant amdgpu function in response to wbrf event
1458 * notification from kernel.
1459 */
1460static int smu_wbrf_event_handler(struct notifier_block *nb,
1461 unsigned long action, void *_arg)
1462{
1463 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1464
1465 switch (action) {
1466 case WBRF_CHANGED:
1467 schedule_delayed_work(&smu->wbrf_delayed_work,
1468 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1469 break;
1470 default:
1471 return NOTIFY_DONE;
1472 }
1473
1474 return NOTIFY_OK;
1475}
1476
1477/**
1478 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1479 *
1480 * @work: struct work_struct pointer
1481 *
1482 * Flood is over and driver will consume the latest exclusion ranges.
1483 */
1484static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1485{
1486 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1487
1488 smu_wbrf_handle_exclusion_ranges(smu);
1489}
1490
1491/**
1492 * smu_wbrf_support_check - check wbrf support
1493 *
1494 * @smu: smu_context pointer
1495 *
1496 * Verifies the ACPI interface whether wbrf is supported.
1497 */
1498static void smu_wbrf_support_check(struct smu_context *smu)
1499{
1500 struct amdgpu_device *adev = smu->adev;
1501
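	/*
	 * wbrf requires ASIC-side support, the amdgpu_wbrf module option and
	 * an ACPI WBRF consumer interface for this device.
	 */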
1502 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1503 acpi_amd_wbrf_supported_consumer(adev->dev);
1504
1505 if (smu->wbrf_supported)
1506 dev_info(adev->dev, "RF interference mitigation is supported\n");
1507}
1508
1509/**
1510 * smu_wbrf_init - init driver wbrf support
1511 *
1512 * @smu: smu_context pointer
1513 *
1514 * Verifies the AMD ACPI interfaces and registers with the wbrf
1515 * notifier chain if wbrf feature is supported.
1516 * Returns 0 on success, error on failure.
1517 */
1518static int smu_wbrf_init(struct smu_context *smu)
1519{
1520 int ret;
1521
1522 if (!smu->wbrf_supported)
1523 return 0;
1524
1525 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1526
1527 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1528 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1529 if (ret)
1530 return ret;
1531
1532 /*
1533	 * Some wifiband exclusion ranges may already be in place
1534	 * before our driver is loaded. Schedule a run to make sure
1535	 * our driver is aware of those exclusion ranges.
1536 */
1537 schedule_delayed_work(&smu->wbrf_delayed_work,
1538 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1539
1540 return 0;
1541}
1542
1543/**
1544 * smu_wbrf_fini - tear down driver wbrf support
1545 *
1546 * @smu: smu_context pointer
1547 *
1548 * Unregisters with the wbrf notifier chain.
1549 */
1550static void smu_wbrf_fini(struct smu_context *smu)
1551{
1552 if (!smu->wbrf_supported)
1553 return;
1554
1555 amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1556
1557 cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1558}
1559
1560static int smu_smc_hw_setup(struct smu_context *smu)
1561{
1562 struct smu_feature *feature = &smu->smu_feature;
1563 struct amdgpu_device *adev = smu->adev;
1564 uint8_t pcie_gen = 0, pcie_width = 0;
1565 uint64_t features_supported;
1566 int ret = 0;
1567
1568 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1569 case IP_VERSION(11, 0, 7):
1570 case IP_VERSION(11, 0, 11):
1571 case IP_VERSION(11, 5, 0):
1572 case IP_VERSION(11, 0, 12):
1573 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1574 dev_info(adev->dev, "dpm has been enabled\n");
1575 ret = smu_system_features_control(smu, true);
1576 if (ret)
1577 dev_err(adev->dev, "Failed system features control!\n");
1578 return ret;
1579 }
1580 break;
1581 default:
1582 break;
1583 }
1584
1585 ret = smu_init_display_count(smu, 0);
1586 if (ret) {
1587 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1588 return ret;
1589 }
1590
1591 ret = smu_set_driver_table_location(smu);
1592 if (ret) {
1593 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1594 return ret;
1595 }
1596
1597 /*
1598 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1599 */
1600 ret = smu_set_tool_table_location(smu);
1601 if (ret) {
1602 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1603 return ret;
1604 }
1605
1606 /*
1607	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
1608	 * to notify the SMC of the pool location.
1609 */
1610 ret = smu_notify_memory_pool_location(smu);
1611 if (ret) {
1612 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1613 return ret;
1614 }
1615
1616 /*
1617	 * It is assumed the pptable used before runpm is the same as
1618	 * the one used afterwards. Thus, we can reuse the stored
1619	 * copy and do not need to set up the pptable again.
1620 */
1621 if (!adev->in_runpm) {
1622 ret = smu_setup_pptable(smu);
1623 if (ret) {
1624 dev_err(adev->dev, "Failed to setup pptable!\n");
1625 return ret;
1626 }
1627 }
1628
1629 /* smu_dump_pptable(smu); */
1630
1631 /*
1632	 * With SCPM enabled, PSP is responsible for transferring the PPTable
1633	 * to SMU. Driver involvement is neither needed nor permitted.
1634 */
1635 if (!adev->scpm_enabled) {
1636 /*
1637 * Copy pptable bo in the vram to smc with SMU MSGs such as
1638 * SetDriverDramAddr and TransferTableDram2Smu.
1639 */
1640 ret = smu_write_pptable(smu);
1641 if (ret) {
1642 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1643 return ret;
1644 }
1645 }
1646
1647 /* issue Run*Btc msg */
1648 ret = smu_run_btc(smu);
1649 if (ret)
1650 return ret;
1651
1652	/* Enable UclkShadow if wbrf is supported */
1653 if (smu->wbrf_supported) {
1654 ret = smu_enable_uclk_shadow(smu, true);
1655 if (ret) {
1656 dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1657 return ret;
1658 }
1659 }
1660
1661 /*
1662	 * With SCPM enabled, these actions (and relevant messages) are
1663	 * neither needed nor permitted.
1664 */
1665 if (!adev->scpm_enabled) {
1666 ret = smu_feature_set_allowed_mask(smu);
1667 if (ret) {
1668 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1669 return ret;
1670 }
1671 }
1672
1673 ret = smu_system_features_control(smu, true);
1674 if (ret) {
1675 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1676 return ret;
1677 }
1678
1679 smu_init_xgmi_plpd_mode(smu);
1680
1681 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1682 if (ret) {
1683 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1684 return ret;
1685 }
1686 bitmap_copy(feature->supported,
1687 (unsigned long *)&features_supported,
1688 feature->feature_num);
1689
1690 if (!smu_is_dpm_running(smu))
1691 dev_info(adev->dev, "dpm has been disabled\n");
1692
1693 /*
1694 * Set initialized values (get from vbios) to dpm tables context such as
1695 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
1696 * type of clks.
1697 */
1698 ret = smu_set_default_dpm_table(smu);
1699 if (ret) {
1700 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1701 return ret;
1702 }
1703
1704 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
1705 pcie_gen = 4;
1706 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1707 pcie_gen = 3;
1708 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1709 pcie_gen = 2;
1710 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1711 pcie_gen = 1;
1712 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1713 pcie_gen = 0;
1714
1715 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1716	 * Bit 15:8: PCIE GEN, 0 to 4 corresponds to GEN1 to GEN5
1717	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
1718 */
1719 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
1720 pcie_width = 7;
1721 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1722 pcie_width = 6;
1723 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1724 pcie_width = 5;
1725 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1726 pcie_width = 4;
1727 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1728 pcie_width = 3;
1729 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1730 pcie_width = 2;
1731 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1732 pcie_width = 1;
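	/*
	 * Example: a GEN4-capable x16 link yields pcie_gen = 3 and
	 * pcie_width = 6; per the layout above, the ppt backend packs these
	 * into bits 15:8 and 7:0 of the message argument.
	 */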
1733 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1734 if (ret) {
1735 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1736 return ret;
1737 }
1738
1739 ret = smu_get_thermal_temperature_range(smu);
1740 if (ret) {
1741 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1742 return ret;
1743 }
1744
1745 ret = smu_enable_thermal_alert(smu);
1746 if (ret) {
1747 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1748 return ret;
1749 }
1750
1751 ret = smu_notify_display_change(smu);
1752 if (ret) {
1753 dev_err(adev->dev, "Failed to notify display change!\n");
1754 return ret;
1755 }
1756
1757 /*
1758 * Set min deep sleep dce fclk with bootup value from vbios via
1759 * SetMinDeepSleepDcefclk MSG.
1760 */
1761 ret = smu_set_min_dcef_deep_sleep(smu,
1762 smu->smu_table.boot_values.dcefclk / 100);
1763 if (ret) {
1764 dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1765 return ret;
1766 }
1767
1768	/* Init wbrf support. Properly set up the notifier */
1769 ret = smu_wbrf_init(smu);
1770 if (ret)
1771 dev_err(adev->dev, "Error during wbrf init call\n");
1772
1773 return ret;
1774}
1775
1776static int smu_start_smc_engine(struct smu_context *smu)
1777{
1778 struct amdgpu_device *adev = smu->adev;
1779 int ret = 0;
1780
1781 smu->smc_fw_state = SMU_FW_INIT;
1782
1783 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1784 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1785 if (smu->ppt_funcs->load_microcode) {
1786 ret = smu->ppt_funcs->load_microcode(smu);
1787 if (ret)
1788 return ret;
1789 }
1790 }
1791 }
1792
1793 if (smu->ppt_funcs->check_fw_status) {
1794 ret = smu->ppt_funcs->check_fw_status(smu);
1795 if (ret) {
1796 dev_err(adev->dev, "SMC is not ready\n");
1797 return ret;
1798 }
1799 }
1800
1801 /*
1802	 * Send the GetDriverIfVersion message to check whether the returned
1803	 * value matches the DRIVER_IF_VERSION in the SMC header.
1804 */
1805 ret = smu_check_fw_version(smu);
1806 if (ret)
1807 return ret;
1808
1809 return ret;
1810}
1811
1812static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1813{
1814 int ret;
1815 struct amdgpu_device *adev = ip_block->adev;
1816 struct smu_context *smu = adev->powerplay.pp_handle;
1817
1818 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1819 smu->pm_enabled = false;
1820 return 0;
1821 }
1822
1823 ret = smu_start_smc_engine(smu);
1824 if (ret) {
1825 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1826 return ret;
1827 }
1828
1829 /*
1830 * Check whether wbrf is supported. This needs to be done
1831 * before SMU setup starts since part of SMU configuration
1832 * relies on this.
1833 */
1834 smu_wbrf_support_check(smu);
1835
1836 if (smu->is_apu) {
1837 ret = smu_set_gfx_imu_enable(smu);
1838 if (ret)
1839 return ret;
1840 smu_dpm_set_vcn_enable(smu, true);
1841 smu_dpm_set_jpeg_enable(smu, true);
1842 smu_dpm_set_vpe_enable(smu, true);
1843 smu_dpm_set_umsch_mm_enable(smu, true);
1844 smu_set_mall_enable(smu);
1845 smu_set_gfx_cgpg(smu, true);
1846 }
1847
1848 if (!smu->pm_enabled)
1849 return 0;
1850
1851 ret = smu_get_driver_allowed_feature_mask(smu);
1852 if (ret)
1853 return ret;
1854
1855 ret = smu_smc_hw_setup(smu);
1856 if (ret) {
1857 dev_err(adev->dev, "Failed to setup smc hw!\n");
1858 return ret;
1859 }
1860
1861 /*
1862	 * Maximum sustainable clock retrieval is done here considering:
1863	 * 1. It is not needed on resume (from S3).
1864	 * 2. DAL settings come between .hw_init and .late_init of SMU.
1865	 *    And DAL needs to know the maximum sustainable clocks. Thus
1866	 *    it cannot be put in .late_init().
1867 */
1868 ret = smu_init_max_sustainable_clocks(smu);
1869 if (ret) {
1870 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1871 return ret;
1872 }
1873
1874 adev->pm.dpm_enabled = true;
1875
1876 dev_info(adev->dev, "SMU is initialized successfully!\n");
1877
1878 return 0;
1879}
1880
1881static int smu_disable_dpms(struct smu_context *smu)
1882{
1883 struct amdgpu_device *adev = smu->adev;
1884 int ret = 0;
1885 bool use_baco = !smu->is_apu &&
1886 ((amdgpu_in_reset(adev) &&
1887 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1888 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1889
1890 /*
1891	 * For the IP versions below, PMFW handles the DPM features (disablement or
1892	 * others) properly on suspend/reset/unload. Driver involvement may cause unexpected issues.
1893 */
1894 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1895 case IP_VERSION(13, 0, 0):
1896 case IP_VERSION(13, 0, 7):
1897 case IP_VERSION(13, 0, 10):
1898 case IP_VERSION(14, 0, 2):
1899 case IP_VERSION(14, 0, 3):
1900 return 0;
1901 default:
1902 break;
1903 }
1904
1905 /*
1906 * For custom pptable uploading, skip the DPM features
1907 * disable process on Navi1x ASICs.
1908 * - As the gfx related features are under control of
1909 * RLC on those ASICs. RLC reinitialization will be
1910	 *   needed to reenable them. That would cost much more
1911	 *   effort.
1912 *
1913 * - SMU firmware can handle the DPM reenablement
1914 * properly.
1915 */
1916 if (smu->uploading_custom_pp_table) {
1917 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1918 case IP_VERSION(11, 0, 0):
1919 case IP_VERSION(11, 0, 5):
1920 case IP_VERSION(11, 0, 9):
1921 case IP_VERSION(11, 0, 7):
1922 case IP_VERSION(11, 0, 11):
1923 case IP_VERSION(11, 5, 0):
1924 case IP_VERSION(11, 0, 12):
1925 case IP_VERSION(11, 0, 13):
1926 return 0;
1927 default:
1928 break;
1929 }
1930 }
1931
1932 /*
1933	 * For the ASICs below, PMFW will handle the DPM features disablement
1934	 * properly on BACO entry. Driver involvement is unnecessary.
1935 */
1936 if (use_baco) {
1937 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1938 case IP_VERSION(11, 0, 7):
1939 case IP_VERSION(11, 0, 0):
1940 case IP_VERSION(11, 0, 5):
1941 case IP_VERSION(11, 0, 9):
1942 case IP_VERSION(13, 0, 7):
1943 return 0;
1944 default:
1945 break;
1946 }
1947 }
1948
1949 /*
1950 * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly
1951 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1952 */
1953 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
1954 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
1955 return 0;
1956
1957 /*
1958 * For gpu reset, runpm and hibernation through BACO,
1959 * BACO feature has to be kept enabled.
1960 */
1961 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1962 ret = smu_disable_all_features_with_exception(smu,
1963 SMU_FEATURE_BACO_BIT);
1964 if (ret)
1965 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1966 } else {
1967 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1968 if (!adev->scpm_enabled) {
1969 ret = smu_system_features_control(smu, false);
1970 if (ret)
1971 dev_err(adev->dev, "Failed to disable smu features.\n");
1972 }
1973 }
1974
1975	/* Notify SMU that RLC is going to be off and stop the RLC/SMU interaction;
1976	 * otherwise SMU will hang while interacting with RLC if RLC is halted.
1977	 * This is a WA for the Vangogh ASIC which fixes the SMU hang issue.
1978 */
1979 ret = smu_notify_rlc_state(smu, false);
1980 if (ret) {
1981		dev_err(adev->dev, "Failed to notify RLC state!\n");
1982 return ret;
1983 }
1984
1985 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1986 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1987 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1988 adev->gfx.rlc.funcs->stop(adev);
1989
1990 return ret;
1991}
1992
1993static int smu_smc_hw_cleanup(struct smu_context *smu)
1994{
1995 struct amdgpu_device *adev = smu->adev;
1996 int ret = 0;
1997
1998 smu_wbrf_fini(smu);
1999
2000 cancel_work_sync(&smu->throttling_logging_work);
2001 cancel_work_sync(&smu->interrupt_work);
2002
2003 ret = smu_disable_thermal_alert(smu);
2004 if (ret) {
2005		dev_err(adev->dev, "Failed to disable thermal alert!\n");
2006 return ret;
2007 }
2008
2009 cancel_delayed_work_sync(&smu->swctf_delayed_work);
2010
2011 ret = smu_disable_dpms(smu);
2012 if (ret) {
2013		dev_err(adev->dev, "Failed to disable dpm features!\n");
2014 return ret;
2015 }
2016
2017 return 0;
2018}
2019
2020static int smu_reset_mp1_state(struct smu_context *smu)
2021{
2022 struct amdgpu_device *adev = smu->adev;
2023 int ret = 0;
2024
2025 if ((!adev->in_runpm) && (!adev->in_suspend) &&
2026 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2027 IP_VERSION(13, 0, 10) &&
2028 !amdgpu_device_has_display_hardware(adev))
2029 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2030
2031 return ret;
2032}
2033
2034static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2035{
2036 struct amdgpu_device *adev = ip_block->adev;
2037 struct smu_context *smu = adev->powerplay.pp_handle;
2038 int ret;
2039
2040 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2041 return 0;
2042
2043 smu_dpm_set_vcn_enable(smu, false);
2044 smu_dpm_set_jpeg_enable(smu, false);
2045 smu_dpm_set_vpe_enable(smu, false);
2046 smu_dpm_set_umsch_mm_enable(smu, false);
2047
2048 adev->vcn.cur_state = AMD_PG_STATE_GATE;
2049 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2050
2051 if (!smu->pm_enabled)
2052 return 0;
2053
2054 adev->pm.dpm_enabled = false;
2055
2056 ret = smu_smc_hw_cleanup(smu);
2057 if (ret)
2058 return ret;
2059
2060 ret = smu_reset_mp1_state(smu);
2061 if (ret)
2062 return ret;
2063
2064 return 0;
2065}
2066
2067static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2068{
2069 struct amdgpu_device *adev = ip_block->adev;
2070 struct smu_context *smu = adev->powerplay.pp_handle;
2071
2072 kfree(smu);
2073}
2074
2075static int smu_reset(struct smu_context *smu)
2076{
2077 struct amdgpu_device *adev = smu->adev;
2078 struct amdgpu_ip_block *ip_block;
2079 int ret;
2080
2081 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2082 if (!ip_block)
2083 return -EINVAL;
2084
2085 ret = smu_hw_fini(ip_block);
2086 if (ret)
2087 return ret;
2088
2089 ret = smu_hw_init(ip_block);
2090 if (ret)
2091 return ret;
2092
2093 ret = smu_late_init(ip_block);
2094 if (ret)
2095 return ret;
2096
2097 return 0;
2098}
2099
2100static int smu_suspend(struct amdgpu_ip_block *ip_block)
2101{
2102 struct amdgpu_device *adev = ip_block->adev;
2103 struct smu_context *smu = adev->powerplay.pp_handle;
2104 int ret;
2105 uint64_t count;
2106
2107 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2108 return 0;
2109
2110 if (!smu->pm_enabled)
2111 return 0;
2112
2113 adev->pm.dpm_enabled = false;
2114
2115 ret = smu_smc_hw_cleanup(smu);
2116 if (ret)
2117 return ret;
2118
2119 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2120
2121 smu_set_gfx_cgpg(smu, false);
2122
2123 /*
2124	 * PMFW resets the entrycount when the device is suspended, so we save the
2125	 * last value to be used when we resume, keeping it consistent.
2126 */
2127 ret = smu_get_entrycount_gfxoff(smu, &count);
2128 if (!ret)
2129 adev->gfx.gfx_off_entrycount = count;
2130
2131 /* clear this on suspend so it will get reprogrammed on resume */
2132 smu->workload_mask = 0;
2133
2134 return 0;
2135}
2136
2137static int smu_resume(struct amdgpu_ip_block *ip_block)
2138{
2139 int ret;
2140 struct amdgpu_device *adev = ip_block->adev;
2141 struct smu_context *smu = adev->powerplay.pp_handle;
2142
2143	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2144 return 0;
2145
2146 if (!smu->pm_enabled)
2147 return 0;
2148
2149 dev_info(adev->dev, "SMU is resuming...\n");
2150
2151 ret = smu_start_smc_engine(smu);
2152 if (ret) {
2153 dev_err(adev->dev, "SMC engine is not correctly up!\n");
2154 return ret;
2155 }
2156
2157 ret = smu_smc_hw_setup(smu);
2158 if (ret) {
2159 dev_err(adev->dev, "Failed to setup smc hw!\n");
2160 return ret;
2161 }
2162
2163 ret = smu_set_gfx_imu_enable(smu);
2164 if (ret)
2165 return ret;
2166
2167 smu_set_gfx_cgpg(smu, true);
2168
2169 smu->disable_uclk_switch = 0;
2170
2171 adev->pm.dpm_enabled = true;
2172
2173 dev_info(adev->dev, "SMU is resumed successfully!\n");
2174
2175 return 0;
2176}
2177
2178static int smu_display_configuration_change(void *handle,
2179 const struct amd_pp_display_configuration *display_config)
2180{
2181 struct smu_context *smu = handle;
2182
2183 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2184 return -EOPNOTSUPP;
2185
2186 if (!display_config)
2187 return -EINVAL;
2188
2189 smu_set_min_dcef_deep_sleep(smu,
2190 display_config->min_dcef_deep_sleep_set_clk / 100);
2191
2192 return 0;
2193}
2194
2195static int smu_set_clockgating_state(void *handle,
2196 enum amd_clockgating_state state)
2197{
2198 return 0;
2199}
2200
2201static int smu_set_powergating_state(void *handle,
2202 enum amd_powergating_state state)
2203{
2204 return 0;
2205}
2206
2207static int smu_enable_umd_pstate(void *handle,
2208 enum amd_dpm_forced_level *level)
2209{
2210 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2211 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2212 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2213 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2214
2215	struct smu_context *smu = (struct smu_context *)handle;
2216 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2217
2218 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2219 return -EINVAL;
2220
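	/*
	 * Profiling (UMD pstate) levels need stable clocks, so GPO, gfx ULV
	 * and deep sleep are disabled on entry and re-enabled on exit in
	 * reverse order.
	 */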
2221 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2222		/* enter umd pstate, save current level, disable gfx cg */
2223 if (*level & profile_mode_mask) {
2224 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2225 smu_gpo_control(smu, false);
2226 smu_gfx_ulv_control(smu, false);
2227 smu_deep_sleep_control(smu, false);
2228 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2229 }
2230 } else {
2231		/* exit umd pstate, restore level, enable gfx cg */
2232 if (!(*level & profile_mode_mask)) {
2233 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2234 *level = smu_dpm_ctx->saved_dpm_level;
2235 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2236 smu_deep_sleep_control(smu, true);
2237 smu_gfx_ulv_control(smu, true);
2238 smu_gpo_control(smu, true);
2239 }
2240 }
2241
2242 return 0;
2243}
2244
2245static int smu_bump_power_profile_mode(struct smu_context *smu,
2246 long *custom_params,
2247 u32 custom_params_max_idx)
2248{
2249 u32 workload_mask = 0;
2250 int i, ret = 0;
2251
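	/*
	 * Fold every profile holding a refcount into one bitmask, e.g. references
	 * on FULLSCREEN3D and VIDEO yield
	 * BIT(PP_SMC_POWER_PROFILE_FULLSCREEN3D) | BIT(PP_SMC_POWER_PROFILE_VIDEO).
	 */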
2252 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2253 if (smu->workload_refcount[i])
2254 workload_mask |= 1 << i;
2255 }
2256
2257 if (smu->workload_mask == workload_mask)
2258 return 0;
2259
2260 if (smu->ppt_funcs->set_power_profile_mode)
2261 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2262 custom_params,
2263 custom_params_max_idx);
2264
2265 if (!ret)
2266 smu->workload_mask = workload_mask;
2267
2268 return ret;
2269}
2270
2271static void smu_power_profile_mode_get(struct smu_context *smu,
2272 enum PP_SMC_POWER_PROFILE profile_mode)
2273{
2274 smu->workload_refcount[profile_mode]++;
2275}
2276
2277static void smu_power_profile_mode_put(struct smu_context *smu,
2278 enum PP_SMC_POWER_PROFILE profile_mode)
2279{
2280 if (smu->workload_refcount[profile_mode])
2281 smu->workload_refcount[profile_mode]--;
2282}
2283
2284static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2285 enum amd_dpm_forced_level level,
2286 bool skip_display_settings)
2287{
2288 int ret = 0;
2289 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2290
2291 if (!skip_display_settings) {
2292 ret = smu_display_config_changed(smu);
2293 if (ret) {
2294 dev_err(smu->adev->dev, "Failed to change display config!");
2295 return ret;
2296 }
2297 }
2298
2299 ret = smu_apply_clocks_adjust_rules(smu);
2300 if (ret) {
2301 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2302 return ret;
2303 }
2304
2305 if (!skip_display_settings) {
2306 ret = smu_notify_smc_display_config(smu);
2307 if (ret) {
2308 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2309 return ret;
2310 }
2311 }
2312
2313 if (smu_dpm_ctx->dpm_level != level) {
2314 ret = smu_asic_set_performance_level(smu, level);
2315 if (ret) {
2316 dev_err(smu->adev->dev, "Failed to set performance level!");
2317 return ret;
2318 }
2319
2320 /* update the saved copy */
2321 smu_dpm_ctx->dpm_level = level;
2322 }
2323
2324 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2325 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2326 smu_bump_power_profile_mode(smu, NULL, 0);
2327
2328 return ret;
2329}
2330
2331static int smu_handle_task(struct smu_context *smu,
2332 enum amd_dpm_forced_level level,
2333 enum amd_pp_task task_id)
2334{
2335 int ret = 0;
2336
2337 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2338 return -EOPNOTSUPP;
2339
2340 switch (task_id) {
2341 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2342 ret = smu_pre_display_config_changed(smu);
2343 if (ret)
2344 return ret;
2345 ret = smu_adjust_power_state_dynamic(smu, level, false);
2346 break;
2347	case AMD_PP_TASK_COMPLETE_INIT:
2348	case AMD_PP_TASK_READJUST_POWER_STATE:
2349		ret = smu_adjust_power_state_dynamic(smu, level, true);
2350		break;
2353 default:
2354 break;
2355 }
2356
2357 return ret;
2358}
2359
2360static int smu_handle_dpm_task(void *handle,
2361 enum amd_pp_task task_id,
2362 enum amd_pm_state_type *user_state)
2363{
2364 struct smu_context *smu = handle;
2365 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2366
2367 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2369}
2370
2371static int smu_switch_power_profile(void *handle,
2372 enum PP_SMC_POWER_PROFILE type,
2373 bool enable)
2374{
2375 struct smu_context *smu = handle;
2376 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2377 int ret;
2378
2379 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2380 return -EOPNOTSUPP;
2381
2382 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
2383 return -EINVAL;
2384
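	/*
	 * Adjust the profile refcount first, then try to commit the new mask;
	 * on failure the refcount change is rolled back so the bookkeeping
	 * stays balanced.
	 */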
2385 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2386 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2387 if (enable)
2388 smu_power_profile_mode_get(smu, type);
2389 else
2390 smu_power_profile_mode_put(smu, type);
2391 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2392 if (ret) {
2393 if (enable)
2394 smu_power_profile_mode_put(smu, type);
2395 else
2396 smu_power_profile_mode_get(smu, type);
2397 return ret;
2398 }
2399 }
2400
2401 return 0;
2402}
2403
2404static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2405{
2406 struct smu_context *smu = handle;
2407 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2408
2409 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2410 return -EOPNOTSUPP;
2411
2412 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2413 return -EINVAL;
2414
2415 return smu_dpm_ctx->dpm_level;
2416}
2417
2418static int smu_force_performance_level(void *handle,
2419 enum amd_dpm_forced_level level)
2420{
2421 struct smu_context *smu = handle;
2422 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2423 int ret = 0;
2424
2425 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2426 return -EOPNOTSUPP;
2427
2428 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2429 return -EINVAL;
2430
2431 ret = smu_enable_umd_pstate(smu, &level);
2432 if (ret)
2433 return ret;
2434
2435 ret = smu_handle_task(smu, level,
2436 AMD_PP_TASK_READJUST_POWER_STATE);
2437
2438 /* reset user dpm clock state */
2439 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2440 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2441 smu->user_dpm_profile.clk_dependency = 0;
2442 }
2443
2444 return ret;
2445}
2446
2447static int smu_set_display_count(void *handle, uint32_t count)
2448{
2449 struct smu_context *smu = handle;
2450
2451 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2452 return -EOPNOTSUPP;
2453
2454 return smu_init_display_count(smu, count);
2455}
2456
2457static int smu_force_smuclk_levels(struct smu_context *smu,
2458 enum smu_clk_type clk_type,
2459 uint32_t mask)
2460{
2461 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2462 int ret = 0;
2463
2464 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2465 return -EOPNOTSUPP;
2466
2467 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2468 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2469 return -EINVAL;
2470 }
2471
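	/*
	 * Record the user's mask (unless a stored profile is being replayed) so
	 * smu_restore_dpm_user_profile() can reapply it after reset/resume.
	 */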
2472 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2473 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2474 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2475 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2476 smu_set_user_clk_dependencies(smu, clk_type);
2477 }
2478 }
2479
2480 return ret;
2481}
2482
2483static int smu_force_ppclk_levels(void *handle,
2484 enum pp_clock_type type,
2485 uint32_t mask)
2486{
2487 struct smu_context *smu = handle;
2488 enum smu_clk_type clk_type;
2489
2490 switch (type) {
2491 case PP_SCLK:
2492 clk_type = SMU_SCLK; break;
2493 case PP_MCLK:
2494 clk_type = SMU_MCLK; break;
2495 case PP_PCIE:
2496 clk_type = SMU_PCIE; break;
2497 case PP_SOCCLK:
2498 clk_type = SMU_SOCCLK; break;
2499 case PP_FCLK:
2500 clk_type = SMU_FCLK; break;
2501 case PP_DCEFCLK:
2502 clk_type = SMU_DCEFCLK; break;
2503 case PP_VCLK:
2504 clk_type = SMU_VCLK; break;
2505 case PP_VCLK1:
2506 clk_type = SMU_VCLK1; break;
2507 case PP_DCLK:
2508 clk_type = SMU_DCLK; break;
2509 case PP_DCLK1:
2510 clk_type = SMU_DCLK1; break;
2511 case OD_SCLK:
2512 clk_type = SMU_OD_SCLK; break;
2513 case OD_MCLK:
2514 clk_type = SMU_OD_MCLK; break;
2515 case OD_VDDC_CURVE:
2516 clk_type = SMU_OD_VDDC_CURVE; break;
2517 case OD_RANGE:
2518 clk_type = SMU_OD_RANGE; break;
2519 default:
2520 return -EINVAL;
2521 }
2522
2523 return smu_force_smuclk_levels(smu, clk_type, mask);
2524}
2525
2526/*
2527 * On system suspend or reset, the dpm_enabled
2528 * flag will be cleared, so that those SMU services which
2529 * are no longer supported will be gated.
2530 * However, the mp1 state setting should still be granted
2531 * even with the dpm_enabled flag cleared.
2532 */
2533static int smu_set_mp1_state(void *handle,
2534 enum pp_mp1_state mp1_state)
2535{
2536 struct smu_context *smu = handle;
2537 int ret = 0;
2538
2539 if (!smu->pm_enabled)
2540 return -EOPNOTSUPP;
2541
2542 if (smu->ppt_funcs &&
2543 smu->ppt_funcs->set_mp1_state)
2544 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2545
2546 return ret;
2547}
2548
2549static int smu_set_df_cstate(void *handle,
2550 enum pp_df_cstate state)
2551{
2552 struct smu_context *smu = handle;
2553 int ret = 0;
2554
2555 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2556 return -EOPNOTSUPP;
2557
2558 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2559 return 0;
2560
2561 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2562 if (ret)
2563 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2564
2565 return ret;
2566}
2567
2568int smu_write_watermarks_table(struct smu_context *smu)
2569{
2570 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2571 return -EOPNOTSUPP;
2572
2573 return smu_set_watermarks_table(smu, NULL);
2574}
2575
2576static int smu_set_watermarks_for_clock_ranges(void *handle,
2577 struct pp_smu_wm_range_sets *clock_ranges)
2578{
2579 struct smu_context *smu = handle;
2580
2581 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2582 return -EOPNOTSUPP;
2583
2584 if (smu->disable_watermark)
2585 return 0;
2586
2587 return smu_set_watermarks_table(smu, clock_ranges);
2588}
2589
2590int smu_set_ac_dc(struct smu_context *smu)
2591{
2592 int ret = 0;
2593
2594 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2595 return -EOPNOTSUPP;
2596
2597 /* controlled by firmware */
2598 if (smu->dc_controlled_by_gpio)
2599 return 0;
2600
2601 ret = smu_set_power_source(smu,
2602 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2603 SMU_POWER_SOURCE_DC);
2604 if (ret)
2605 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2606 smu->adev->pm.ac_power ? "AC" : "DC");
2607
2608 return ret;
2609}
2610
2611const struct amd_ip_funcs smu_ip_funcs = {
2612 .name = "smu",
2613 .early_init = smu_early_init,
2614 .late_init = smu_late_init,
2615 .sw_init = smu_sw_init,
2616 .sw_fini = smu_sw_fini,
2617 .hw_init = smu_hw_init,
2618 .hw_fini = smu_hw_fini,
2619 .late_fini = smu_late_fini,
2620 .suspend = smu_suspend,
2621 .resume = smu_resume,
2622 .is_idle = NULL,
2623 .check_soft_reset = NULL,
2624 .wait_for_idle = NULL,
2625 .soft_reset = NULL,
2626 .set_clockgating_state = smu_set_clockgating_state,
2627 .set_powergating_state = smu_set_powergating_state,
2628};
2629
2630const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2631 .type = AMD_IP_BLOCK_TYPE_SMC,
2632 .major = 11,
2633 .minor = 0,
2634 .rev = 0,
2635 .funcs = &smu_ip_funcs,
2636};
2637
2638const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2639 .type = AMD_IP_BLOCK_TYPE_SMC,
2640 .major = 12,
2641 .minor = 0,
2642 .rev = 0,
2643 .funcs = &smu_ip_funcs,
2644};
2645
2646const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2647 .type = AMD_IP_BLOCK_TYPE_SMC,
2648 .major = 13,
2649 .minor = 0,
2650 .rev = 0,
2651 .funcs = &smu_ip_funcs,
2652};
2653
2654const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2655 .type = AMD_IP_BLOCK_TYPE_SMC,
2656 .major = 14,
2657 .minor = 0,
2658 .rev = 0,
2659 .funcs = &smu_ip_funcs,
2660};
2661
2662static int smu_load_microcode(void *handle)
2663{
2664 struct smu_context *smu = handle;
2665 struct amdgpu_device *adev = smu->adev;
2666 int ret = 0;
2667
2668 if (!smu->pm_enabled)
2669 return -EOPNOTSUPP;
2670
2671	/* This should be used for non-PSP loading only */
2672 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2673 return 0;
2674
2675 if (smu->ppt_funcs->load_microcode) {
2676 ret = smu->ppt_funcs->load_microcode(smu);
2677 if (ret) {
2678 dev_err(adev->dev, "Load microcode failed\n");
2679 return ret;
2680 }
2681 }
2682
2683 if (smu->ppt_funcs->check_fw_status) {
2684 ret = smu->ppt_funcs->check_fw_status(smu);
2685 if (ret) {
2686 dev_err(adev->dev, "SMC is not ready\n");
2687 return ret;
2688 }
2689 }
2690
2691 return ret;
2692}
2693
2694static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2695{
2696 int ret = 0;
2697
2698 if (smu->ppt_funcs->set_gfx_cgpg)
2699 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2700
2701 return ret;
2702}
2703
2704static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2705{
2706 struct smu_context *smu = handle;
2707 int ret = 0;
2708
2709 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2710 return -EOPNOTSUPP;
2711
2712 if (!smu->ppt_funcs->set_fan_speed_rpm)
2713 return -EOPNOTSUPP;
2714
2715 if (speed == U32_MAX)
2716 return -EINVAL;
2717
2718 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2719 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2720 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2721 smu->user_dpm_profile.fan_speed_rpm = speed;
2722
2723		/* Override the custom PWM setting as the two cannot co-exist */
2724 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2725 smu->user_dpm_profile.fan_speed_pwm = 0;
2726 }
2727
2728 return ret;
2729}
2730
2731/**
2732 * smu_get_power_limit - Request one of the SMU Power Limits
2733 *
2734 * @handle: pointer to smu context
2735 * @limit: requested limit is written back to this variable
2736 * @pp_limit_level: &pp_power_limit_level selecting which limit of the power to return
2737 * @pp_power_type: &pp_power_type the type of power (e.g. sustained or fast)
2738 *
2739 * Return: 0 on success, <0 on error
2740 */
2741int smu_get_power_limit(void *handle,
2742 uint32_t *limit,
2743 enum pp_power_limit_level pp_limit_level,
2744 enum pp_power_type pp_power_type)
2745{
2746 struct smu_context *smu = handle;
2747 struct amdgpu_device *adev = smu->adev;
2748 enum smu_ppt_limit_level limit_level;
2749 uint32_t limit_type;
2750 int ret = 0;
2751
2752 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2753 return -EOPNOTSUPP;
2754
2755 switch (pp_power_type) {
2756 case PP_PWR_TYPE_SUSTAINED:
2757 limit_type = SMU_DEFAULT_PPT_LIMIT;
2758 break;
2759 case PP_PWR_TYPE_FAST:
2760 limit_type = SMU_FAST_PPT_LIMIT;
2761 break;
2762 default:
2763 return -EOPNOTSUPP;
2764 }
2765
2766 switch (pp_limit_level) {
2767 case PP_PWR_LIMIT_CURRENT:
2768 limit_level = SMU_PPT_LIMIT_CURRENT;
2769 break;
2770 case PP_PWR_LIMIT_DEFAULT:
2771 limit_level = SMU_PPT_LIMIT_DEFAULT;
2772 break;
2773 case PP_PWR_LIMIT_MAX:
2774 limit_level = SMU_PPT_LIMIT_MAX;
2775 break;
2776 case PP_PWR_LIMIT_MIN:
2777 limit_level = SMU_PPT_LIMIT_MIN;
2778 break;
2779 default:
2780 return -EOPNOTSUPP;
2781 }
2782
2783 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2784 if (smu->ppt_funcs->get_ppt_limit)
2785 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2786 } else {
2787 switch (limit_level) {
2788 case SMU_PPT_LIMIT_CURRENT:
2789 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2790 case IP_VERSION(13, 0, 2):
2791 case IP_VERSION(13, 0, 6):
2792 case IP_VERSION(13, 0, 14):
2793 case IP_VERSION(11, 0, 7):
2794 case IP_VERSION(11, 0, 11):
2795 case IP_VERSION(11, 0, 12):
2796 case IP_VERSION(11, 0, 13):
2797 ret = smu_get_asic_power_limits(smu,
2798 &smu->current_power_limit,
2799 NULL, NULL, NULL);
2800 break;
2801 default:
2802 break;
2803 }
2804 *limit = smu->current_power_limit;
2805 break;
2806 case SMU_PPT_LIMIT_DEFAULT:
2807 *limit = smu->default_power_limit;
2808 break;
2809 case SMU_PPT_LIMIT_MAX:
2810 *limit = smu->max_power_limit;
2811 break;
2812 case SMU_PPT_LIMIT_MIN:
2813 *limit = smu->min_power_limit;
2814 break;
2815 default:
2816 return -EINVAL;
2817 }
2818 }
2819
2820 return ret;
2821}
2822
2823static int smu_set_power_limit(void *handle, uint32_t limit)
2824{
2825 struct smu_context *smu = handle;
2826 uint32_t limit_type = limit >> 24;
2827 int ret = 0;
2828
2829 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2830 return -EOPNOTSUPP;
2831
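	/*
	 * The caller packs the limit type into bits 31:24 and the limit value
	 * into bits 23:0, e.g. (SMU_FAST_PPT_LIMIT << 24) | 220 would request
	 * a 220 W fast PPT limit (watt units assumed, matching the hwmon
	 * power1_cap handling).
	 */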
2832 limit &= (1<<24)-1;
2833 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2834 if (smu->ppt_funcs->set_power_limit)
2835 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2836
2837 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2838 dev_err(smu->adev->dev,
2839 "New power limit (%d) is out of range [%d,%d]\n",
2840 limit, smu->min_power_limit, smu->max_power_limit);
2841 return -EINVAL;
2842 }
2843
2844 if (!limit)
2845 limit = smu->current_power_limit;
2846
2847 if (smu->ppt_funcs->set_power_limit) {
2848 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2849 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2850 smu->user_dpm_profile.power_limit = limit;
2851 }
2852
2853 return ret;
2854}
2855
2856static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2857{
2858 int ret = 0;
2859
2860 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2861 return -EOPNOTSUPP;
2862
2863 if (smu->ppt_funcs->print_clk_levels)
2864 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2865
2866 return ret;
2867}
2868
2869static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2870{
2871 enum smu_clk_type clk_type;
2872
2873 switch (type) {
2874 case PP_SCLK:
2875 clk_type = SMU_SCLK; break;
2876 case PP_MCLK:
2877 clk_type = SMU_MCLK; break;
2878 case PP_PCIE:
2879 clk_type = SMU_PCIE; break;
2880 case PP_SOCCLK:
2881 clk_type = SMU_SOCCLK; break;
2882 case PP_FCLK:
2883 clk_type = SMU_FCLK; break;
2884 case PP_DCEFCLK:
2885 clk_type = SMU_DCEFCLK; break;
2886 case PP_VCLK:
2887 clk_type = SMU_VCLK; break;
2888 case PP_VCLK1:
2889 clk_type = SMU_VCLK1; break;
2890 case PP_DCLK:
2891 clk_type = SMU_DCLK; break;
2892 case PP_DCLK1:
2893 clk_type = SMU_DCLK1; break;
2894 case OD_SCLK:
2895 clk_type = SMU_OD_SCLK; break;
2896 case OD_MCLK:
2897 clk_type = SMU_OD_MCLK; break;
2898 case OD_VDDC_CURVE:
2899 clk_type = SMU_OD_VDDC_CURVE; break;
2900 case OD_RANGE:
2901 clk_type = SMU_OD_RANGE; break;
2902 case OD_VDDGFX_OFFSET:
2903 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2904 case OD_CCLK:
2905 clk_type = SMU_OD_CCLK; break;
2906 case OD_FAN_CURVE:
2907 clk_type = SMU_OD_FAN_CURVE; break;
2908 case OD_ACOUSTIC_LIMIT:
2909 clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2910 case OD_ACOUSTIC_TARGET:
2911 clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2912 case OD_FAN_TARGET_TEMPERATURE:
2913 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2914 case OD_FAN_MINIMUM_PWM:
2915 clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2916 case OD_FAN_ZERO_RPM_ENABLE:
2917 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
2918 case OD_FAN_ZERO_RPM_STOP_TEMP:
2919 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
2920 default:
2921 clk_type = SMU_CLK_COUNT; break;
2922 }
2923
2924 return clk_type;
2925}
2926
2927static int smu_print_ppclk_levels(void *handle,
2928 enum pp_clock_type type,
2929 char *buf)
2930{
2931 struct smu_context *smu = handle;
2932 enum smu_clk_type clk_type;
2933
2934 clk_type = smu_convert_to_smuclk(type);
2935 if (clk_type == SMU_CLK_COUNT)
2936 return -EINVAL;
2937
2938 return smu_print_smuclk_levels(smu, clk_type, buf);
2939}
2940
2941static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2942{
2943 struct smu_context *smu = handle;
2944 enum smu_clk_type clk_type;
2945
2946 clk_type = smu_convert_to_smuclk(type);
2947 if (clk_type == SMU_CLK_COUNT)
2948 return -EINVAL;
2949
2950 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2951 return -EOPNOTSUPP;
2952
2953 if (!smu->ppt_funcs->emit_clk_levels)
2954 return -ENOENT;
2955
2956 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2957
2958}
2959
2960static int smu_od_edit_dpm_table(void *handle,
2961 enum PP_OD_DPM_TABLE_COMMAND type,
2962 long *input, uint32_t size)
2963{
2964 struct smu_context *smu = handle;
2965 int ret = 0;
2966
2967 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2968 return -EOPNOTSUPP;
2969
2970	if (smu->ppt_funcs->od_edit_dpm_table)
2971		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2973
2974 return ret;
2975}
2976
2977static int smu_read_sensor(void *handle,
2978 int sensor,
2979 void *data,
2980 int *size_arg)
2981{
2982 struct smu_context *smu = handle;
2983 struct smu_umd_pstate_table *pstate_table =
2984 &smu->pstate_table;
2985 int ret = 0;
2986 uint32_t *size, size_val;
2987
2988 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2989 return -EOPNOTSUPP;
2990
2991 if (!data || !size_arg)
2992 return -EINVAL;
2993
2994 size_val = *size_arg;
2995 size = &size_val;
2996
2997 if (smu->ppt_funcs->read_sensor)
2998 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2999			goto out;
3000
3001 switch (sensor) {
3002 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3003 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3004 *size = 4;
3005 break;
3006 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3007 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3008 *size = 4;
3009 break;
3010 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3011 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3012 *size = 4;
3013 break;
3014 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3015 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3016 *size = 4;
3017 break;
3018 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3019 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3020 *size = 8;
3021 break;
3022 case AMDGPU_PP_SENSOR_UVD_POWER:
3023 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3024 *size = 4;
3025 break;
3026 case AMDGPU_PP_SENSOR_VCE_POWER:
3027 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3028 *size = 4;
3029 break;
3030 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3031 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
3032 *size = 4;
3033 break;
3034 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3035 *(uint32_t *)data = 0;
3036 *size = 4;
3037 break;
3038 default:
3039 *size = 0;
3040 ret = -EOPNOTSUPP;
3041 break;
3042 }
3043
3044unlock:
3045 // assign uint32_t to int
3046 *size_arg = size_val;
3047
3048 return ret;
3049}
3050
static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{
	int ret = -EOPNOTSUPP;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{
	int ret = -EOPNOTSUPP;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

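/*
 * The param layout matches what a pp_power_profile_mode sysfs write
 * produces: param[0..param_size - 1] hold custom heuristic settings (only
 * consumed for the CUSTOM profile) and param[param_size] holds the profile
 * id itself. Illustrative values only:
 *
 *	long param[] = { 0, 5, 1, 0, PP_SMC_POWER_PROFILE_CUSTOM };
 *
 *	ret = smu_set_power_profile_mode(smu, param, ARRAY_SIZE(param) - 1);
 */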
static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;
	bool custom = false;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	/* the requested profile id sits at the end of param */
	if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
		custom = true;
		/* clear frontend mask so custom changes propagate */
		smu->workload_mask = 0;
	}

	if ((param[param_size] != smu->power_profile_mode) || custom) {
		/* clear the old user preference */
		smu_power_profile_mode_put(smu, smu->power_profile_mode);
		/* set the new user preference */
		smu_power_profile_mode_get(smu, param[param_size]);
		ret = smu_bump_power_profile_mode(smu,
						  custom ? param : NULL,
						  custom ? param_size : 0);
		if (ret)
			smu_power_profile_mode_put(smu, param[param_size]);
		else
			/* store the user's preference */
			smu->power_profile_mode = param[param_size];
	}

	return ret;
}

static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

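/*
 * value is one of the AMD_FAN_CTRL_* modes (NONE, MANUAL, AUTO). Leaving
 * MANUAL mode also drops any cached user fan-speed override, so switching
 * back to AUTO hands fan policy fully back to the firmware.
 */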
static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

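/*
 * PWM follows the hwmon convention (pwm1, nominally 0..255). A PWM override
 * and an RPM override cannot coexist: setting one clears the other's cached
 * value and flag in user_dpm_profile.
 */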
static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override custom RPM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}

static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

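/*
 * BACO is "Bus Active, Chip Off". get_bamaco_support() reports a support
 * mask covering both BACO and its MACO variant rather than a plain boolean,
 * hence the int return.
 */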
static int smu_get_baco_capability(void *handle)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled)
		return 0;

	if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
		return 0;

	return smu->ppt_funcs->get_bamaco_support(smu);
}

static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			state ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_enable_gfx_features(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_gfx_features)
		ret = smu->ppt_funcs->enable_gfx_features(smu);

	if (ret)
		dev_err(smu->adev->dev, "enable gfx features failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

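/*
 * On success this points *table at the ASIC's gpu_metrics structure and
 * returns its size in bytes. The buffer remains owned by the SMU code, so
 * consumers (e.g. the gpu_metrics sysfs file) copy out of it rather than
 * freeing it.
 */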
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
				      size_t size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

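/*
 * Emits one line per supported level, starring the active one. With
 * illustrative level descriptions the output looks like:
 *
 *	0 : policy_level_0
 *	1 : policy_level_1*
 */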
static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
				 size_t *size)
{
	size_t offset = *size;
	int level;

	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
		if (level == policy->current_level)
			offset += sysfs_emit_at(sysbuf, offset,
				"%d : %s*\n", level,
				policy->desc->get_desc(policy, level));
		else
			offset += sysfs_emit_at(sysbuf, offset,
				"%d : %s\n", level,
				policy->desc->get_desc(policy, level));
	}

	*size = offset;
}

ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}

struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int i;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!policy_ctxt)
		return NULL;

	/* policies[] is packed: one entry per bit set in policy_mask */
	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
		if (policy_ctxt->policies[i].policy_type == p_type)
			return &policy_ctxt->policies[i];
	}

	return NULL;
}

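/*
 * Illustrative use (hypothetical values): select level 1 of the XGMI
 * per-link power-down policy, if the ASIC exposes one:
 *
 *	ret = smu_set_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD, 1);
 *
 * Setting the already-active level is a no-op that returns 0.
 */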
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy *dpm_policy = NULL;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int ret = -EOPNOTSUPP;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return ret;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);

	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
		return ret;

	if (dpm_policy->current_level == level)
		return 0;

	ret = dpm_policy->set_policy(smu, level);

	if (!ret)
		dpm_policy->current_level = level;

	return ret;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

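/*
 * STB ("Smart Trace Buffer") snapshotting: copies the firmware trace into a
 * caller-provided buffer of exactly stb_buf_size bytes. The debugfs plumbing
 * below is the primary consumer.
 */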
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock the smu mutex, as we access the STB directly
	 * through MMIO and do not go through the SMU messaging route
	 * (for now at least). For register access, rely on the
	 * implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and release
 * ones, because .read returns at most PAGE_SIZE of data per call and so
 * may be invoked multiple times for a single dump. We allocate the STB
 * buffer in .open and free it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

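/*
 * Registers the "amdgpu_smu_stb_dump" debugfs file. With debugfs mounted,
 * the trace can then be read from userspace, e.g. (the path varies with
 * the DRM minor number):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_smu_stb_dump
 */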
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}