/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

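/**
 * amdgpu_dpm_get_sclk - fetch the GFX engine clock from the power backend
 * @adev: amdgpu device pointer
 * @low: true to request the lowest supported level, false for the current one
 *
 * Forwards the query to the backend's get_sclk callback under adev->pm.mutex.
 * Returns the reported clock value, or 0 if the backend does not implement
 * the callback. amdgpu_dpm_get_mclk() below is the memory-clock counterpart.
 */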
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_sclk)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
                                 low);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_mclk)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
                                 low);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

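/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block to act on
 * @gate: true to power gate the block, false to ungate it
 *
 * The last requested state is cached in adev->pm.pwr_state[], so repeated
 * requests for the state a block is already in return early without touching
 * the SMU. Only the block types listed in the switch below are routed to the
 * backend; anything else is silently ignored.
 */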
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

        if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
                dev_dbg(adev->dev, "IP block %d already in the target %s state!",
                                block_type, gate ? "gate" : "ungate");
                return 0;
        }

        mutex_lock(&adev->pm.mutex);

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCE:
        case AMD_IP_BLOCK_TYPE_GFX:
        case AMD_IP_BLOCK_TYPE_VCN:
        case AMD_IP_BLOCK_TYPE_SDMA:
        case AMD_IP_BLOCK_TYPE_JPEG:
        case AMD_IP_BLOCK_TYPE_GMC:
        case AMD_IP_BLOCK_TYPE_ACP:
        case AMD_IP_BLOCK_TYPE_VPE:
                if (pp_funcs && pp_funcs->set_powergating_by_smu)
                        ret = (pp_funcs->set_powergating_by_smu(
                                (adev)->powerplay.pp_handle, block_type, gate));
                break;
        default:
                break;
        }

        if (!ret)
                atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_set_gfx_power_up_by_imu(smu);
        mutex_unlock(&adev->pm.mutex);

        msleep(10);

        return ret;
}

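/**
 * amdgpu_dpm_baco_enter - put the ASIC into BACO (Bus Active, Chip Off)
 * @adev: amdgpu device pointer
 *
 * Returns 0 on success, -ENOENT if the backend has no BACO support, or the
 * error propagated from the set_asic_baco_state callback. Paired with
 * amdgpu_dpm_baco_exit() below.
 */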
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        /* exit BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
                             enum pp_mp1_state mp1_state)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->set_mp1_state) {
                mutex_lock(&adev->pm.mutex);

                ret = pp_funcs->set_mp1_state(
                                adev->powerplay.pp_handle,
                                mp1_state);

                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->notify_rlc_state) {
                mutex_lock(&adev->pm.mutex);

                ret = pp_funcs->notify_rlc_state(
                                adev->powerplay.pp_handle,
                                en);

                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

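/**
 * amdgpu_dpm_is_baco_supported - query whether BACO reset can be used
 * @adev: amdgpu device pointer
 *
 * Returns false while suspended to S3 (see the workaround note below) or
 * when the backend provides no capability callback; otherwise returns the
 * capability reported by the backend.
 */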
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        bool ret;

        if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
                return false;
        /* Don't use BACO for reset in S3.
         * This is a workaround for some platforms
         * where entering BACO during suspend
         * seems to cause reboots or hangs.
         * This might be related to the fact that BACO controls
         * power to the whole GPU including devices like audio and USB.
         * Powering down/up everything may adversely affect these other
         * devices. Needs more investigation.
         */
        if (adev->in_s3)
                return false;

        mutex_lock(&adev->pm.mutex);

        ret = pp_funcs->get_asic_baco_capability(pp_handle);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        ret = pp_funcs->asic_reset_mode_2(pp_handle);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

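/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO
 * @adev: amdgpu device pointer
 *
 * Enters BACO and, if that succeeds, immediately exits it again, which
 * effectively power cycles the chip. Both transitions happen under a single
 * hold of adev->pm.mutex.
 */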
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
        if (ret)
                goto out;

        /* exit BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
        mutex_unlock(&adev->pm.mutex);
        return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        bool support_mode1_reset = false;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                support_mode1_reset = smu_mode1_reset_is_support(smu);
                mutex_unlock(&adev->pm.mutex);
        }

        return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_mode1_reset(smu);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

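/**
 * amdgpu_dpm_switch_power_profile - enable or disable a workload profile hint
 * @adev: amdgpu device pointer
 * @type: PP_SMC_POWER_PROFILE_* workload type
 * @en: true to activate the profile, false to drop it
 *
 * A no-op returning 0 on SR-IOV virtual functions, where the host owns
 * power management.
 */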
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
                                    enum PP_SMC_POWER_PROFILE type,
                                    bool en)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (amdgpu_sriov_vf(adev))
                return 0;

        if (pp_funcs && pp_funcs->switch_power_profile) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->switch_power_profile(
                        adev->powerplay.pp_handle, type, en);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
                               uint32_t pstate)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->set_xgmi_pstate) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
                                                pstate);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
                             uint32_t cstate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (pp_funcs && pp_funcs->set_df_cstate) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_df_cstate(pp_handle, cstate);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int mode = XGMI_PLPD_NONE;

        if (is_support_sw_smu(adev)) {
                mode = smu->plpd_mode;
                if (mode_desc == NULL)
                        return mode;
                switch (smu->plpd_mode) {
                case XGMI_PLPD_DISALLOW:
                        *mode_desc = "disallow";
                        break;
                case XGMI_PLPD_DEFAULT:
                        *mode_desc = "default";
                        break;
                case XGMI_PLPD_OPTIMIZED:
                        *mode_desc = "optimized";
                        break;
                case XGMI_PLPD_NONE:
                default:
                        *mode_desc = "none";
                        break;
                }
        }

        return mode;
}

int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_set_xgmi_plpd_mode(smu, mode);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs =
                        adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
                                      uint32_t msg_id)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs =
                        adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_clockgating_by_smu(pp_handle,
                                                       msg_id);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
                                  bool acquire)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs =
                        adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->smu_i2c_bus_access(pp_handle,
                                                   acquire);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

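/**
 * amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power source change
 * @adev: amdgpu device pointer
 *
 * Re-reads the system power supply state, caches it in adev->pm.ac_power and
 * notifies the active power backend (legacy BAPM or the SMU) so it can adjust
 * its limits for the new power source.
 */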
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.ac_power = true;
                else
                        adev->pm.ac_power = false;

                if (adev->powerplay.pp_funcs &&
                    adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

                if (is_support_sw_smu(adev))
                        smu_set_ac_dc(adev->powerplay.pp_handle);

                mutex_unlock(&adev->pm.mutex);
        }
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EINVAL;

        if (!data || !size)
                return -EINVAL;

        if (pp_funcs && pp_funcs->read_sensor) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
                                            sensor,
                                            data,
                                            size);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

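/**
 * amdgpu_dpm_compute_clocks - let the backend re-evaluate its clock state
 * @adev: amdgpu device pointer
 *
 * Updates display bandwidth requirements and waits for all rings to go idle
 * before asking the backend to recompute clocks, so the new power state is
 * chosen against an up-to-date, quiescent hardware state.
 */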
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int i;

        if (!adev->pm.dpm_enabled)
                return;

        if (!pp_funcs->pm_compute_clocks)
                return;

        if (adev->mode_info.num_crtc)
                amdgpu_display_bandwidth_update(adev);

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (ring && ring->sched.ready)
                        amdgpu_fence_wait_empty(ring);
        }

        mutex_lock(&adev->pm.mutex);
        pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);
}

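/**
 * amdgpu_dpm_enable_uvd - power the UVD block up or down for a workload
 * @adev: amdgpu device pointer
 * @enable: true when UVD is about to be used, false when it goes idle
 *
 * SI family parts have no SMU powergating path here, so they switch the
 * legacy dpm power state and recompute clocks instead; everything else is
 * routed through amdgpu_dpm_set_powergating_by_smu(). Note the inverted
 * sense: enabling the engine means ungating it (gate = !enable).
 */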
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (adev->family == AMDGPU_FAMILY_SI) {
                mutex_lock(&adev->pm.mutex);
                if (enable) {
                        adev->pm.dpm.uvd_active = true;
                        adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
                } else {
                        adev->pm.dpm.uvd_active = false;
                }
                mutex_unlock(&adev->pm.mutex);

                amdgpu_dpm_compute_clocks(adev);
                return;
        }

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
        if (ret)
                DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (adev->family == AMDGPU_FAMILY_SI) {
                mutex_lock(&adev->pm.mutex);
                if (enable) {
                        adev->pm.dpm.vce_active = true;
                        /* XXX select vce level based on ring/task */
                        adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
                } else {
                        adev->pm.dpm.vce_active = false;
                }
                mutex_unlock(&adev->pm.mutex);

                amdgpu_dpm_compute_clocks(adev);
                return;
        }

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
        if (ret)
                DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
        if (ret)
                DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
        if (ret)
                DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

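/**
 * amdgpu_pm_load_smu_firmware - have the powerplay backend load SMU firmware
 * @adev: amdgpu device pointer
 * @smu_version: optional out parameter for the loaded firmware version
 *
 * Returns 0 on success or when the backend has no load_firmware hook
 * (nothing to do); otherwise propagates the backend's error code.
 */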
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int r = 0;

        if (!pp_funcs || !pp_funcs->load_firmware)
                return 0;

        mutex_lock(&adev->pm.mutex);
        r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
        if (r) {
                pr_err("smu firmware loading failed\n");
                goto out;
        }

        if (smu_version)
                *smu_version = adev->pm.fw_version;

out:
        mutex_unlock(&adev->pm.mutex);
        return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
                                                 enable);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_pages_num(smu, size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_channel_flag(smu, size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_send_rma_reason(smu);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

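/**
 * amdgpu_dpm_get_dpm_freq_range - query the min/max DPM frequencies
 * @adev: amdgpu device pointer
 * @type: clock domain; only PP_SCLK is currently accepted
 * @min: out parameter for the lowest supported frequency
 * @max: out parameter for the highest supported frequency
 *
 * Only implemented for the swSMU backend; returns -EOPNOTSUPP elsewhere and
 * -EINVAL for unsupported clock types. amdgpu_dpm_set_soft_freq_range() is
 * the matching setter.
 */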
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
                                  enum pp_clock_type type,
                                  uint32_t *min,
                                  uint32_t *max)
{
        int ret = 0;

        if (type != PP_SCLK)
                return -EINVAL;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
                                     SMU_SCLK,
                                     min,
                                     max);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
                                   enum pp_clock_type type,
                                   uint32_t min,
                                   uint32_t max)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (type != PP_SCLK)
                return -EINVAL;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_set_soft_freq_range(smu,
                                      SMU_SCLK,
                                      min,
                                      max);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = smu_write_watermarks_table(smu);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
                              enum smu_event_type event,
                              uint64_t event_arg)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_wait_for_event(smu, event, event_arg);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_set_residency_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_residency_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_entrycount_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_status_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!is_support_sw_smu(adev))
                return 0;

        return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
                                 enum gfx_change_state state)
{
        mutex_lock(&adev->pm.mutex);
        if (adev->powerplay.pp_funcs &&
            adev->powerplay.pp_funcs->gfx_state_change_set)
                ((adev)->powerplay.pp_funcs->gfx_state_change_set(
                        (adev)->powerplay.pp_handle, state));
        mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
                            void *umc_ecc)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_ecc_info(smu, umc_ecc);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
                                                     uint32_t idx)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct amd_vce_state *vstate = NULL;

        if (!pp_funcs->get_vce_clock_state)
                return NULL;

        mutex_lock(&adev->pm.mutex);
        vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
                                               idx);
        mutex_unlock(&adev->pm.mutex);

        return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
                                        enum amd_pm_state_type *state)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        mutex_lock(&adev->pm.mutex);

        if (!pp_funcs->get_current_power_state) {
                *state = adev->pm.dpm.user_state;
                goto out;
        }

        *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
        if (*state < POWER_STATE_TYPE_DEFAULT ||
            *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
                *state = adev->pm.dpm.user_state;

out:
        mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
                                enum amd_pm_state_type state)
{
        mutex_lock(&adev->pm.mutex);
        adev->pm.dpm.user_state = state;
        mutex_unlock(&adev->pm.mutex);

        if (is_support_sw_smu(adev))
                return;

        if (amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_ENABLE_USER_STATE,
                                     &state) == -EOPNOTSUPP)
                amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level level;

        if (!pp_funcs)
                return AMD_DPM_FORCED_LEVEL_AUTO;

        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->get_performance_level)
                level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
        else
                level = adev->pm.dpm.forced_level;
        mutex_unlock(&adev->pm.mutex);

        return level;
}

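/**
 * amdgpu_dpm_force_performance_level - pin the GPU to a dpm performance level
 * @adev: amdgpu device pointer
 * @level: target AMD_DPM_FORCED_LEVEL_* value
 *
 * Rejects the request while a thermal event is active. On Raven (non-Raven2)
 * gfxoff must be disabled while in manual mode, and transitions into or out
 * of the UMD pstate profile levels additionally toggle GFX clock/power
 * gating before the backend callback is invoked.
 */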
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
                                       enum amd_dpm_forced_level level)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level current_level;
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!pp_funcs || !pp_funcs->force_performance_level)
                return 0;

        if (adev->pm.dpm.thermal_active)
                return -EINVAL;

        current_level = amdgpu_dpm_get_performance_level(adev);
        if (current_level == level)
                return 0;

        if (adev->asic_type == CHIP_RAVEN) {
                if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
                        if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
                            level == AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, false);
                        else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
                                 level != AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, true);
                }
        }

        if (!(current_level & profile_mode_mask) &&
            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
                return -EINVAL;

        if (!(current_level & profile_mode_mask) &&
            (level & profile_mode_mask)) {
                /* enter UMD Pstate */
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_UNGATE);
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_UNGATE);
        } else if ((current_level & profile_mode_mask) &&
                   !(level & profile_mode_mask)) {
                /* exit UMD Pstate */
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_GATE);
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_GATE);
        }

        mutex_lock(&adev->pm.mutex);

        if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
                                              level)) {
                mutex_unlock(&adev->pm.mutex);
                return -EINVAL;
        }

        adev->pm.dpm.forced_level = level;

        mutex_unlock(&adev->pm.mutex);

        return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
                                 struct pp_states_info *states)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pp_num_states)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
                                          states);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
                             enum amd_pp_task task_id,
                             enum amd_pm_state_type *user_state)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->dispatch_tasks)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
                                       task_id,
                                       user_state);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pp_table)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
                                     table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
                                      uint32_t type,
                                      long *input,
                                      uint32_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fine_grain_clk_vol)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
                                               type,
                                               input,
                                               size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
                                  uint32_t type,
                                  long *input,
                                  uint32_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->odn_edit_dpm_table)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
                                           type,
                                           input,
                                           size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
                                  enum pp_clock_type type,
                                  char *buf)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->print_clock_levels)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
                                           type,
                                           buf);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
                                 enum pp_clock_type type,
                                 char *buf,
                                 int *offset)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->emit_clock_levels)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
                                          type,
                                          buf,
                                          offset);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
                                    uint64_t ppfeature_masks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_ppfeature_status)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
                                             ppfeature_masks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_ppfeature_status)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
                                             buf);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
                                 enum pp_clock_type type,
                                 uint32_t mask)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->force_clock_level)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
                                          type,
                                          mask);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_sclk_od)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->set_sclk_od)
                pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
        mutex_unlock(&adev->pm.mutex);

        if (amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_READJUST_POWER_STATE,
                                     NULL) == -EOPNOTSUPP) {
                adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                amdgpu_dpm_compute_clocks(adev);
        }

        return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_mclk_od)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->set_mclk_od)
                pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
        mutex_unlock(&adev->pm.mutex);

        if (amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_READJUST_POWER_STATE,
                                     NULL) == -EOPNOTSUPP) {
                adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                amdgpu_dpm_compute_clocks(adev);
        }

        return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
                                      char *buf)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_power_profile_mode)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
                                               buf);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
                                      long *input, uint32_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_power_profile_mode)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
                                               input,
                                               size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_gpu_metrics)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
                                        table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
                                  size_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pm_metrics)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
                                       size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
                                    uint32_t *fan_mode)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_fan_control_mode)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
                                             fan_mode);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
                                 uint32_t speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fan_speed_pwm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
                                 uint32_t *speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_fan_speed_pwm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
                                 uint32_t *speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_fan_speed_rpm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
                                 uint32_t speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fan_speed_rpm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
                                    uint32_t mode)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fan_control_mode)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
                                             mode);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
                               uint32_t *limit,
                               enum pp_power_limit_level pp_limit_level,
                               enum pp_power_type power_type)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_power_limit)
                return -ENODATA;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
                                        limit,
                                        pp_limit_level,
                                        power_type);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
                               uint32_t limit)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_power_limit)
                return -EINVAL;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
                                        limit);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
        bool cclk_dpm_supported = false;

        if (!is_support_sw_smu(adev))
                return false;

        mutex_lock(&adev->pm.mutex);
        cclk_dpm_supported = is_support_cclk_dpm(adev);
        mutex_unlock(&adev->pm.mutex);

        return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
                                                       struct seq_file *m)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->debugfs_print_current_performance_level)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
                                                          m);
        mutex_unlock(&adev->pm.mutex);

        return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
                                       void **addr,
                                       size_t *size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_smu_prv_buf_details)
                return -ENOSYS;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
                                                addr,
                                                size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

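/**
 * amdgpu_dpm_is_overdrive_supported - check whether overdrive is available
 * @adev: amdgpu device pointer
 *
 * On the swSMU backend this reads smu->od_enabled (APUs are treated as
 * supported); on powerplay it reads hwmgr->od_enabled, after first ruling
 * out legacy dpm, whose pp_handle is not a hwmgr at all (see the
 * amdgpu_dpm_is_legacy_dpm() macro near the top of this file).
 */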
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
        if (is_support_sw_smu(adev)) {
                struct smu_context *smu = adev->powerplay.pp_handle;

                return (smu->od_enabled || smu->is_apu);
        } else {
                struct pp_hwmgr *hwmgr;

                /*
                 * dpm on some legacy asics doesn't carry an od_enabled member
                 * as their pp_handle is cast directly from adev.
                 */
                if (amdgpu_dpm_is_legacy_dpm(adev))
                        return false;

                hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

                return hwmgr->od_enabled;
        }
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
                            const char *buf,
                            size_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_pp_table)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
                                     buf,
                                     size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!is_support_sw_smu(adev))
                return INT_MAX;

        return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
        if (!is_support_sw_smu(adev))
                return;

        amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
                                            const struct amd_pp_display_configuration *input)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->display_configuration_change)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
                                                     input);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
                                 enum amd_pp_clock_type type,
                                 struct amd_pp_clocks *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_clock_by_type)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
                                          type,
                                          clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
                                                struct amd_pp_simple_clock_info *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_display_mode_validation_clocks)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
                                                           clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
                                              enum amd_pp_clock_type type,
                                              struct pp_clock_levels_with_latency *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_clock_by_type_with_latency)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
                                                       type,
                                                       clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
                                              enum amd_pp_clock_type type,
                                              struct pp_clock_levels_with_voltage *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_clock_by_type_with_voltage)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
                                                       type,
                                                       clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
                                                void *clock_ranges)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_watermarks_for_clocks_ranges)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
                                                         clock_ranges);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
                                             struct pp_display_clock_request *clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->display_clock_voltage_request)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
                                                      clock);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
                                  struct amd_pp_clock_info *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_current_clocks)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
                                           clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->notify_smu_enable_pwe)
                return;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
                                        uint32_t count)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_active_display_count)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
                                                 count);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
                                          uint32_t clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_min_deep_sleep_dcefclk)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
                                                   clock);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
                                             uint32_t clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->set_hard_min_dcefclk_by_freq)
                return;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
                                               clock);
        mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
                                          uint32_t clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->set_hard_min_fclk_by_freq)
                return;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
                                            clock);
        mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
                                                   bool disable_memory_clock_switch)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->display_disable_memory_clock_switch)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
                                                            disable_memory_clock_switch);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
                                                struct pp_smu_nv_clock_table *max_clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_max_sustainable_clocks_by_dc)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
                                                         max_clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
                                                  unsigned int *clock_values_in_khz,
                                                  unsigned int *num_states)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_uclk_dpm_states)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
                                            clock_values_in_khz,
                                            num_states);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
                                   struct dpm_clocks *clock_table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_dpm_clock_table)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
                                            clock_table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "amdgpu.h"
26#include "amdgpu_atombios.h"
27#include "amdgpu_i2c.h"
28#include "amdgpu_dpm.h"
29#include "atom.h"
30#include "amd_pcie.h"
31#include "amdgpu_display.h"
32#include "hwmgr.h"
33#include <linux/power_supply.h>
34#include "amdgpu_smu.h"
35
36#define amdgpu_dpm_enable_bapm(adev, e) \
37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
38
39int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
40{
41 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
42 int ret = 0;
43
44 if (!pp_funcs->get_sclk)
45 return 0;
46
47 mutex_lock(&adev->pm.mutex);
48 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
49 low);
50 mutex_unlock(&adev->pm.mutex);
51
52 return ret;
53}
54
55int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
56{
57 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
58 int ret = 0;
59
60 if (!pp_funcs->get_mclk)
61 return 0;
62
63 mutex_lock(&adev->pm.mutex);
64 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
65 low);
66 mutex_unlock(&adev->pm.mutex);
67
68 return ret;
69}
70
71int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
72{
73 int ret = 0;
74 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
75 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
76
77 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
78 dev_dbg(adev->dev, "IP block%d already in the target %s state!",
79 block_type, gate ? "gate" : "ungate");
80 return 0;
81 }
82
83 mutex_lock(&adev->pm.mutex);
84
85 switch (block_type) {
86 case AMD_IP_BLOCK_TYPE_UVD:
87 case AMD_IP_BLOCK_TYPE_VCE:
88 case AMD_IP_BLOCK_TYPE_GFX:
89 case AMD_IP_BLOCK_TYPE_VCN:
90 case AMD_IP_BLOCK_TYPE_SDMA:
91 case AMD_IP_BLOCK_TYPE_JPEG:
92 case AMD_IP_BLOCK_TYPE_GMC:
93 case AMD_IP_BLOCK_TYPE_ACP:
94 if (pp_funcs && pp_funcs->set_powergating_by_smu)
95 ret = (pp_funcs->set_powergating_by_smu(
96 (adev)->powerplay.pp_handle, block_type, gate));
97 break;
98 default:
99 break;
100 }
101
102 if (!ret)
103 atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
104
105 mutex_unlock(&adev->pm.mutex);
106
107 return ret;
108}
109
110int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
111{
112 struct smu_context *smu = adev->powerplay.pp_handle;
113 int ret = -EOPNOTSUPP;
114
115 mutex_lock(&adev->pm.mutex);
116 ret = smu_set_gfx_power_up_by_imu(smu);
117 mutex_unlock(&adev->pm.mutex);
118
119 msleep(10);
120
121 return ret;
122}
123
124int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
125{
126 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
127 void *pp_handle = adev->powerplay.pp_handle;
128 int ret = 0;
129
130 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
131 return -ENOENT;
132
133 mutex_lock(&adev->pm.mutex);
134
135 /* enter BACO state */
136 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
137
138 mutex_unlock(&adev->pm.mutex);
139
140 return ret;
141}
142
143int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
144{
145 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
146 void *pp_handle = adev->powerplay.pp_handle;
147 int ret = 0;
148
149 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
150 return -ENOENT;
151
152 mutex_lock(&adev->pm.mutex);
153
154 /* exit BACO state */
155 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
156
157 mutex_unlock(&adev->pm.mutex);
158
159 return ret;
160}
161
162int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
163 enum pp_mp1_state mp1_state)
164{
165 int ret = 0;
166 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
167
168 if (pp_funcs && pp_funcs->set_mp1_state) {
169 mutex_lock(&adev->pm.mutex);
170
171 ret = pp_funcs->set_mp1_state(
172 adev->powerplay.pp_handle,
173 mp1_state);
174
175 mutex_unlock(&adev->pm.mutex);
176 }
177
178 return ret;
179}
180
181bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
182{
183 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
184 void *pp_handle = adev->powerplay.pp_handle;
185 bool baco_cap;
186 int ret = 0;
187
188 if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
189 return false;
190 /* Don't use baco for reset in S3.
191 * This is a workaround for some platforms
192 * where entering BACO during suspend
193 * seems to cause reboots or hangs.
194 * This might be related to the fact that BACO controls
195 * power to the whole GPU including devices like audio and USB.
196 * Powering down/up everything may adversely affect these other
197 * devices. Needs more investigation.
198 */
199 if (adev->in_s3)
200 return false;
201
202 mutex_lock(&adev->pm.mutex);
203
204 ret = pp_funcs->get_asic_baco_capability(pp_handle,
205 &baco_cap);
206
207 mutex_unlock(&adev->pm.mutex);
208
209 return ret ? false : baco_cap;
210}
211
212int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
213{
214 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
215 void *pp_handle = adev->powerplay.pp_handle;
216 int ret = 0;
217
218 if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
219 return -ENOENT;
220
221 mutex_lock(&adev->pm.mutex);
222
223 ret = pp_funcs->asic_reset_mode_2(pp_handle);
224
225 mutex_unlock(&adev->pm.mutex);
226
227 return ret;
228}
229
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

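/*
 * Enable/disable a workload power profile hint (e.g. fullscreen 3D,
 * video, compute) in the SMU. Silently skipped on SR-IOV virtual
 * functions, where power management is owned by the host.
 */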
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

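/*
 * Called on ACPI events to re-evaluate the AC/DC power source. Updates
 * adev->pm.ac_power and notifies whichever backend is in use (BAPM on
 * the legacy paths, smu_set_ac_dc() for SW SMU).
 */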
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

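/*
 * Re-evaluate the current power state after a change in load or display
 * configuration: refresh the display bandwidth requirements, wait for
 * all rings to drain so the GPU is quiescent, then let the backend pick
 * new clocks.
 */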
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

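/*
 * Tell the SMU whether an SBR (secondary bus reset) may be passed
 * through to the device; only meaningful on SW SMU parts.
 */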
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

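/*
 * Only the SCLK (graphics core clock) range is queryable through this
 * wrapper for now; any other pp_clock_type is rejected with -EINVAL
 * before the SMU is consulted.
 */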
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 - sGpuChangeState_D0Entry and 2 - sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

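/*
 * Report the current power state, falling back to the last state the
 * user requested when the backend cannot answer or returns a value
 * outside the known amd_pm_state_type range.
 */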
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

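/*
 * Force a DPM performance level. Entering a profiling ("UMD pstate")
 * level ungates GFX clock/power gating so the fixed clocks stick, and
 * leaving it restores gating; Raven (non-Raven2) additionally needs
 * gfxoff disabled while in manual mode.
 */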
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

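/*
 * Set the SCLK overdrive percentage. On legacy powerplay the new value
 * is pushed through a READJUST_POWER_STATE task; if the backend has no
 * task dispatcher we fall back to recomputing clocks from the boot
 * state.
 */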
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

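/*
 * pp_handle points at a struct pp_hwmgr for the legacy powerplay stack
 * and at a struct smu_context for SW SMU, so both views are taken here
 * and is_support_sw_smu() decides which one is valid to dereference.
 */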
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}