1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/gfp.h>
26#include <linux/slab.h>
27#include "amd_shared.h"
28#include "amd_powerplay.h"
29#include "pp_instance.h"
30#include "power_state.h"
31#include "eventmanager.h"
32#include "pp_debug.h"
33
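/*
 * PP_CHECK: the exported DAL entry points below bail out with -EINVAL when
 * the powerplay instance handle is missing or was never marked valid
 * (pp_valid is only set in amd_pp_instance_init()).
 */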
34#define PP_CHECK(handle) \
35 do { \
36 if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
37 return -EINVAL; \
38 } while (0)
39
40static int pp_early_init(void *handle)
41{
42 return 0;
43}
44
45static int pp_sw_init(void *handle)
46{
47 struct pp_instance *pp_handle;
48 struct pp_hwmgr *hwmgr;
49 int ret = 0;
50
51 if (handle == NULL)
52 return -EINVAL;
53
54 pp_handle = (struct pp_instance *)handle;
55 hwmgr = pp_handle->hwmgr;
56
57 if (hwmgr == NULL || hwmgr->pptable_func == NULL ||
58 hwmgr->hwmgr_func == NULL ||
59 hwmgr->pptable_func->pptable_init == NULL ||
60 hwmgr->hwmgr_func->backend_init == NULL)
61 return -EINVAL;
62
63 ret = hwmgr->pptable_func->pptable_init(hwmgr);
64
65 if (ret == 0)
66 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
67
68 if (ret)
 69 printk(KERN_ERR "amdgpu: powerplay initialization failed\n");
 70 else
 71 printk(KERN_INFO "amdgpu: powerplay initialized\n");
72
73 return ret;
74}
75
76static int pp_sw_fini(void *handle)
77{
78 struct pp_instance *pp_handle;
79 struct pp_hwmgr *hwmgr;
80 int ret = 0;
81
82 if (handle == NULL)
83 return -EINVAL;
84
85 pp_handle = (struct pp_instance *)handle;
86 hwmgr = pp_handle->hwmgr;
87
 88 if (hwmgr != NULL && hwmgr->hwmgr_func != NULL &&
 89 hwmgr->hwmgr_func->backend_fini != NULL)
90 ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
91
92 return ret;
93}
94
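/*
 * Hardware init brings up the SMC firmware first (smu_init + start_smu),
 * then builds the power-state table and initializes the event manager that
 * drives runtime DPM decisions.
 */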
95static int pp_hw_init(void *handle)
96{
97 struct pp_instance *pp_handle;
98 struct pp_smumgr *smumgr;
99 struct pp_eventmgr *eventmgr;
100 int ret = 0;
101
102 if (handle == NULL)
103 return -EINVAL;
104
105 pp_handle = (struct pp_instance *)handle;
106 smumgr = pp_handle->smu_mgr;
107
108 if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
109 smumgr->smumgr_funcs->smu_init == NULL ||
110 smumgr->smumgr_funcs->start_smu == NULL)
111 return -EINVAL;
112
113 ret = smumgr->smumgr_funcs->smu_init(smumgr);
114 if (ret) {
115 printk(KERN_ERR "[ powerplay ] smc initialization failed\n");
116 return ret;
117 }
118
119 ret = smumgr->smumgr_funcs->start_smu(smumgr);
120 if (ret) {
121 printk(KERN_ERR "[ powerplay ] smc start failed\n");
122 smumgr->smumgr_funcs->smu_fini(smumgr);
123 return ret;
124 }
125
126 hw_init_power_state_table(pp_handle->hwmgr);
127 eventmgr = pp_handle->eventmgr;
128
129 if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
130 return -EINVAL;
131
132 ret = eventmgr->pp_eventmgr_init(eventmgr);
 133 return ret;
134}
135
136static int pp_hw_fini(void *handle)
137{
138 struct pp_instance *pp_handle;
139 struct pp_smumgr *smumgr;
140 struct pp_eventmgr *eventmgr;
141
142 if (handle == NULL)
143 return -EINVAL;
144
145 pp_handle = (struct pp_instance *)handle;
146 eventmgr = pp_handle->eventmgr;
147
 148 if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
149 eventmgr->pp_eventmgr_fini(eventmgr);
150
151 smumgr = pp_handle->smu_mgr;
152
 153 if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
 154 smumgr->smumgr_funcs->smu_fini != NULL)
155 smumgr->smumgr_funcs->smu_fini(smumgr);
156
157 return 0;
158}
159
160static bool pp_is_idle(void *handle)
161{
 162 return false;
163}
164
165static int pp_wait_for_idle(void *handle)
166{
167 return 0;
168}
169
170static int pp_sw_reset(void *handle)
171{
172 return 0;
173}
174
175static void pp_print_status(void *handle)
176{
177
178}
179
180static int pp_set_clockgating_state(void *handle,
181 enum amd_clockgating_state state)
182{
183 return 0;
184}
185
186static int pp_set_powergating_state(void *handle,
187 enum amd_powergating_state state)
188{
189 return 0;
190}
191
192static int pp_suspend(void *handle)
193{
194 struct pp_instance *pp_handle;
195 struct pp_eventmgr *eventmgr;
196 struct pem_event_data event_data = { {0} };
197
198 if (handle == NULL)
199 return -EINVAL;
200
201 pp_handle = (struct pp_instance *)handle;
202 eventmgr = pp_handle->eventmgr;
203 pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
204 return 0;
205}
206
207static int pp_resume(void *handle)
208{
209 struct pp_instance *pp_handle;
210 struct pp_eventmgr *eventmgr;
211 struct pem_event_data event_data = { {0} };
212 struct pp_smumgr *smumgr;
213 int ret;
214
215 if (handle == NULL)
216 return -EINVAL;
217
218 pp_handle = (struct pp_instance *)handle;
219 smumgr = pp_handle->smu_mgr;
220
221 if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
222 smumgr->smumgr_funcs->start_smu == NULL)
223 return -EINVAL;
224
225 ret = smumgr->smumgr_funcs->start_smu(smumgr);
226 if (ret) {
227 printk(KERN_ERR "[ powerplay ] smc start failed\n");
228 smumgr->smumgr_funcs->smu_fini(smumgr);
229 return ret;
230 }
231
232 eventmgr = pp_handle->eventmgr;
233 pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
234
235 return 0;
236}
237
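/* IP block callbacks exposed to the amdgpu IP framework. */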
238const struct amd_ip_funcs pp_ip_funcs = {
239 .early_init = pp_early_init,
240 .late_init = NULL,
241 .sw_init = pp_sw_init,
242 .sw_fini = pp_sw_fini,
243 .hw_init = pp_hw_init,
244 .hw_fini = pp_hw_fini,
245 .suspend = pp_suspend,
246 .resume = pp_resume,
247 .is_idle = pp_is_idle,
248 .wait_for_idle = pp_wait_for_idle,
249 .soft_reset = pp_sw_reset,
250 .print_status = pp_print_status,
251 .set_clockgating_state = pp_set_clockgating_state,
252 .set_powergating_state = pp_set_powergating_state,
253};
254
255static int pp_dpm_load_fw(void *handle)
256{
257 return 0;
258}
259
260static int pp_dpm_fw_loading_complete(void *handle)
261{
262 return 0;
263}
264
265static int pp_dpm_force_performance_level(void *handle,
266 enum amd_dpm_forced_level level)
267{
268 struct pp_instance *pp_handle;
269 struct pp_hwmgr *hwmgr;
270
271 if (handle == NULL)
272 return -EINVAL;
273
274 pp_handle = (struct pp_instance *)handle;
275
276 hwmgr = pp_handle->hwmgr;
277
278 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
279 hwmgr->hwmgr_func->force_dpm_level == NULL)
280 return -EINVAL;
281
282 hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
283
284 return 0;
285}
286
287static enum amd_dpm_forced_level pp_dpm_get_performance_level(
288 void *handle)
289{
290 struct pp_hwmgr *hwmgr;
291
292 if (handle == NULL)
293 return -EINVAL;
294
295 hwmgr = ((struct pp_instance *)handle)->hwmgr;
296
297 if (hwmgr == NULL)
298 return -EINVAL;
299
 300 return hwmgr->dpm_level;
301}
302
303static int pp_dpm_get_sclk(void *handle, bool low)
304{
305 struct pp_hwmgr *hwmgr;
306
307 if (handle == NULL)
308 return -EINVAL;
309
310 hwmgr = ((struct pp_instance *)handle)->hwmgr;
311
312 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
313 hwmgr->hwmgr_func->get_sclk == NULL)
314 return -EINVAL;
315
316 return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
317}
318
319static int pp_dpm_get_mclk(void *handle, bool low)
320{
321 struct pp_hwmgr *hwmgr;
322
323 if (handle == NULL)
324 return -EINVAL;
325
326 hwmgr = ((struct pp_instance *)handle)->hwmgr;
327
328 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
329 hwmgr->hwmgr_func->get_mclk == NULL)
330 return -EINVAL;
331
332 return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
333}
334
335static int pp_dpm_powergate_vce(void *handle, bool gate)
336{
337 struct pp_hwmgr *hwmgr;
338
339 if (handle == NULL)
340 return -EINVAL;
341
342 hwmgr = ((struct pp_instance *)handle)->hwmgr;
343
344 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
345 hwmgr->hwmgr_func->powergate_vce == NULL)
346 return -EINVAL;
347
348 return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
349}
350
351static int pp_dpm_powergate_uvd(void *handle, bool gate)
352{
353 struct pp_hwmgr *hwmgr;
354
355 if (handle == NULL)
356 return -EINVAL;
357
358 hwmgr = ((struct pp_instance *)handle)->hwmgr;
359
360 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
361 hwmgr->hwmgr_func->powergate_uvd == NULL)
362 return -EINVAL;
363
364 return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
365}
366
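/* Map a generic amdgpu power-state request onto a powerplay UI label. */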
367static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
368{
369 switch (state) {
370 case POWER_STATE_TYPE_BATTERY:
371 return PP_StateUILabel_Battery;
372 case POWER_STATE_TYPE_BALANCED:
373 return PP_StateUILabel_Balanced;
374 case POWER_STATE_TYPE_PERFORMANCE:
375 return PP_StateUILabel_Performance;
376 default:
377 return PP_StateUILabel_None;
378 }
379}
380
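/*
 * Forward selected amdgpu events to the event manager.  For
 * AMD_PP_EVENT_ENABLE_USER_STATE the requested state is read from *input
 * and translated to a UI label before dispatching.
 */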
381int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
382{
383 int ret = 0;
384 struct pp_instance *pp_handle;
385 struct pem_event_data data = { {0} };
386
387 pp_handle = (struct pp_instance *)handle;
388
389 if (pp_handle == NULL)
390 return -EINVAL;
391
392 switch (event_id) {
393 case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
394 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
395 break;
396 case AMD_PP_EVENT_ENABLE_USER_STATE:
397 {
398 enum amd_pm_state_type ps;
399
400 if (input == NULL)
401 return -EINVAL;
402 ps = *(unsigned long *)input;
403
404 data.requested_ui_label = power_state_convert(ps);
405 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
406 break;
407 }
408 case AMD_PP_EVENT_COMPLETE_INIT:
409 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
410 break;
411 default:
412 break;
413 }
414 return ret;
415}
416
417enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
418{
419 struct pp_hwmgr *hwmgr;
420 struct pp_power_state *state;
421
422 if (handle == NULL)
423 return -EINVAL;
424
425 hwmgr = ((struct pp_instance *)handle)->hwmgr;
426
427 if (hwmgr == NULL || hwmgr->current_ps == NULL)
428 return -EINVAL;
429
430 state = hwmgr->current_ps;
431
432 switch (state->classification.ui_label) {
433 case PP_StateUILabel_Battery:
434 return POWER_STATE_TYPE_BATTERY;
435 case PP_StateUILabel_Balanced:
436 return POWER_STATE_TYPE_BALANCED;
437 case PP_StateUILabel_Performance:
438 return POWER_STATE_TYPE_PERFORMANCE;
439 default:
440 if (state->classification.flags & PP_StateClassificationFlag_Boot)
441 return POWER_STATE_TYPE_INTERNAL_BOOT;
442 else
443 return POWER_STATE_TYPE_DEFAULT;
444 }
445}
446
447static void
448pp_debugfs_print_current_performance_level(void *handle,
449 struct seq_file *m)
450{
451 struct pp_hwmgr *hwmgr;
452
453 if (handle == NULL)
454 return;
455
456 hwmgr = ((struct pp_instance *)handle)->hwmgr;
457
458 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
459 hwmgr->hwmgr_func->print_current_perforce_level == NULL)
460 return;
461
462 hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
463}
464
465static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
466{
467 struct pp_hwmgr *hwmgr;
468
469 if (handle == NULL)
470 return -EINVAL;
471
472 hwmgr = ((struct pp_instance *)handle)->hwmgr;
473
474 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
475 hwmgr->hwmgr_func->set_fan_control_mode == NULL)
476 return -EINVAL;
477
478 return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
479}
480
481static int pp_dpm_get_fan_control_mode(void *handle)
482{
483 struct pp_hwmgr *hwmgr;
484
485 if (handle == NULL)
486 return -EINVAL;
487
488 hwmgr = ((struct pp_instance *)handle)->hwmgr;
489
490 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
491 hwmgr->hwmgr_func->get_fan_control_mode == NULL)
492 return -EINVAL;
493
494 return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
495}
496
497static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
498{
499 struct pp_hwmgr *hwmgr;
500
501 if (handle == NULL)
502 return -EINVAL;
503
504 hwmgr = ((struct pp_instance *)handle)->hwmgr;
505
506 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
507 hwmgr->hwmgr_func->set_fan_speed_percent == NULL)
508 return -EINVAL;
509
510 return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
511}
512
513static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
514{
515 struct pp_hwmgr *hwmgr;
516
517 if (handle == NULL)
518 return -EINVAL;
519
520 hwmgr = ((struct pp_instance *)handle)->hwmgr;
521
522 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
523 hwmgr->hwmgr_func->get_fan_speed_percent == NULL)
524 return -EINVAL;
525
526 return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
527}
528
529static int pp_dpm_get_temperature(void *handle)
530{
531 struct pp_hwmgr *hwmgr;
532
533 if (handle == NULL)
534 return -EINVAL;
535
536 hwmgr = ((struct pp_instance *)handle)->hwmgr;
537
538 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
539 hwmgr->hwmgr_func->get_temperature == NULL)
540 return -EINVAL;
541
542 return hwmgr->hwmgr_func->get_temperature(hwmgr);
543}
544
545static int pp_dpm_get_pp_num_states(void *handle,
546 struct pp_states_info *data)
547{
548 struct pp_hwmgr *hwmgr;
549 int i;
550
551 if (!handle)
552 return -EINVAL;
553
554 hwmgr = ((struct pp_instance *)handle)->hwmgr;
555
556 if (hwmgr == NULL || hwmgr->ps == NULL)
557 return -EINVAL;
558
559 data->nums = hwmgr->num_ps;
560
561 for (i = 0; i < hwmgr->num_ps; i++) {
562 struct pp_power_state *state = (struct pp_power_state *)
563 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
564 switch (state->classification.ui_label) {
565 case PP_StateUILabel_Battery:
566 data->states[i] = POWER_STATE_TYPE_BATTERY;
567 break;
568 case PP_StateUILabel_Balanced:
569 data->states[i] = POWER_STATE_TYPE_BALANCED;
570 break;
571 case PP_StateUILabel_Performance:
572 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
573 break;
574 default:
575 if (state->classification.flags & PP_StateClassificationFlag_Boot)
576 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
577 else
578 data->states[i] = POWER_STATE_TYPE_DEFAULT;
579 }
580 }
581
582 return 0;
583}
584
585static int pp_dpm_get_pp_table(void *handle, char **table)
586{
587 struct pp_hwmgr *hwmgr;
588
589 if (!handle)
590 return -EINVAL;
591
592 hwmgr = ((struct pp_instance *)handle)->hwmgr;
593
594 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
595 hwmgr->hwmgr_func->get_pp_table == NULL)
596 return -EINVAL;
597
598 return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
599}
600
601static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
602{
603 struct pp_hwmgr *hwmgr;
604
605 if (!handle)
606 return -EINVAL;
607
608 hwmgr = ((struct pp_instance *)handle)->hwmgr;
609
610 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
611 hwmgr->hwmgr_func->set_pp_table == NULL)
612 return -EINVAL;
613
614 return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
615}
616
617static int pp_dpm_force_clock_level(void *handle,
618 enum pp_clock_type type, int level)
619{
620 struct pp_hwmgr *hwmgr;
621
622 if (!handle)
623 return -EINVAL;
624
625 hwmgr = ((struct pp_instance *)handle)->hwmgr;
626
627 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
628 hwmgr->hwmgr_func->force_clock_level == NULL)
629 return -EINVAL;
630
631 return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
632}
633
634static int pp_dpm_print_clock_levels(void *handle,
635 enum pp_clock_type type, char *buf)
636{
637 struct pp_hwmgr *hwmgr;
638
639 if (!handle)
640 return -EINVAL;
641
642 hwmgr = ((struct pp_instance *)handle)->hwmgr;
643
644 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
645 hwmgr->hwmgr_func->print_clock_levels == NULL)
646 return -EINVAL;
647
648 return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
649}
650
651const struct amd_powerplay_funcs pp_dpm_funcs = {
652 .get_temperature = pp_dpm_get_temperature,
653 .load_firmware = pp_dpm_load_fw,
654 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
655 .force_performance_level = pp_dpm_force_performance_level,
656 .get_performance_level = pp_dpm_get_performance_level,
657 .get_current_power_state = pp_dpm_get_current_power_state,
658 .get_sclk = pp_dpm_get_sclk,
659 .get_mclk = pp_dpm_get_mclk,
660 .powergate_vce = pp_dpm_powergate_vce,
661 .powergate_uvd = pp_dpm_powergate_uvd,
662 .dispatch_tasks = pp_dpm_dispatch_tasks,
663 .print_current_performance_level = pp_debugfs_print_current_performance_level,
664 .set_fan_control_mode = pp_dpm_set_fan_control_mode,
665 .get_fan_control_mode = pp_dpm_get_fan_control_mode,
666 .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
667 .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
668 .get_pp_num_states = pp_dpm_get_pp_num_states,
669 .get_pp_table = pp_dpm_get_pp_table,
670 .set_pp_table = pp_dpm_set_pp_table,
671 .force_clock_level = pp_dpm_force_clock_level,
672 .print_clock_levels = pp_dpm_print_clock_levels,
673};
674
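/*
 * Create a powerplay instance: the SMU manager, hardware manager and event
 * manager are brought up in that order and unwound in reverse order on
 * failure (see the fail_* labels below).
 */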
675static int amd_pp_instance_init(struct amd_pp_init *pp_init,
676 struct amd_powerplay *amd_pp)
677{
678 int ret;
679 struct pp_instance *handle;
680
681 handle = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
682 if (handle == NULL)
683 return -ENOMEM;
684
685 handle->pp_valid = PP_VALID;
686
687 ret = smum_init(pp_init, handle);
688 if (ret)
689 goto fail_smum;
690
691 ret = hwmgr_init(pp_init, handle);
692 if (ret)
693 goto fail_hwmgr;
694
695 ret = eventmgr_init(handle);
696 if (ret)
697 goto fail_eventmgr;
698
699 amd_pp->pp_handle = handle;
700 return 0;
701
702fail_eventmgr:
703 hwmgr_fini(handle->hwmgr);
704fail_hwmgr:
705 smum_fini(handle->smu_mgr);
706fail_smum:
707 kfree(handle);
708 return ret;
709}
710
711static int amd_pp_instance_fini(void *handle)
712{
713 struct pp_instance *instance = (struct pp_instance *)handle;
714
715 if (instance == NULL)
716 return -EINVAL;
717
718 eventmgr_fini(instance->eventmgr);
719
720 hwmgr_fini(instance->hwmgr);
721
722 smum_fini(instance->smu_mgr);
723
724 kfree(handle);
725 return 0;
726}
727
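/*
 * Entry point used by the amdgpu powerplay glue.  Minimal caller-side
 * sketch (names on the caller side are illustrative, not taken from this
 * file):
 *
 *	struct amd_powerplay amd_pp;
 *
 *	if (!amd_powerplay_init(&pp_init, &amd_pp))
 *		amd_pp.ip_funcs->hw_init(amd_pp.pp_handle);
 */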
728int amd_powerplay_init(struct amd_pp_init *pp_init,
729 struct amd_powerplay *amd_pp)
730{
731 int ret;
732
733 if (pp_init == NULL || amd_pp == NULL)
734 return -EINVAL;
735
736 ret = amd_pp_instance_init(pp_init, amd_pp);
737
738 if (ret)
739 return ret;
740
741 amd_pp->ip_funcs = &pp_ip_funcs;
742 amd_pp->pp_funcs = &pp_dpm_funcs;
743
744 return 0;
745}
746
747int amd_powerplay_fini(void *handle)
748{
749 amd_pp_instance_fini(handle);
750
751 return 0;
752}
753
754/* export this function to DAL */
755
756int amd_powerplay_display_configuration_change(void *handle,
757 const struct amd_pp_display_configuration *display_config)
758{
759 struct pp_hwmgr *hwmgr;
760
761 PP_CHECK((struct pp_instance *)handle);
762
763 hwmgr = ((struct pp_instance *)handle)->hwmgr;
764
765 phm_store_dal_configuration_data(hwmgr, display_config);
766
767 return 0;
768}
769
770int amd_powerplay_get_display_power_level(void *handle,
771 struct amd_pp_simple_clock_info *output)
772{
773 struct pp_hwmgr *hwmgr;
774
775 PP_CHECK((struct pp_instance *)handle);
776
777 if (output == NULL)
778 return -EINVAL;
779
780 hwmgr = ((struct pp_instance *)handle)->hwmgr;
781
782 return phm_get_dal_power_level(hwmgr, output);
783}
784
785int amd_powerplay_get_current_clocks(void *handle,
786 struct amd_pp_clock_info *clocks)
787{
788 struct pp_hwmgr *hwmgr;
789 struct amd_pp_simple_clock_info simple_clocks;
790 struct pp_clock_info hw_clocks;
791
792 PP_CHECK((struct pp_instance *)handle);
793
794 if (clocks == NULL)
795 return -EINVAL;
796
797 hwmgr = ((struct pp_instance *)handle)->hwmgr;
798
799 phm_get_dal_power_level(hwmgr, &simple_clocks);
800
801 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
802 if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment))
803 PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1);
804 } else {
805 if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity))
806 PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1);
807 }
808
809 clocks->min_engine_clock = hw_clocks.min_eng_clk;
810 clocks->max_engine_clock = hw_clocks.max_eng_clk;
811 clocks->min_memory_clock = hw_clocks.min_mem_clk;
812 clocks->max_memory_clock = hw_clocks.max_mem_clk;
813 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
814 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
815
816 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
817 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
818
819 clocks->max_clocks_state = simple_clocks.level;
820
821 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
822 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
823 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
824 }
825
826 return 0;
827
828}
829
830int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
831{
832 int result = -1;
833
834 struct pp_hwmgr *hwmgr;
835
836 PP_CHECK((struct pp_instance *)handle);
837
838 if (clocks == NULL)
839 return -EINVAL;
840
841 hwmgr = ((struct pp_instance *)handle)->hwmgr;
842
843 result = phm_get_clock_by_type(hwmgr, type, clocks);
844
845 return result;
846}
847
848int amd_powerplay_get_display_mode_validation_clocks(void *handle,
849 struct amd_pp_simple_clock_info *clocks)
850{
851 int result = -1;
852 struct pp_hwmgr *hwmgr;
853
854 PP_CHECK((struct pp_instance *)handle);
855
856 if (clocks == NULL)
857 return -EINVAL;
858
859 hwmgr = ((struct pp_instance *)handle)->hwmgr;
860
861 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
862 result = phm_get_max_high_clocks(hwmgr, clocks);
863
864 return result;
865}
866
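/*
 * Reworked amd_powerplay.c follows: the separate eventmgr/smumgr instances
 * are folded into a single hwmgr, and the driver-facing table becomes
 * struct amd_pm_funcs.
 */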
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "pp_debug.h"
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/gfp.h>
27#include <linux/slab.h>
28#include "amd_shared.h"
29#include "amd_powerplay.h"
30#include "power_state.h"
31#include "amdgpu.h"
32#include "hwmgr.h"
33
34#define PP_DPM_DISABLED 0xCCCC
35
36static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
37 enum amd_pm_state_type *user_state);
38
39static const struct amd_pm_funcs pp_dpm_funcs;
40
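/*
 * pp_check() return convention: 0 means powerplay is fully enabled, -EINVAL
 * means the handle is unusable, and PP_DPM_DISABLED means the SMU is present
 * but dynamic power management is off (module parameter, SR-IOV, or no
 * hwmgr backend).
 */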
41static inline int pp_check(struct pp_hwmgr *hwmgr)
42{
43 if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
44 return -EINVAL;
45
46 if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47 return PP_DPM_DISABLED;
48
49 return 0;
50}
51
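/*
 * Allocate the hwmgr backing an amdgpu device, record chip family/id and
 * the feature mask, and publish the pp handle plus dpm callbacks on the
 * device.
 */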
52static int amd_powerplay_create(struct amdgpu_device *adev)
53{
54 struct pp_hwmgr *hwmgr;
55
56 if (adev == NULL)
57 return -EINVAL;
58
59 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
60 if (hwmgr == NULL)
61 return -ENOMEM;
62
63 hwmgr->adev = adev;
 64 hwmgr->pm_en = amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev);
65 hwmgr->device = amdgpu_cgs_create_device(adev);
66 mutex_init(&hwmgr->smu_lock);
67 hwmgr->chip_family = adev->family;
68 hwmgr->chip_id = adev->asic_type;
69 hwmgr->feature_mask = amdgpu_pp_feature_mask;
70 adev->powerplay.pp_handle = hwmgr;
71 adev->powerplay.pp_funcs = &pp_dpm_funcs;
72 return 0;
73}
74
75
76static int amd_powerplay_destroy(struct amdgpu_device *adev)
77{
78 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
79
80 kfree(hwmgr->hardcode_pp_table);
81 hwmgr->hardcode_pp_table = NULL;
82
83 kfree(hwmgr);
84 hwmgr = NULL;
85
86 return 0;
87}
88
89static int pp_early_init(void *handle)
90{
91 int ret;
92 struct amdgpu_device *adev = handle;
93
94 ret = amd_powerplay_create(adev);
95
96 if (ret != 0)
97 return ret;
98
99 ret = hwmgr_early_init(adev->powerplay.pp_handle);
100 if (ret)
101 return -EINVAL;
102
103 return 0;
104}
105
106static int pp_sw_init(void *handle)
107{
108 struct amdgpu_device *adev = handle;
109 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
110 int ret = 0;
111
112 ret = pp_check(hwmgr);
113
114 if (ret >= 0) {
115 if (hwmgr->smumgr_funcs->smu_init == NULL)
116 return -EINVAL;
117
118 ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
119
120 phm_register_irq_handlers(hwmgr);
121
122 pr_debug("amdgpu: powerplay sw initialized\n");
123 }
124
125 return ret;
126}
127
128static int pp_sw_fini(void *handle)
129{
130 struct amdgpu_device *adev = handle;
131 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
132 int ret = 0;
133
134 ret = pp_check(hwmgr);
135 if (ret >= 0) {
136 if (hwmgr->smumgr_funcs->smu_fini != NULL)
137 hwmgr->smumgr_funcs->smu_fini(hwmgr);
138 }
139
140 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
141 amdgpu_ucode_fini_bo(adev);
142
143 return 0;
144}
145
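/*
 * With SMU-loaded firmware the ucode BO must exist before the SMC is
 * started.  If DPM is disabled only the SMC firmware is brought up; pm_en
 * is cleared and DAL is told that DPM stays off.
 */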
146static int pp_hw_init(void *handle)
147{
148 int ret = 0;
149 struct amdgpu_device *adev = handle;
150 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
151
152 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
153 amdgpu_ucode_init_bo(adev);
154
155 ret = pp_check(hwmgr);
156
157 if (ret >= 0) {
158 if (hwmgr->smumgr_funcs->start_smu == NULL)
159 return -EINVAL;
160
161 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
162 pr_err("smc start failed\n");
163 hwmgr->smumgr_funcs->smu_fini(hwmgr);
164 return -EINVAL;
165 }
166 if (ret == PP_DPM_DISABLED)
167 goto exit;
168 ret = hwmgr_hw_init(hwmgr);
169 if (ret)
170 goto exit;
171 }
172 return ret;
173exit:
174 hwmgr->pm_en = 0;
175 cgs_notify_dpm_enabled(hwmgr->device, false);
176 return 0;
177
178}
179
180static int pp_hw_fini(void *handle)
181{
182 struct amdgpu_device *adev = handle;
183 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
184 int ret = 0;
185
186 ret = pp_check(hwmgr);
187 if (ret == 0)
188 hwmgr_hw_fini(hwmgr);
189
190 return 0;
191}
192
193static int pp_late_init(void *handle)
194{
195 struct amdgpu_device *adev = handle;
196 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
197 int ret = 0;
198
199 ret = pp_check(hwmgr);
200
201 if (ret == 0)
202 pp_dpm_dispatch_tasks(hwmgr,
203 AMD_PP_TASK_COMPLETE_INIT, NULL);
204
205 return 0;
206}
207
208static void pp_late_fini(void *handle)
209{
210 struct amdgpu_device *adev = handle;
211
212 amd_powerplay_destroy(adev);
213}
214
215
216static bool pp_is_idle(void *handle)
217{
218 return false;
219}
220
221static int pp_wait_for_idle(void *handle)
222{
223 return 0;
224}
225
226static int pp_sw_reset(void *handle)
227{
228 return 0;
229}
230
231static int pp_set_powergating_state(void *handle,
232 enum amd_powergating_state state)
233{
234 struct amdgpu_device *adev = handle;
235 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
236 int ret = 0;
237
238 ret = pp_check(hwmgr);
239
240 if (ret)
241 return ret;
242
243 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
244 pr_info("%s was not implemented.\n", __func__);
245 return 0;
246 }
247
248 /* Enable/disable GFX per cu powergating through SMU */
249 return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
250 state == AMD_PG_STATE_GATE);
251}
252
253static int pp_suspend(void *handle)
254{
255 struct amdgpu_device *adev = handle;
256 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
257 int ret = 0;
258
259 ret = pp_check(hwmgr);
260 if (ret == 0)
261 hwmgr_hw_suspend(hwmgr);
262 return 0;
263}
264
265static int pp_resume(void *handle)
266{
267 struct amdgpu_device *adev = handle;
268 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
269 int ret;
270
271 ret = pp_check(hwmgr);
272
273 if (ret < 0)
274 return ret;
275
276 if (hwmgr->smumgr_funcs->start_smu == NULL)
277 return -EINVAL;
278
279 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
280 pr_err("smc start failed\n");
281 hwmgr->smumgr_funcs->smu_fini(hwmgr);
282 return -EINVAL;
283 }
284
285 if (ret == PP_DPM_DISABLED)
286 return 0;
287
288 return hwmgr_hw_resume(hwmgr);
289}
290
291static int pp_set_clockgating_state(void *handle,
292 enum amd_clockgating_state state)
293{
294 return 0;
295}
296
297static const struct amd_ip_funcs pp_ip_funcs = {
298 .name = "powerplay",
299 .early_init = pp_early_init,
300 .late_init = pp_late_init,
301 .sw_init = pp_sw_init,
302 .sw_fini = pp_sw_fini,
303 .hw_init = pp_hw_init,
304 .hw_fini = pp_hw_fini,
305 .late_fini = pp_late_fini,
306 .suspend = pp_suspend,
307 .resume = pp_resume,
308 .is_idle = pp_is_idle,
309 .wait_for_idle = pp_wait_for_idle,
310 .soft_reset = pp_sw_reset,
311 .set_clockgating_state = pp_set_clockgating_state,
312 .set_powergating_state = pp_set_powergating_state,
313};
314
315const struct amdgpu_ip_block_version pp_smu_ip_block =
316{
317 .type = AMD_IP_BLOCK_TYPE_SMC,
318 .major = 1,
319 .minor = 0,
320 .rev = 0,
321 .funcs = &pp_ip_funcs,
322};
323
324static int pp_dpm_load_fw(void *handle)
325{
326 return 0;
327}
328
329static int pp_dpm_fw_loading_complete(void *handle)
330{
331 return 0;
332}
333
334static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
335{
336 struct pp_hwmgr *hwmgr = handle;
337 int ret = 0;
338
339 ret = pp_check(hwmgr);
340
341 if (ret)
342 return ret;
343
344 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
345 pr_info("%s was not implemented.\n", __func__);
346 return 0;
347 }
348
349 return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
350}
351
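/*
 * UMD pstate handling: entering one of the PROFILE_* levels pins clocks for
 * user-mode drivers, so GFX clock/power gating is ungated on entry and
 * re-gated once the profile level is left again.
 */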
352static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
353 enum amd_dpm_forced_level *level)
354{
355 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
356 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
357 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
358 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
359
360 if (!(hwmgr->dpm_level & profile_mode_mask)) {
361 /* enter umd pstate, save current level, disable gfx cg*/
362 if (*level & profile_mode_mask) {
363 hwmgr->saved_dpm_level = hwmgr->dpm_level;
364 hwmgr->en_umd_pstate = true;
365 cgs_set_clockgating_state(hwmgr->device,
366 AMD_IP_BLOCK_TYPE_GFX,
367 AMD_CG_STATE_UNGATE);
368 cgs_set_powergating_state(hwmgr->device,
369 AMD_IP_BLOCK_TYPE_GFX,
370 AMD_PG_STATE_UNGATE);
371 }
372 } else {
373 /* exit umd pstate, restore level, enable gfx cg*/
374 if (!(*level & profile_mode_mask)) {
375 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
376 *level = hwmgr->saved_dpm_level;
377 hwmgr->en_umd_pstate = false;
378 cgs_set_clockgating_state(hwmgr->device,
379 AMD_IP_BLOCK_TYPE_GFX,
380 AMD_CG_STATE_GATE);
381 cgs_set_powergating_state(hwmgr->device,
382 AMD_IP_BLOCK_TYPE_GFX,
383 AMD_PG_STATE_GATE);
384 }
385 }
386}
387
388static int pp_dpm_force_performance_level(void *handle,
389 enum amd_dpm_forced_level level)
390{
391 struct pp_hwmgr *hwmgr = handle;
392 int ret = 0;
393
394 ret = pp_check(hwmgr);
395
396 if (ret)
397 return ret;
398
399 if (level == hwmgr->dpm_level)
400 return 0;
401
402 mutex_lock(&hwmgr->smu_lock);
403 pp_dpm_en_umd_pstate(hwmgr, &level);
404 hwmgr->request_dpm_level = level;
405 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
406 mutex_unlock(&hwmgr->smu_lock);
407
408 return 0;
409}
410
411static enum amd_dpm_forced_level pp_dpm_get_performance_level(
412 void *handle)
413{
414 struct pp_hwmgr *hwmgr = handle;
415 int ret = 0;
416 enum amd_dpm_forced_level level;
417
418 ret = pp_check(hwmgr);
419
420 if (ret)
421 return ret;
422
423 mutex_lock(&hwmgr->smu_lock);
424 level = hwmgr->dpm_level;
425 mutex_unlock(&hwmgr->smu_lock);
426 return level;
427}
428
429static uint32_t pp_dpm_get_sclk(void *handle, bool low)
430{
431 struct pp_hwmgr *hwmgr = handle;
432 int ret = 0;
433 uint32_t clk = 0;
434
435 ret = pp_check(hwmgr);
436
437 if (ret)
438 return ret;
439
440 if (hwmgr->hwmgr_func->get_sclk == NULL) {
441 pr_info("%s was not implemented.\n", __func__);
442 return 0;
443 }
444 mutex_lock(&hwmgr->smu_lock);
445 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
446 mutex_unlock(&hwmgr->smu_lock);
447 return clk;
448}
449
450static uint32_t pp_dpm_get_mclk(void *handle, bool low)
451{
452 struct pp_hwmgr *hwmgr = handle;
453 int ret = 0;
454 uint32_t clk = 0;
455
456 ret = pp_check(hwmgr);
457
458 if (ret)
459 return ret;
460
461 if (hwmgr->hwmgr_func->get_mclk == NULL) {
462 pr_info("%s was not implemented.\n", __func__);
463 return 0;
464 }
465 mutex_lock(&hwmgr->smu_lock);
466 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
467 mutex_unlock(&hwmgr->smu_lock);
468 return clk;
469}
470
471static void pp_dpm_powergate_vce(void *handle, bool gate)
472{
473 struct pp_hwmgr *hwmgr = handle;
474 int ret = 0;
475
476 ret = pp_check(hwmgr);
477
478 if (ret)
479 return;
480
481 if (hwmgr->hwmgr_func->powergate_vce == NULL) {
482 pr_info("%s was not implemented.\n", __func__);
483 return;
484 }
485 mutex_lock(&hwmgr->smu_lock);
486 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
487 mutex_unlock(&hwmgr->smu_lock);
488}
489
490static void pp_dpm_powergate_uvd(void *handle, bool gate)
491{
492 struct pp_hwmgr *hwmgr = handle;
493 int ret = 0;
494
495 ret = pp_check(hwmgr);
496
497 if (ret)
498 return;
499
500 if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
501 pr_info("%s was not implemented.\n", __func__);
502 return;
503 }
504 mutex_lock(&hwmgr->smu_lock);
505 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
506 mutex_unlock(&hwmgr->smu_lock);
507}
508
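/*
 * Runtime task dispatch (e.g. AMD_PP_TASK_READJUST_POWER_STATE,
 * AMD_PP_TASK_COMPLETE_INIT) funnels into hwmgr_handle_task() under
 * smu_lock.
 */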
509static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
510 enum amd_pm_state_type *user_state)
511{
512 int ret = 0;
513 struct pp_hwmgr *hwmgr = handle;
514
515 ret = pp_check(hwmgr);
516
517 if (ret)
518 return ret;
519
520 mutex_lock(&hwmgr->smu_lock);
521 ret = hwmgr_handle_task(hwmgr, task_id, user_state);
522 mutex_unlock(&hwmgr->smu_lock);
523
524 return ret;
525}
526
527static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
528{
529 struct pp_hwmgr *hwmgr = handle;
530 struct pp_power_state *state;
531 int ret = 0;
532 enum amd_pm_state_type pm_type;
533
534 ret = pp_check(hwmgr);
535
536 if (ret)
537 return ret;
538
539 if (hwmgr->current_ps == NULL)
540 return -EINVAL;
541
542 mutex_lock(&hwmgr->smu_lock);
543
544 state = hwmgr->current_ps;
545
546 switch (state->classification.ui_label) {
547 case PP_StateUILabel_Battery:
548 pm_type = POWER_STATE_TYPE_BATTERY;
549 break;
550 case PP_StateUILabel_Balanced:
551 pm_type = POWER_STATE_TYPE_BALANCED;
552 break;
553 case PP_StateUILabel_Performance:
554 pm_type = POWER_STATE_TYPE_PERFORMANCE;
555 break;
556 default:
557 if (state->classification.flags & PP_StateClassificationFlag_Boot)
558 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
559 else
560 pm_type = POWER_STATE_TYPE_DEFAULT;
561 break;
562 }
563 mutex_unlock(&hwmgr->smu_lock);
564
565 return pm_type;
566}
567
568static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
569{
570 struct pp_hwmgr *hwmgr = handle;
571 int ret = 0;
572
573 ret = pp_check(hwmgr);
574
575 if (ret)
576 return;
577
578 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
579 pr_info("%s was not implemented.\n", __func__);
580 return;
581 }
582 mutex_lock(&hwmgr->smu_lock);
583 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
584 mutex_unlock(&hwmgr->smu_lock);
585}
586
587static uint32_t pp_dpm_get_fan_control_mode(void *handle)
588{
589 struct pp_hwmgr *hwmgr = handle;
590 int ret = 0;
591 uint32_t mode = 0;
592
593 ret = pp_check(hwmgr);
594
595 if (ret)
596 return ret;
597
598 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
599 pr_info("%s was not implemented.\n", __func__);
600 return 0;
601 }
602 mutex_lock(&hwmgr->smu_lock);
603 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
604 mutex_unlock(&hwmgr->smu_lock);
605 return mode;
606}
607
608static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
609{
610 struct pp_hwmgr *hwmgr = handle;
611 int ret = 0;
612
613 ret = pp_check(hwmgr);
614
615 if (ret)
616 return ret;
617
618 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
619 pr_info("%s was not implemented.\n", __func__);
620 return 0;
621 }
622 mutex_lock(&hwmgr->smu_lock);
623 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
624 mutex_unlock(&hwmgr->smu_lock);
625 return ret;
626}
627
628static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
629{
630 struct pp_hwmgr *hwmgr = handle;
631 int ret = 0;
632
633 ret = pp_check(hwmgr);
634
635 if (ret)
636 return ret;
637
638 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
639 pr_info("%s was not implemented.\n", __func__);
640 return 0;
641 }
642
643 mutex_lock(&hwmgr->smu_lock);
644 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
645 mutex_unlock(&hwmgr->smu_lock);
646 return ret;
647}
648
649static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
650{
651 struct pp_hwmgr *hwmgr = handle;
652 int ret = 0;
653
654 ret = pp_check(hwmgr);
655
656 if (ret)
657 return ret;
658
659 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
660 return -EINVAL;
661
662 mutex_lock(&hwmgr->smu_lock);
663 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
664 mutex_unlock(&hwmgr->smu_lock);
665 return ret;
666}
667
668static int pp_dpm_get_pp_num_states(void *handle,
669 struct pp_states_info *data)
670{
671 struct pp_hwmgr *hwmgr = handle;
672 int i;
673 int ret = 0;
674
675 memset(data, 0, sizeof(*data));
676
677 ret = pp_check(hwmgr);
678
679 if (ret)
680 return ret;
681
682 if (hwmgr->ps == NULL)
683 return -EINVAL;
684
685 mutex_lock(&hwmgr->smu_lock);
686
687 data->nums = hwmgr->num_ps;
688
689 for (i = 0; i < hwmgr->num_ps; i++) {
690 struct pp_power_state *state = (struct pp_power_state *)
691 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
692 switch (state->classification.ui_label) {
693 case PP_StateUILabel_Battery:
694 data->states[i] = POWER_STATE_TYPE_BATTERY;
695 break;
696 case PP_StateUILabel_Balanced:
697 data->states[i] = POWER_STATE_TYPE_BALANCED;
698 break;
699 case PP_StateUILabel_Performance:
700 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
701 break;
702 default:
703 if (state->classification.flags & PP_StateClassificationFlag_Boot)
704 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
705 else
706 data->states[i] = POWER_STATE_TYPE_DEFAULT;
707 }
708 }
709 mutex_unlock(&hwmgr->smu_lock);
710 return 0;
711}
712
713static int pp_dpm_get_pp_table(void *handle, char **table)
714{
715 struct pp_hwmgr *hwmgr = handle;
716 int ret = 0;
717 int size = 0;
718
719 ret = pp_check(hwmgr);
720
721 if (ret)
722 return ret;
723
724 if (!hwmgr->soft_pp_table)
725 return -EINVAL;
726
727 mutex_lock(&hwmgr->smu_lock);
728 *table = (char *)hwmgr->soft_pp_table;
729 size = hwmgr->soft_pp_table_size;
730 mutex_unlock(&hwmgr->smu_lock);
731 return size;
732}
733
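/*
 * Full re-init of the hardware manager; used after the soft powerplay table
 * has been overridden through pp_dpm_set_pp_table().
 */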
734static int amd_powerplay_reset(void *handle)
735{
736 struct pp_hwmgr *hwmgr = handle;
737 int ret;
738
739 ret = pp_check(hwmgr);
740 if (ret)
741 return ret;
742
743 ret = hwmgr_hw_fini(hwmgr);
744 if (ret)
745 return ret;
746
747 ret = hwmgr_hw_init(hwmgr);
748 if (ret)
749 return ret;
750
751 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
752}
753
754static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
755{
756 struct pp_hwmgr *hwmgr = handle;
757 int ret = 0;
758
759 ret = pp_check(hwmgr);
760
761 if (ret)
762 return ret;
763
764 mutex_lock(&hwmgr->smu_lock);
765 if (!hwmgr->hardcode_pp_table) {
766 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
767 hwmgr->soft_pp_table_size,
768 GFP_KERNEL);
769 if (!hwmgr->hardcode_pp_table) {
770 mutex_unlock(&hwmgr->smu_lock);
771 return -ENOMEM;
772 }
773 }
774
775 memcpy(hwmgr->hardcode_pp_table, buf, size);
776
777 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
778 mutex_unlock(&hwmgr->smu_lock);
779
780 ret = amd_powerplay_reset(handle);
781 if (ret)
782 return ret;
783
784 if (hwmgr->hwmgr_func->avfs_control) {
785 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
786 if (ret)
787 return ret;
788 }
789
790 return 0;
791}
792
793static int pp_dpm_force_clock_level(void *handle,
794 enum pp_clock_type type, uint32_t mask)
795{
796 struct pp_hwmgr *hwmgr = handle;
797 int ret = 0;
798
799 ret = pp_check(hwmgr);
800
801 if (ret)
802 return ret;
803
804 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
805 pr_info("%s was not implemented.\n", __func__);
806 return 0;
807 }
808 mutex_lock(&hwmgr->smu_lock);
809 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
810 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
811 else
812 ret = -EINVAL;
813 mutex_unlock(&hwmgr->smu_lock);
814 return ret;
815}
816
817static int pp_dpm_print_clock_levels(void *handle,
818 enum pp_clock_type type, char *buf)
819{
820 struct pp_hwmgr *hwmgr = handle;
821 int ret = 0;
822
823 ret = pp_check(hwmgr);
824
825 if (ret)
826 return ret;
827
828 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
829 pr_info("%s was not implemented.\n", __func__);
830 return 0;
831 }
832 mutex_lock(&hwmgr->smu_lock);
833 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
834 mutex_unlock(&hwmgr->smu_lock);
835 return ret;
836}
837
838static int pp_dpm_get_sclk_od(void *handle)
839{
840 struct pp_hwmgr *hwmgr = handle;
841 int ret = 0;
842
843 ret = pp_check(hwmgr);
844
845 if (ret)
846 return ret;
847
848 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
849 pr_info("%s was not implemented.\n", __func__);
850 return 0;
851 }
852 mutex_lock(&hwmgr->smu_lock);
853 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
854 mutex_unlock(&hwmgr->smu_lock);
855 return ret;
856}
857
858static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
859{
860 struct pp_hwmgr *hwmgr = handle;
861 int ret = 0;
862
863 ret = pp_check(hwmgr);
864
865 if (ret)
866 return ret;
867
868 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
869 pr_info("%s was not implemented.\n", __func__);
870 return 0;
871 }
872
873 mutex_lock(&hwmgr->smu_lock);
874 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
875 mutex_unlock(&hwmgr->smu_lock);
876 return ret;
877}
878
879static int pp_dpm_get_mclk_od(void *handle)
880{
881 struct pp_hwmgr *hwmgr = handle;
882 int ret = 0;
883
884 ret = pp_check(hwmgr);
885
886 if (ret)
887 return ret;
888
889 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
890 pr_info("%s was not implemented.\n", __func__);
891 return 0;
892 }
893 mutex_lock(&hwmgr->smu_lock);
894 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
895 mutex_unlock(&hwmgr->smu_lock);
896 return ret;
897}
898
899static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
900{
901 struct pp_hwmgr *hwmgr = handle;
902 int ret = 0;
903
904 ret = pp_check(hwmgr);
905
906 if (ret)
907 return ret;
908
909 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
910 pr_info("%s was not implemented.\n", __func__);
911 return 0;
912 }
913 mutex_lock(&hwmgr->smu_lock);
914 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
915 mutex_unlock(&hwmgr->smu_lock);
916 return ret;
917}
918
919static int pp_dpm_read_sensor(void *handle, int idx,
920 void *value, int *size)
921{
922 struct pp_hwmgr *hwmgr = handle;
923 int ret = 0;
924
925 ret = pp_check(hwmgr);
926 if (ret)
927 return ret;
928
929 if (value == NULL)
930 return -EINVAL;
931
932 switch (idx) {
933 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
934 *((uint32_t *)value) = hwmgr->pstate_sclk;
935 return 0;
936 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
937 *((uint32_t *)value) = hwmgr->pstate_mclk;
938 return 0;
939 default:
940 mutex_lock(&hwmgr->smu_lock);
941 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
942 mutex_unlock(&hwmgr->smu_lock);
943 return ret;
944 }
945}
946
947static struct amd_vce_state*
948pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
949{
950 struct pp_hwmgr *hwmgr = handle;
951 int ret = 0;
952
953 ret = pp_check(hwmgr);
954
955 if (ret)
956 return NULL;
957
958 if (hwmgr && idx < hwmgr->num_vce_state_tables)
959 return &hwmgr->vce_states[idx];
960 return NULL;
961}
962
963static int pp_get_power_profile_mode(void *handle, char *buf)
964{
965 struct pp_hwmgr *hwmgr = handle;
966
967 if (!buf || pp_check(hwmgr))
968 return -EINVAL;
969
970 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
971 pr_info("%s was not implemented.\n", __func__);
972 return snprintf(buf, PAGE_SIZE, "\n");
973 }
974
975 return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
976}
977
978static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
979{
980 struct pp_hwmgr *hwmgr = handle;
981 int ret = -EINVAL;
982
983 if (pp_check(hwmgr))
984 return -EINVAL;
985
986 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
987 pr_info("%s was not implemented.\n", __func__);
988 return -EINVAL;
989 }
990 mutex_lock(&hwmgr->smu_lock);
991 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
992 ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
993 mutex_unlock(&hwmgr->smu_lock);
994 return ret;
995}
996
997static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
998{
999 struct pp_hwmgr *hwmgr = handle;
1000
1001 if (pp_check(hwmgr))
1002 return -EINVAL;
1003
1004 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
1005 pr_info("%s was not implemented.\n", __func__);
1006 return -EINVAL;
1007 }
1008
1009 return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
1010}
1011
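/*
 * Enable/disable a power profile: workload_mask tracks every requested
 * profile, the highest-priority bit still set selects the workload that is
 * programmed, and nothing is applied while the DPM level is forced to
 * manual.
 */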
1012static int pp_dpm_switch_power_profile(void *handle,
1013 enum PP_SMC_POWER_PROFILE type, bool en)
1014{
1015 struct pp_hwmgr *hwmgr = handle;
1016 long workload;
1017 uint32_t index;
1018
1019 if (pp_check(hwmgr))
1020 return -EINVAL;
1021
1022 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
1023 pr_info("%s was not implemented.\n", __func__);
1024 return -EINVAL;
1025 }
1026
1027 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1028 return -EINVAL;
1029
1030 mutex_lock(&hwmgr->smu_lock);
1031
1032 if (!en) {
1033 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
1034 index = fls(hwmgr->workload_mask);
1035 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
1036 workload = hwmgr->workload_setting[index];
1037 } else {
1038 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
1039 index = fls(hwmgr->workload_mask);
1040 index = index <= Workload_Policy_Max ? index - 1 : 0;
1041 workload = hwmgr->workload_setting[index];
1042 }
1043
1044 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1045 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
1046 mutex_unlock(&hwmgr->smu_lock);
1047
1048 return 0;
1049}
1050
1051static int pp_dpm_notify_smu_memory_info(void *handle,
1052 uint32_t virtual_addr_low,
1053 uint32_t virtual_addr_hi,
1054 uint32_t mc_addr_low,
1055 uint32_t mc_addr_hi,
1056 uint32_t size)
1057{
1058 struct pp_hwmgr *hwmgr = handle;
1059 int ret = 0;
1060
1061 ret = pp_check(hwmgr);
1062
1063 if (ret)
1064 return ret;
1065
1066 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
1067 pr_info("%s was not implemented.\n", __func__);
1068 return -EINVAL;
1069 }
1070
1071 mutex_lock(&hwmgr->smu_lock);
1072
1073 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
1074 virtual_addr_hi, mc_addr_low, mc_addr_hi,
1075 size);
1076
1077 mutex_unlock(&hwmgr->smu_lock);
1078
1079 return ret;
1080}
1081
1082static int pp_set_power_limit(void *handle, uint32_t limit)
1083{
1084 struct pp_hwmgr *hwmgr = handle;
1085 int ret = 0;
1086
1087 ret = pp_check(hwmgr);
1088
1089 if (ret)
1090 return ret;
1091
1092 if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1093 pr_info("%s was not implemented.\n", __func__);
1094 return -EINVAL;
1095 }
1096
1097 if (limit == 0)
1098 limit = hwmgr->default_power_limit;
1099
1100 if (limit > hwmgr->default_power_limit)
1101 return -EINVAL;
1102
1103 mutex_lock(&hwmgr->smu_lock);
1104 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1105 hwmgr->power_limit = limit;
1106 mutex_unlock(&hwmgr->smu_lock);
1107 return ret;
1108}
1109
1110static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1111{
1112 struct pp_hwmgr *hwmgr = handle;
1113 int ret = 0;
1114
1115 ret = pp_check(hwmgr);
1116
1117 if (ret)
1118 return ret;
1119
1120 if (limit == NULL)
1121 return -EINVAL;
1122
1123 mutex_lock(&hwmgr->smu_lock);
1124
1125 if (default_limit)
1126 *limit = hwmgr->default_power_limit;
1127 else
1128 *limit = hwmgr->power_limit;
1129
1130 mutex_unlock(&hwmgr->smu_lock);
1131
1132 return ret;
1133}
1134
1135static int pp_display_configuration_change(void *handle,
1136 const struct amd_pp_display_configuration *display_config)
1137{
1138 struct pp_hwmgr *hwmgr = handle;
1139 int ret = 0;
1140
1141 ret = pp_check(hwmgr);
1142
1143 if (ret)
1144 return ret;
1145
1146 mutex_lock(&hwmgr->smu_lock);
1147 phm_store_dal_configuration_data(hwmgr, display_config);
1148 mutex_unlock(&hwmgr->smu_lock);
1149 return 0;
1150}
1151
1152static int pp_get_display_power_level(void *handle,
1153 struct amd_pp_simple_clock_info *output)
1154{
1155 struct pp_hwmgr *hwmgr = handle;
1156 int ret = 0;
1157
1158 ret = pp_check(hwmgr);
1159
1160 if (ret)
1161 return ret;
1162
1163 if (output == NULL)
1164 return -EINVAL;
1165
1166 mutex_lock(&hwmgr->smu_lock);
1167 ret = phm_get_dal_power_level(hwmgr, output);
1168 mutex_unlock(&hwmgr->smu_lock);
1169 return ret;
1170}
1171
1172static int pp_get_current_clocks(void *handle,
1173 struct amd_pp_clock_info *clocks)
1174{
1175 struct amd_pp_simple_clock_info simple_clocks;
1176 struct pp_clock_info hw_clocks;
1177 struct pp_hwmgr *hwmgr = handle;
1178 int ret = 0;
1179
1180 ret = pp_check(hwmgr);
1181
1182 if (ret)
1183 return ret;
1184
1185 mutex_lock(&hwmgr->smu_lock);
1186
1187 phm_get_dal_power_level(hwmgr, &simple_clocks);
1188
1189 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1190 PHM_PlatformCaps_PowerContainment))
1191 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1192 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1193 else
1194 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1195 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1196
1197 if (ret) {
 1198 pr_info("Error in phm_get_clock_info\n");
1199 mutex_unlock(&hwmgr->smu_lock);
1200 return -EINVAL;
1201 }
1202
1203 clocks->min_engine_clock = hw_clocks.min_eng_clk;
1204 clocks->max_engine_clock = hw_clocks.max_eng_clk;
1205 clocks->min_memory_clock = hw_clocks.min_mem_clk;
1206 clocks->max_memory_clock = hw_clocks.max_mem_clk;
1207 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1208 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1209
1210 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1211 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1212
1213 clocks->max_clocks_state = simple_clocks.level;
1214
1215 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1216 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1217 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1218 }
1219 mutex_unlock(&hwmgr->smu_lock);
1220 return 0;
1221}
1222
1223static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1224{
1225 struct pp_hwmgr *hwmgr = handle;
1226 int ret = 0;
1227
1228 ret = pp_check(hwmgr);
1229
1230 if (ret)
1231 return ret;
1232
1233 if (clocks == NULL)
1234 return -EINVAL;
1235
1236 mutex_lock(&hwmgr->smu_lock);
1237 ret = phm_get_clock_by_type(hwmgr, type, clocks);
1238 mutex_unlock(&hwmgr->smu_lock);
1239 return ret;
1240}
1241
1242static int pp_get_clock_by_type_with_latency(void *handle,
1243 enum amd_pp_clock_type type,
1244 struct pp_clock_levels_with_latency *clocks)
1245{
1246 struct pp_hwmgr *hwmgr = handle;
1247 int ret = 0;
1248
1249 ret = pp_check(hwmgr);
1250 if (ret)
1251 return ret;
1252
1253 if (!clocks)
1254 return -EINVAL;
1255
1256 mutex_lock(&hwmgr->smu_lock);
1257 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1258 mutex_unlock(&hwmgr->smu_lock);
1259 return ret;
1260}
1261
1262static int pp_get_clock_by_type_with_voltage(void *handle,
1263 enum amd_pp_clock_type type,
1264 struct pp_clock_levels_with_voltage *clocks)
1265{
1266 struct pp_hwmgr *hwmgr = handle;
1267 int ret = 0;
1268
1269 ret = pp_check(hwmgr);
1270 if (ret)
1271 return ret;
1272
1273 if (!clocks)
1274 return -EINVAL;
1275
1276 mutex_lock(&hwmgr->smu_lock);
1277
1278 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1279
1280 mutex_unlock(&hwmgr->smu_lock);
1281 return ret;
1282}
1283
1284static int pp_set_watermarks_for_clocks_ranges(void *handle,
1285 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1286{
1287 struct pp_hwmgr *hwmgr = handle;
1288 int ret = 0;
1289
1290 ret = pp_check(hwmgr);
1291 if (ret)
1292 return ret;
1293
1294 if (!wm_with_clock_ranges)
1295 return -EINVAL;
1296
1297 mutex_lock(&hwmgr->smu_lock);
1298 ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1299 wm_with_clock_ranges);
1300 mutex_unlock(&hwmgr->smu_lock);
1301
1302 return ret;
1303}
1304
1305static int pp_display_clock_voltage_request(void *handle,
1306 struct pp_display_clock_request *clock)
1307{
1308 struct pp_hwmgr *hwmgr = handle;
1309 int ret = 0;
1310
1311 ret = pp_check(hwmgr);
1312 if (ret)
1313 return ret;
1314
1315 if (!clock)
1316 return -EINVAL;
1317
1318 mutex_lock(&hwmgr->smu_lock);
1319 ret = phm_display_clock_voltage_request(hwmgr, clock);
1320 mutex_unlock(&hwmgr->smu_lock);
1321
1322 return ret;
1323}
1324
1325static int pp_get_display_mode_validation_clocks(void *handle,
1326 struct amd_pp_simple_clock_info *clocks)
1327{
1328 struct pp_hwmgr *hwmgr = handle;
1329 int ret = 0;
1330
1331 ret = pp_check(hwmgr);
1332
1333 if (ret)
1334 return ret;
1335
1336 if (clocks == NULL)
1337 return -EINVAL;
1338
1339 mutex_lock(&hwmgr->smu_lock);
1340
1341 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1342 ret = phm_get_max_high_clocks(hwmgr, clocks);
1343
1344 mutex_unlock(&hwmgr->smu_lock);
1345 return ret;
1346}
1347
1348static int pp_set_mmhub_powergating_by_smu(void *handle)
1349{
1350 struct pp_hwmgr *hwmgr = handle;
1351 int ret = 0;
1352
1353 ret = pp_check(hwmgr);
1354
1355 if (ret)
1356 return ret;
1357
1358 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
1359 pr_info("%s was not implemented.\n", __func__);
1360 return 0;
1361 }
1362
1363 return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
1364}
1365
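/*
 * amd_pm_funcs table consumed by the amdgpu pm code and, below the
 * "export to DC" marker, by the display core.
 */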
1366static const struct amd_pm_funcs pp_dpm_funcs = {
1367 .load_firmware = pp_dpm_load_fw,
1368 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1369 .force_performance_level = pp_dpm_force_performance_level,
1370 .get_performance_level = pp_dpm_get_performance_level,
1371 .get_current_power_state = pp_dpm_get_current_power_state,
1372 .powergate_vce = pp_dpm_powergate_vce,
1373 .powergate_uvd = pp_dpm_powergate_uvd,
1374 .dispatch_tasks = pp_dpm_dispatch_tasks,
1375 .set_fan_control_mode = pp_dpm_set_fan_control_mode,
1376 .get_fan_control_mode = pp_dpm_get_fan_control_mode,
1377 .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
1378 .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
1379 .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1380 .get_pp_num_states = pp_dpm_get_pp_num_states,
1381 .get_pp_table = pp_dpm_get_pp_table,
1382 .set_pp_table = pp_dpm_set_pp_table,
1383 .force_clock_level = pp_dpm_force_clock_level,
1384 .print_clock_levels = pp_dpm_print_clock_levels,
1385 .get_sclk_od = pp_dpm_get_sclk_od,
1386 .set_sclk_od = pp_dpm_set_sclk_od,
1387 .get_mclk_od = pp_dpm_get_mclk_od,
1388 .set_mclk_od = pp_dpm_set_mclk_od,
1389 .read_sensor = pp_dpm_read_sensor,
1390 .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1391 .switch_power_profile = pp_dpm_switch_power_profile,
1392 .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1393 .notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
1394 .get_power_profile_mode = pp_get_power_profile_mode,
1395 .set_power_profile_mode = pp_set_power_profile_mode,
1396 .odn_edit_dpm_table = pp_odn_edit_dpm_table,
1397 .set_power_limit = pp_set_power_limit,
1398 .get_power_limit = pp_get_power_limit,
1399/* export to DC */
1400 .get_sclk = pp_dpm_get_sclk,
1401 .get_mclk = pp_dpm_get_mclk,
1402 .display_configuration_change = pp_display_configuration_change,
1403 .get_display_power_level = pp_get_display_power_level,
1404 .get_current_clocks = pp_get_current_clocks,
1405 .get_clock_by_type = pp_get_clock_by_type,
1406 .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1407 .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1408 .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1409 .display_clock_voltage_request = pp_display_clock_voltage_request,
1410 .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1411 .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
1412};