/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] = {
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = {
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);

	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus:");
	if (rps == rdev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == rdev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == rdev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

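/*
 * Vblank duration, in microseconds, of the first enabled CRTC.
 * hw_mode.clock is in kHz, so an illustrative 1080p timing (htotal 2200,
 * 45 vblank lines, 148500 kHz pixel clock) gives
 * 2200 * 45 * 1000 / 148500 ~= 666 us.  Returns 0xffffffff when no
 * display is active, i.e. vblank time is effectively unbounded.
 */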
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev_to_drm(rdev);
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vblank_in_pixels =
					radeon_crtc->hw_mode.crtc_htotal *
					(radeon_crtc->hw_mode.crtc_vblank_end -
					 radeon_crtc->hw_mode.crtc_vdisplay +
					 (radeon_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev_to_drm(rdev);
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
				break;
			}
		}
	}
	return vrefresh;
}

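/*
 * Split the scaled interval i_c = (i * r_c) / 100 into a period *p and a
 * unit *u with p ~= i_c / 4^u, picking u just large enough that p fits in
 * p_b bits.  Illustrative values: i = 1000, r_c = 50, p_b = 3 gives
 * i_c = 500, b_c = 6 (bits above p_b), u = (6 + 1) / 2 = 3, and
 * p = 500 >> 6 = 7.
 */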
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

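/*
 * Derive the low/high activity thresholds around the target t: the
 * hysteresis h is scaled by the fh/fl clock ratio k (in percent) and the
 * rounded result a is split into ah and al, giving *th = t - ah and
 * *tl = t + al.  All values are in the caller's fixed-point units; fails
 * if either frequency is zero or fl > fh.
 */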
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

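/*
 * Gate the graphics clock dynamically.  Disabling is the involved path:
 * clear DYN_GFX_CLK_OFF_EN, then run an RLC request/response handshake
 * (write request 0x2, poll until the response type field reads 1, clear
 * the request) and wake the GRBM power controller, with a readback to
 * post the write.
 */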
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

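/*
 * Each SCLK frequency-setting step entry occupies two consecutive 32-bit
 * registers, hence the (index * 4 * 2) byte stride in the helpers below:
 * PART1 carries the SPLL divider and step-time fields, PART2 the
 * valid/step/post-divider enable bits.
 */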
void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

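/*
 * Program the voltage-select GPIOs for one power level.  The per-level
 * registers appear to be laid out in reverse enum order, hence
 * ix = 3 - (3 & index).  The low 32 bits of pins go into the per-level
 * CTXSW_VID_LOWER_GPIO_CNTL register; the high bits are packed
 * three-per-level into the shared VID_UPPER_GPIO_CNTL register.
 */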
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

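/*
 * The CTXSW_PROFILE_INDEX registers below use the same reversed
 * addressing: ix = 3 - (3 & index) converts an r600_power_level value
 * into its register slot before the per-level field is updated with a
 * read-modify-write.
 */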
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

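/*
 * Bring up dynamic power management: park SCLK/MCLK control, enable
 * global power management, wait out a vblank on both CRTCs, then cycle
 * the SPLL through bypass twice (waiting for SPLL_CHG_STATUS each time)
 * before handing engine/memory clock control back to the hardware.
 */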
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{
}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

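/*
 * min_temp/max_temp are in millidegrees C.  The range is clamped to
 * 0..255 C, programmed into the thermal interrupt high/low trip points
 * and the DPM throttle threshold (all in whole degrees), and mirrored
 * into rdev->pm.dpm.thermal.
 */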
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

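/*
 * Copy an ATOM clock/voltage dependency table into the driver's
 * representation.  Each record stores the clock as a little-endian
 * 16-bit low word plus an 8-bit high byte; the resulting entries are
 * kcalloc'd and later released by r600_free_extended_power_table().
 */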
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kcalloc(atom_table->ucNumEntries,
					sizeof(struct radeon_clock_voltage_dependency_entry),
					GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

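/*
 * The extended header gained one 16-bit table offset per revision, so its
 * usSize field doubles as a version check: each optional table offset
 * (VCE, UVD, SAMU, PPM, ACP, PowerTune) is only dereferenced below once
 * usSize confirms the header is large enough to contain it.
 */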
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				rdev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				rdev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

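/*
 * Pick a PCIe generation: an explicit asic_gen request wins; otherwise
 * fall back to the fastest speed that both the system capability mask
 * and default_gen allow, bottoming out at gen 1.
 */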
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

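/*
 * Map a PCIe lane count onto its 3-bit register encoding (1, 2, 4, 8, 12
 * and 16 lanes encode as 1..6 respectively); anything else, including
 * counts above 16, encodes as 0.
 */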
u8 r600_encode_pci_lane_width(u32 lanes)
{
	static const u8 encoded_lanes[] = {
		0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
	};

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
497 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
498}
499
500void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
501 u32 index, u32 step_time)
502{
503 WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
504 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
505}
506
507void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
508{
509 WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
510}
511
512void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
513{
514 WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
515}
516
517void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
518{
519 WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
520}
521
522void r600_voltage_control_enable_pins(struct radeon_device *rdev,
523 u64 mask)
524{
525 WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
526 WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
527}
528
529
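/*
 * Latch the voltage-select GPIO pattern for one hardware power level.
 * The per-level registers are indexed in reverse order of
 * enum r600_power_level (hence ix = 3 - index), and the upper GPIO bits
 * are packed as per-level 3-bit fields in VID_UPPER_GPIO_CNTL.
 */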
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

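/*
 * The CTXSW_PROFILE_INDEX register bank used by the helpers below is
 * likewise indexed in reverse order of enum r600_power_level, so each
 * helper converts with ix = 3 - index before picking its register.
 */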
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

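/*
 * The two waiters below poll (bounded by rdev->usec_timeout us per loop)
 * until the target and then the current profile index have left, or
 * reached, the given level respectively.
 */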
void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

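/*
 * Bring up dynamic PM: freeze sclk/mclk control, set the global enable,
 * wait a vblank on both CRTCs, cycle the SPLL through bypass twice
 * (waiting for SPLL_CHG_STATUS after every toggle), then hand clock
 * control back to the hardware.
 */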
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{
}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

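/*
 * Clamp the requested range to what the 8-bit DIG_THERM fields can
 * encode (0..255 degrees C) and program it.  The cached driver limits
 * stay in millidegrees; the registers take whole degrees.
 */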
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

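/*
 * Copy an ATOM clock/voltage dependency table into the driver's own
 * representation.  Clocks are split across a 16-bit little-endian low
 * word and an 8-bit high byte in the BIOS records, hence the
 * (high << 16) | low reassembly.  The caller owns the allocated array.
 */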
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

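/*
 * Each extended-header revision appends one 16-bit table offset, so the
 * header size doubles as a version check: a sub-table offset is only
 * dereferenced once usSize shows the header is new enough to contain it.
 */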
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				rdev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				rdev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);

			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
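		/*
		 * The VCE/UVD/SAMU/ACP sub-tables start with a revision
		 * byte, and each clock-info array with a one-byte entry
		 * count; the repeated "+ 1" offsets below skip those
		 * (per the layout in the ATOM pptable headers).
		 */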
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);

			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);

			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);

			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));

			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);

			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;

			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

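/*
 * Pick a PCIe generation: an explicit per-state request wins; otherwise
 * default_gen is used when the system speed mask supports it, falling
 * back to gen1.
 */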
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

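/*
 * Map a lane count onto the hardware's encoded link-width field:
 * 1->1, 2->2, 4->3, 8->4, 12->5, 16->6; anything else encodes as 0.
 */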
u8 r600_encode_pci_lane_width(u32 lanes)
{
	static const u8 encoded_lanes[] = {
		0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
	};

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}