/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"

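/**
 * amdgpu_dpm_print_class_info - dump the classification flags of a power state
 * @class: first ATOM_PPLIB classification dword
 * @class2: second ATOM_PPLIB classification dword
 *
 * Decodes the UI label and the internal ATOM_PPLIB_CLASSIFICATION* bits
 * into a human-readable form as part of a power state dump.
 */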
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

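/* Dump the ATOM_PPLIB platform capability flags of a power state. */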
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

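/*
 * Mark whether @rps is the current (c), requested (r) and/or boot (b)
 * power state in a power state dump.
 */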
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

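/*
 * Recompute the bitmask and count of enabled CRTCs.  Called when the
 * display configuration changes so that DPM can take the active heads
 * into account when selecting power states.
 */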
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}

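/*
 * Return the vertical blanking period of the first active CRTC in
 * microseconds (pixels in the blanking interval * 1000 / pixel clock
 * in kHz), or 0xffffffff when no display is enabled.
 */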
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

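/* Return the vertical refresh rate of the first active CRTC, or 0. */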
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

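/*
 * Report whether a thermal sensor can be read directly through GPU
 * registers.  External I2C controllers, and hybrid parts that need
 * special handling, return false.
 */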
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

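/*
 * Copy an ATOM clock/voltage dependency table out of the VBIOS into a
 * driver-native table.  Clocks are stored as a little-endian 16-bit
 * low word plus an 8-bit high byte: clk = low | (high << 16).
 */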
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

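/*
 * Cache the platform capability flags and the backbias/voltage
 * response times from the PowerPlayInfo table header.
 */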
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

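/*
 * Parse the optional PowerPlayInfo sub-tables: the fan table, the
 * clock/voltage dependency and phase shedding tables (v4+), the CAC
 * leakage data (v5+) and the extended-header tables (VCE, UVD, SAMU,
 * PPM, ACP, PowerTune, VDDGFX), each gated on the table sizes the
 * VBIOS actually advertises.
 */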
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);

			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;

			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

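/* Free everything allocated by amdgpu_parse_extended_power_table(). */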
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

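/*
 * Parse the thermal controller block of the PowerPlayInfo table:
 * record the fan parameters, note the internal sensor type, and for
 * known external chips register an i2c client on the bus the VBIOS
 * points at.
 */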
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];

				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

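/*
 * Resolve the PCIe gen to use: an explicit ASIC setting wins,
 * otherwise pick the fastest gen that both the system (@sys_mask, the
 * CAIL link-speed flags) and @default_gen allow, falling back to gen1.
 */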
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

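/* Return the idx'th VCE clock state parsed from the VBIOS, or NULL. */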
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

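/*
 * Return the minimum (@low) or maximum GFX clock.  The SW SMU reports
 * the range in MHz, hence the * 100 conversion to match the units used
 * by the powerplay get_sclk/get_mclk callbacks.
 */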
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;
	} else {
		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
	}
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;
	} else {
		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
	}
}

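/*
 * Gate or ungate an IP block through the SMU, routing to either the SW
 * SMU or the powerplay backend.  UVD and VCE requests additionally take
 * adev->pm.mutex to avoid the deadlock documented in the comment below.
 */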
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	bool swsmu = is_support_sw_smu(adev);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (swsmu) {
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		} else if (adev->powerplay.pp_funcs &&
			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 * INFO: task ocltst:2028 blocked for more than 120 seconds.
			 * Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 * ocltst          D    0  2028   2026 0x00000000
			 * Call Trace:
			 * __schedule+0x2c0/0x870
			 * schedule+0x2c/0x70
			 * schedule_preempt_disabled+0xe/0x10
			 * __mutex_lock.isra.9+0x26d/0x4e0
			 * __mutex_lock_slowpath+0x13/0x20
			 * ? __mutex_lock_slowpath+0x13/0x20
			 * mutex_lock+0x2f/0x40
			 * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else if (adev->powerplay.pp_funcs &&
			 adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	return ret;
}

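/*
 * BACO ("bus active, chip off") powers the chip down while keeping the
 * PCIe link alive; it is used both for power saving and, via
 * amdgpu_dpm_baco_reset(), as a reset mechanism.
 */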
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	}

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_exit(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
	}

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_set_mp1_state(&adev->smu, mp1_state);
	} else if (adev->powerplay.pp_funcs &&
		   adev->powerplay.pp_funcs->set_mp1_state) {
		ret = adev->powerplay.pp_funcs->set_mp1_state(
			adev->powerplay.pp_handle,
			mp1_state);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	bool baco_cap;

	if (is_support_sw_smu(adev)) {
		return smu_baco_is_support(smu);
	} else {
		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
			return false;

		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
			return false;

		return baco_cap ? true : false;
	}
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev)) {
		return smu_mode2_reset(smu);
	} else {
		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
			return -ENOENT;

		return pp_funcs->asic_reset_mode_2(pp_handle);
	}
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	dev_info(adev->dev, "GPU BACO reset\n");

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;

		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!pp_funcs
		    || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
		if (ret)
			return ret;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
		if (ret)
			return ret;
	}

	return 0;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset_is_support(smu);

	return false;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset(smu);

	return -EOPNOTSUPP;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_switch_power_profile(&adev->smu, type, en);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->switch_power_profile)
		ret = adev->powerplay.pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->set_xgmi_pstate)
		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		ret = smu_set_df_cstate(smu, cstate);
	else if (pp_funcs &&
		 pp_funcs->set_df_cstate)
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_allow_xgmi_power_down(smu, en);

	return 0;
}
1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include "drmP.h"
26#include "amdgpu.h"
27#include "amdgpu_atombios.h"
28#include "amdgpu_i2c.h"
29#include "amdgpu_dpm.h"
30#include "atom.h"
31
32void amdgpu_dpm_print_class_info(u32 class, u32 class2)
33{
34 printk("\tui class: ");
35 switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
36 case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
37 default:
38 printk("none\n");
39 break;
40 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
41 printk("battery\n");
42 break;
43 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
44 printk("balanced\n");
45 break;
46 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
47 printk("performance\n");
48 break;
49 }
50 printk("\tinternal class: ");
51 if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
52 (class2 == 0))
53 printk("none");
54 else {
55 if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
56 printk("boot ");
57 if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
58 printk("thermal ");
59 if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
60 printk("limited_pwr ");
61 if (class & ATOM_PPLIB_CLASSIFICATION_REST)
62 printk("rest ");
63 if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
64 printk("forced ");
65 if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
66 printk("3d_perf ");
67 if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
68 printk("ovrdrv ");
69 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
70 printk("uvd ");
71 if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
72 printk("3d_low ");
73 if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
74 printk("acpi ");
75 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
76 printk("uvd_hd2 ");
77 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
78 printk("uvd_hd ");
79 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
80 printk("uvd_sd ");
81 if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
82 printk("limited_pwr2 ");
83 if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
84 printk("ulv ");
85 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
86 printk("uvd_mvc ");
87 }
88 printk("\n");
89}
90
91void amdgpu_dpm_print_cap_info(u32 caps)
92{
93 printk("\tcaps: ");
94 if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
95 printk("single_disp ");
96 if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
97 printk("video ");
98 if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
99 printk("no_dc ");
100 printk("\n");
101}
102
103void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
104 struct amdgpu_ps *rps)
105{
106 printk("\tstatus: ");
107 if (rps == adev->pm.dpm.current_ps)
108 printk("c ");
109 if (rps == adev->pm.dpm.requested_ps)
110 printk("r ");
111 if (rps == adev->pm.dpm.boot_ps)
112 printk("b ");
113 printk("\n");
114}
115
116u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
117{
118 struct drm_device *dev = adev->ddev;
119 struct drm_crtc *crtc;
120 struct amdgpu_crtc *amdgpu_crtc;
121 u32 line_time_us, vblank_lines;
122 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
123
124 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
125 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
126 amdgpu_crtc = to_amdgpu_crtc(crtc);
127 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
128 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
129 amdgpu_crtc->hw_mode.clock;
130 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
131 amdgpu_crtc->hw_mode.crtc_vdisplay +
132 (amdgpu_crtc->v_border * 2);
133 vblank_time_us = vblank_lines * line_time_us;
134 break;
135 }
136 }
137 }
138
139 return vblank_time_us;
140}
141
142u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
143{
144 struct drm_device *dev = adev->ddev;
145 struct drm_crtc *crtc;
146 struct amdgpu_crtc *amdgpu_crtc;
147 u32 vrefresh = 0;
148
149 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
150 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
151 amdgpu_crtc = to_amdgpu_crtc(crtc);
152 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
153 vrefresh = amdgpu_crtc->hw_mode.vrefresh;
154 break;
155 }
156 }
157 }
158
159 return vrefresh;
160}
161
162void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
163 u32 *p, u32 *u)
164{
165 u32 b_c = 0;
166 u32 i_c;
167 u32 tmp;
168
169 i_c = (i * r_c) / 100;
170 tmp = i_c >> p_b;
171
172 while (tmp) {
173 b_c++;
174 tmp >>= 1;
175 }
176
177 *u = (b_c + 1) / 2;
178 *p = i_c / (1 << (2 * (*u)));
179}
180
181int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
182{
183 u32 k, a, ah, al;
184 u32 t1;
185
186 if ((fl == 0) || (fh == 0) || (fl > fh))
187 return -EINVAL;
188
189 k = (100 * fh) / fl;
190 t1 = (t * (k - 100));
191 a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
192 a = (a + 5) / 10;
193 ah = ((a * t) + 5000) / 10000;
194 al = a - ah;
195
196 *th = t - ah;
197 *tl = t + al;
198
199 return 0;
200}
201
202bool amdgpu_is_uvd_state(u32 class, u32 class2)
203{
204 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
205 return true;
206 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
207 return true;
208 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
209 return true;
210 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
211 return true;
212 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
213 return true;
214 return false;
215}
216
217bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
218{
219 switch (sensor) {
220 case THERMAL_TYPE_RV6XX:
221 case THERMAL_TYPE_RV770:
222 case THERMAL_TYPE_EVERGREEN:
223 case THERMAL_TYPE_SUMO:
224 case THERMAL_TYPE_NI:
225 case THERMAL_TYPE_SI:
226 case THERMAL_TYPE_CI:
227 case THERMAL_TYPE_KV:
228 return true;
229 case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
230 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
231 return false; /* need special handling */
232 case THERMAL_TYPE_NONE:
233 case THERMAL_TYPE_EXTERNAL:
234 case THERMAL_TYPE_EXTERNAL_GPIO:
235 default:
236 return false;
237 }
238}
239
240union power_info {
241 struct _ATOM_POWERPLAY_INFO info;
242 struct _ATOM_POWERPLAY_INFO_V2 info_2;
243 struct _ATOM_POWERPLAY_INFO_V3 info_3;
244 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
245 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
246 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
247 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
248 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
249};
250
251union fan_info {
252 struct _ATOM_PPLIB_FANTABLE fan;
253 struct _ATOM_PPLIB_FANTABLE2 fan2;
254 struct _ATOM_PPLIB_FANTABLE3 fan3;
255};
256
257static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
258 ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
259{
260 u32 size = atom_table->ucNumEntries *
261 sizeof(struct amdgpu_clock_voltage_dependency_entry);
262 int i;
263 ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
264
265 amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
266 if (!amdgpu_table->entries)
267 return -ENOMEM;
268
269 entry = &atom_table->entries[0];
270 for (i = 0; i < atom_table->ucNumEntries; i++) {
271 amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
272 (entry->ucClockHigh << 16);
273 amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
274 entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
275 ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
276 }
277 amdgpu_table->count = atom_table->ucNumEntries;
278
279 return 0;
280}
281
282int amdgpu_get_platform_caps(struct amdgpu_device *adev)
283{
284 struct amdgpu_mode_info *mode_info = &adev->mode_info;
285 union power_info *power_info;
286 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
287 u16 data_offset;
288 u8 frev, crev;
289
290 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
291 &frev, &crev, &data_offset))
292 return -EINVAL;
293 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
294
295 adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
296 adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
297 adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
298
299 return 0;
300}
301
302/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
303#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
304#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
305#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
306#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
307#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
308#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
309#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
310#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
311
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
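		/*
		 * VCE clock/voltage limits and VCE states.  Each extended
		 * sub-table starts with a one-byte revision, which the
		 * first "+ 1" skips; the later "+ 1"s step over the
		 * entry-count byte of each preceding array when computing
		 * the offset of the next table.
		 */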
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
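		/* UVD (video decode) clock/voltage dependency limits */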
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
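		/* SAMU (secure asset management unit) clock/voltage limits */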
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
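		/*
		 * PPM table: platform power parameters (TDP/TDC budgets for
		 * the CPU/APU and dGPU), used for platform-level power
		 * management on APU+dGPU designs.
		 */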
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
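		/* ACP (audio co-processor) clock/voltage dependency limits */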
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
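		/*
		 * PowerTune (power containment) parameters.  Revision 0
		 * tables lack usMaximumPowerDeliveryLimit, so a default of
		 * 255 is used instead.
		 */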
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
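		/* sclk/vddgfx dependency, for asics with a separate gfx voltage rail */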
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
	}

	return 0;
}

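/*
 * Free everything allocated by amdgpu_parse_extended_power_table().
 * Safe to call on a partially parsed table since unallocated entry
 * pointers are still NULL and kfree(NULL) is a no-op.
 */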
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

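/*
 * Look up the board's thermal controller from the PowerPlayInfo table.
 * Internal controllers only record adev->pm.int_thermal_type; external
 * I2C parts additionally get an i2c client instantiated on the bus the
 * vbios points at.  ucI2cAddress is stored in shifted 8-bit form, hence
 * the ">> 1" to recover the 7-bit address.
 */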
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

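/*
 * Select the PCIe gen to program: an explicit asic_gen request wins;
 * otherwise pick the fastest speed supported by both the system
 * (sys_mask) and the requested default.
 */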
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
}

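/*
 * Return the requested lane count if it is a valid PCIe link width,
 * otherwise fall back to the default.
 */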
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

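/*
 * Encode a PCIe lane count into the 3-bit link-width register encoding
 * (x1/x2/x4/x8/x12/x16 -> 1..6); unsupported widths encode to 0.
 */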
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}