Linux Audio

Check our new training course

Loading...
v4.10.11
  1/*
  2 * Copyright 2011 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Alex Deucher
 23 */
 24
 25#include "drmP.h"
 26#include "amdgpu.h"
 27#include "amdgpu_atombios.h"
 28#include "amdgpu_i2c.h"
 29#include "amdgpu_dpm.h"
 30#include "atom.h"
 31
 32void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 33{
 34	printk("\tui class: ");
 
 35	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
 36	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
 37	default:
 38		printk("none\n");
 39		break;
 40	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
 41		printk("battery\n");
 42		break;
 43	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
 44		printk("balanced\n");
 45		break;
 46	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
 47		printk("performance\n");
 48		break;
 49	}
 50	printk("\tinternal class: ");
 
 51	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
 52	    (class2 == 0))
 53		printk("none");
 54	else {
 55		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
 56			printk("boot ");
 57		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
 58			printk("thermal ");
 59		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
 60			printk("limited_pwr ");
 61		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
 62			printk("rest ");
 63		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
 64			printk("forced ");
 65		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
 66			printk("3d_perf ");
 67		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
 68			printk("ovrdrv ");
 69		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
 70			printk("uvd ");
 71		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
 72			printk("3d_low ");
 73		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
 74			printk("acpi ");
 75		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
 76			printk("uvd_hd2 ");
 77		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
 78			printk("uvd_hd ");
 79		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
 80			printk("uvd_sd ");
 81		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
 82			printk("limited_pwr2 ");
 83		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
 84			printk("ulv ");
 85		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
 86			printk("uvd_mvc ");
 87	}
 88	printk("\n");
 89}
 90
 91void amdgpu_dpm_print_cap_info(u32 caps)
 92{
 93	printk("\tcaps: ");
 94	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
 95		printk("single_disp ");
 96	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
 97		printk("video ");
 98	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
 99		printk("no_dc ");
100	printk("\n");
101}
102
103void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
104				struct amdgpu_ps *rps)
105{
106	printk("\tstatus: ");
107	if (rps == adev->pm.dpm.current_ps)
108		printk("c ");
109	if (rps == adev->pm.dpm.requested_ps)
110		printk("r ");
111	if (rps == adev->pm.dpm.boot_ps)
112		printk("b ");
113	printk("\n");
114}
115
116
117u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
118{
119	struct drm_device *dev = adev->ddev;
120	struct drm_crtc *crtc;
121	struct amdgpu_crtc *amdgpu_crtc;
122	u32 vblank_in_pixels;
123	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
124
125	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
126		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
127			amdgpu_crtc = to_amdgpu_crtc(crtc);
128			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
129				vblank_in_pixels =
130					amdgpu_crtc->hw_mode.crtc_htotal *
131					(amdgpu_crtc->hw_mode.crtc_vblank_end -
132					amdgpu_crtc->hw_mode.crtc_vdisplay +
133					(amdgpu_crtc->v_border * 2));
134
135				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
136				break;
137			}
138		}
139	}
140
141	return vblank_time_us;
142}
143
144u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
145{
146	struct drm_device *dev = adev->ddev;
147	struct drm_crtc *crtc;
148	struct amdgpu_crtc *amdgpu_crtc;
149	u32 vrefresh = 0;
150
151	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
152		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
153			amdgpu_crtc = to_amdgpu_crtc(crtc);
154			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
155				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
156				break;
157			}
158		}
159	}
160
161	return vrefresh;
162}
163
/*
 * Derive the (p, u) pair used by the hw power state controllers from an
 * interval @i (percent), a reference count @r_c and a post-divider
 * exponent @p_b.  u is half the bit-width of (i * r_c / 100) >> p_b
 * (rounded up); p is that product divided by 4^u.
 */
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 i_c = (i * r_c) / 100;
	u32 rem = i_c >> p_b;
	u32 msb = 0;

	/* msb = number of significant bits left after the shift */
	for (; rem; rem >>= 1)
		msb++;

	*u = (msb + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
182
/*
 * Split a period @t into low/high activity thresholds (*tl, *th) around
 * the hysteresis point @h, weighted by the high/low frequency ratio
 * fh/fl (fixed-point percent arithmetic throughout).
 *
 * Returns 0 on success, -EINVAL for a zero or inverted frequency range.
 */
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 ratio, scaled, asym, up, down;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	ratio = (100 * fh) / fl;
	scaled = t * (ratio - 100);
	asym = (1000 * (100 * h + scaled)) / (10000 + (scaled / 100));
	asym = (asym + 5) / 10;		/* round to nearest */
	up = ((asym * t) + 5000) / 10000;
	down = asym - up;

	*th = t - up;
	*tl = t + down;

	return 0;
}
203
204bool amdgpu_is_uvd_state(u32 class, u32 class2)
205{
206	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
207		return true;
208	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
209		return true;
210	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
211		return true;
212	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
213		return true;
214	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
215		return true;
216	return false;
217}
218
219bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
220{
221	switch (sensor) {
222	case THERMAL_TYPE_RV6XX:
223	case THERMAL_TYPE_RV770:
224	case THERMAL_TYPE_EVERGREEN:
225	case THERMAL_TYPE_SUMO:
226	case THERMAL_TYPE_NI:
227	case THERMAL_TYPE_SI:
228	case THERMAL_TYPE_CI:
229	case THERMAL_TYPE_KV:
230		return true;
231	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
232	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
233		return false; /* need special handling */
234	case THERMAL_TYPE_NONE:
235	case THERMAL_TYPE_EXTERNAL:
236	case THERMAL_TYPE_EXTERNAL_GPIO:
237	default:
238		return false;
239	}
240}
241
/*
 * Overlay of every powerplay table layout the VBIOS may carry; the
 * actual revision is determined from the parsed header at runtime.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
252
/*
 * Overlay of the fan table revisions; ucFanTableFormat selects which
 * member is valid.
 */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
258
259static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
260					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
261{
262	u32 size = atom_table->ucNumEntries *
263		sizeof(struct amdgpu_clock_voltage_dependency_entry);
264	int i;
265	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
266
267	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
268	if (!amdgpu_table->entries)
269		return -ENOMEM;
270
271	entry = &atom_table->entries[0];
272	for (i = 0; i < atom_table->ucNumEntries; i++) {
273		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
274			(entry->ucClockHigh << 16);
275		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
276		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
277			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
278	}
279	amdgpu_table->count = atom_table->ucNumEntries;
280
281	return 0;
282}
283
284int amdgpu_get_platform_caps(struct amdgpu_device *adev)
285{
286	struct amdgpu_mode_info *mode_info = &adev->mode_info;
287	union power_info *power_info;
288	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
289	u16 data_offset;
290	u8 frev, crev;
291
292	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
293				   &frev, &crev, &data_offset))
294		return -EINVAL;
295	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
296
297	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
298	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
299	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
300
301	return 0;
302}
303
/*
 * sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each header revision.  Each new
 * revision appends one 16-bit sub-table offset; comparing usSize against
 * these tells us whether a given optional offset field exists before we
 * dereference it.
 */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
313
314int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
315{
316	struct amdgpu_mode_info *mode_info = &adev->mode_info;
317	union power_info *power_info;
318	union fan_info *fan_info;
319	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
320	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
321	u16 data_offset;
322	u8 frev, crev;
323	int ret, i;
324
325	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
326				   &frev, &crev, &data_offset))
327		return -EINVAL;
328	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
329
330	/* fan table */
331	if (le16_to_cpu(power_info->pplib.usTableSize) >=
332	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
333		if (power_info->pplib3.usFanTableOffset) {
334			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
335						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
336			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
337			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
338			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
339			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
340			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
341			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
342			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
343			if (fan_info->fan.ucFanTableFormat >= 2)
344				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
345			else
346				adev->pm.dpm.fan.t_max = 10900;
347			adev->pm.dpm.fan.cycle_delay = 100000;
348			if (fan_info->fan.ucFanTableFormat >= 3) {
349				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
350				adev->pm.dpm.fan.default_max_fan_pwm =
351					le16_to_cpu(fan_info->fan3.usFanPWMMax);
352				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
353				adev->pm.dpm.fan.fan_output_sensitivity =
354					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
355			}
356			adev->pm.dpm.fan.ucode_fan_control = true;
357		}
358	}
359
360	/* clock dependancy tables, shedding tables */
361	if (le16_to_cpu(power_info->pplib.usTableSize) >=
362	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
363		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
364			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
365				(mode_info->atom_context->bios + data_offset +
366				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
367			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
368								 dep_table);
369			if (ret) {
370				amdgpu_free_extended_power_table(adev);
371				return ret;
372			}
373		}
374		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
375			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
376				(mode_info->atom_context->bios + data_offset +
377				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
378			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
379								 dep_table);
380			if (ret) {
381				amdgpu_free_extended_power_table(adev);
382				return ret;
383			}
384		}
385		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
386			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
387				(mode_info->atom_context->bios + data_offset +
388				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
389			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
390								 dep_table);
391			if (ret) {
392				amdgpu_free_extended_power_table(adev);
393				return ret;
394			}
395		}
396		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
397			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
398				(mode_info->atom_context->bios + data_offset +
399				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
400			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
401								 dep_table);
402			if (ret) {
403				amdgpu_free_extended_power_table(adev);
404				return ret;
405			}
406		}
407		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
408			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
409				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
410				(mode_info->atom_context->bios + data_offset +
411				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
412			if (clk_v->ucNumEntries) {
413				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
414					le16_to_cpu(clk_v->entries[0].usSclkLow) |
415					(clk_v->entries[0].ucSclkHigh << 16);
416				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
417					le16_to_cpu(clk_v->entries[0].usMclkLow) |
418					(clk_v->entries[0].ucMclkHigh << 16);
419				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
420					le16_to_cpu(clk_v->entries[0].usVddc);
421				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
422					le16_to_cpu(clk_v->entries[0].usVddci);
423			}
424		}
425		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
426			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
427				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
428				(mode_info->atom_context->bios + data_offset +
429				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
430			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
431
432			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
433				kzalloc(psl->ucNumEntries *
434					sizeof(struct amdgpu_phase_shedding_limits_entry),
435					GFP_KERNEL);
436			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
437				amdgpu_free_extended_power_table(adev);
438				return -ENOMEM;
439			}
440
441			entry = &psl->entries[0];
442			for (i = 0; i < psl->ucNumEntries; i++) {
443				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
444					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
445				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
446					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
447				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
448					le16_to_cpu(entry->usVoltage);
449				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
450					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
451			}
452			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
453				psl->ucNumEntries;
454		}
455	}
456
457	/* cac data */
458	if (le16_to_cpu(power_info->pplib.usTableSize) >=
459	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
460		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
461		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
462		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
463		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
464		if (adev->pm.dpm.tdp_od_limit)
465			adev->pm.dpm.power_control = true;
466		else
467			adev->pm.dpm.power_control = false;
468		adev->pm.dpm.tdp_adjustment = 0;
469		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
470		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
471		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
472		if (power_info->pplib5.usCACLeakageTableOffset) {
473			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
474				(ATOM_PPLIB_CAC_Leakage_Table *)
475				(mode_info->atom_context->bios + data_offset +
476				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
477			ATOM_PPLIB_CAC_Leakage_Record *entry;
478			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
479			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
480			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
481				amdgpu_free_extended_power_table(adev);
482				return -ENOMEM;
483			}
484			entry = &cac_table->entries[0];
485			for (i = 0; i < cac_table->ucNumEntries; i++) {
486				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
487					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
488						le16_to_cpu(entry->usVddc1);
489					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
490						le16_to_cpu(entry->usVddc2);
491					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
492						le16_to_cpu(entry->usVddc3);
493				} else {
494					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
495						le16_to_cpu(entry->usVddc);
496					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
497						le32_to_cpu(entry->ulLeakageValue);
498				}
499				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
500					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
501			}
502			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
503		}
504	}
505
506	/* ext tables */
507	if (le16_to_cpu(power_info->pplib.usTableSize) >=
508	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
509		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
510			(mode_info->atom_context->bios + data_offset +
511			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
512		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
513			ext_hdr->usVCETableOffset) {
514			VCEClockInfoArray *array = (VCEClockInfoArray *)
515				(mode_info->atom_context->bios + data_offset +
516				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
517			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
518				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
519				(mode_info->atom_context->bios + data_offset +
520				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
521				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
522			ATOM_PPLIB_VCE_State_Table *states =
523				(ATOM_PPLIB_VCE_State_Table *)
524				(mode_info->atom_context->bios + data_offset +
525				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
526				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
527				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
528			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
529			ATOM_PPLIB_VCE_State_Record *state_entry;
530			VCEClockInfo *vce_clk;
531			u32 size = limits->numEntries *
532				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
533			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
534				kzalloc(size, GFP_KERNEL);
535			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
536				amdgpu_free_extended_power_table(adev);
537				return -ENOMEM;
538			}
539			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
540				limits->numEntries;
541			entry = &limits->entries[0];
542			state_entry = &states->entries[0];
543			for (i = 0; i < limits->numEntries; i++) {
544				vce_clk = (VCEClockInfo *)
545					((u8 *)&array->entries[0] +
546					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
547				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
548					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
549				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
550					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
551				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
552					le16_to_cpu(entry->usVoltage);
553				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
554					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
555			}
556			adev->pm.dpm.num_of_vce_states =
557					states->numEntries > AMD_MAX_VCE_LEVELS ?
558					AMD_MAX_VCE_LEVELS : states->numEntries;
559			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
560				vce_clk = (VCEClockInfo *)
561					((u8 *)&array->entries[0] +
562					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
563				adev->pm.dpm.vce_states[i].evclk =
564					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
565				adev->pm.dpm.vce_states[i].ecclk =
566					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
567				adev->pm.dpm.vce_states[i].clk_idx =
568					state_entry->ucClockInfoIndex & 0x3f;
569				adev->pm.dpm.vce_states[i].pstate =
570					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
571				state_entry = (ATOM_PPLIB_VCE_State_Record *)
572					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
573			}
574		}
575		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
576			ext_hdr->usUVDTableOffset) {
577			UVDClockInfoArray *array = (UVDClockInfoArray *)
578				(mode_info->atom_context->bios + data_offset +
579				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
580			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
581				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
582				(mode_info->atom_context->bios + data_offset +
583				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
584				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
585			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
586			u32 size = limits->numEntries *
587				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
588			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
589				kzalloc(size, GFP_KERNEL);
590			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
591				amdgpu_free_extended_power_table(adev);
592				return -ENOMEM;
593			}
594			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
595				limits->numEntries;
596			entry = &limits->entries[0];
597			for (i = 0; i < limits->numEntries; i++) {
598				UVDClockInfo *uvd_clk = (UVDClockInfo *)
599					((u8 *)&array->entries[0] +
600					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
601				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
602					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
603				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
604					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
605				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
606					le16_to_cpu(entry->usVoltage);
607				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
608					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
609			}
610		}
611		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
612			ext_hdr->usSAMUTableOffset) {
613			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
614				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
615				(mode_info->atom_context->bios + data_offset +
616				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
617			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
618			u32 size = limits->numEntries *
619				sizeof(struct amdgpu_clock_voltage_dependency_entry);
620			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
621				kzalloc(size, GFP_KERNEL);
622			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
623				amdgpu_free_extended_power_table(adev);
624				return -ENOMEM;
625			}
626			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
627				limits->numEntries;
628			entry = &limits->entries[0];
629			for (i = 0; i < limits->numEntries; i++) {
630				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
631					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
632				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
633					le16_to_cpu(entry->usVoltage);
634				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
635					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
636			}
637		}
638		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
639		    ext_hdr->usPPMTableOffset) {
640			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
641				(mode_info->atom_context->bios + data_offset +
642				 le16_to_cpu(ext_hdr->usPPMTableOffset));
643			adev->pm.dpm.dyn_state.ppm_table =
644				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
645			if (!adev->pm.dpm.dyn_state.ppm_table) {
646				amdgpu_free_extended_power_table(adev);
647				return -ENOMEM;
648			}
649			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
650			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
651				le16_to_cpu(ppm->usCpuCoreNumber);
652			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
653				le32_to_cpu(ppm->ulPlatformTDP);
654			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
655				le32_to_cpu(ppm->ulSmallACPlatformTDP);
656			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
657				le32_to_cpu(ppm->ulPlatformTDC);
658			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
659				le32_to_cpu(ppm->ulSmallACPlatformTDC);
660			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
661				le32_to_cpu(ppm->ulApuTDP);
662			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
663				le32_to_cpu(ppm->ulDGpuTDP);
664			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
665				le32_to_cpu(ppm->ulDGpuUlvPower);
666			adev->pm.dpm.dyn_state.ppm_table->tj_max =
667				le32_to_cpu(ppm->ulTjmax);
668		}
669		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
670			ext_hdr->usACPTableOffset) {
671			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
672				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
673				(mode_info->atom_context->bios + data_offset +
674				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
675			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
676			u32 size = limits->numEntries *
677				sizeof(struct amdgpu_clock_voltage_dependency_entry);
678			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
679				kzalloc(size, GFP_KERNEL);
680			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
681				amdgpu_free_extended_power_table(adev);
682				return -ENOMEM;
683			}
684			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
685				limits->numEntries;
686			entry = &limits->entries[0];
687			for (i = 0; i < limits->numEntries; i++) {
688				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
689					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
690				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
691					le16_to_cpu(entry->usVoltage);
692				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
693					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
694			}
695		}
696		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
697			ext_hdr->usPowerTuneTableOffset) {
698			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
699					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
700			ATOM_PowerTune_Table *pt;
701			adev->pm.dpm.dyn_state.cac_tdp_table =
702				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
703			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
704				amdgpu_free_extended_power_table(adev);
705				return -ENOMEM;
706			}
707			if (rev > 0) {
708				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
709					(mode_info->atom_context->bios + data_offset +
710					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
711				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
712					ppt->usMaximumPowerDeliveryLimit;
713				pt = &ppt->power_tune_table;
714			} else {
715				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
716					(mode_info->atom_context->bios + data_offset +
717					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
718				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
719				pt = &ppt->power_tune_table;
720			}
721			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
722			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
723				le16_to_cpu(pt->usConfigurableTDP);
724			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
725			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
726				le16_to_cpu(pt->usBatteryPowerLimit);
727			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
728				le16_to_cpu(pt->usSmallPowerLimit);
729			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
730				le16_to_cpu(pt->usLowCACLeakage);
731			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
732				le16_to_cpu(pt->usHighCACLeakage);
733		}
734		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
735				ext_hdr->usSclkVddgfxTableOffset) {
736			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
737				(mode_info->atom_context->bios + data_offset +
738				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
739			ret = amdgpu_parse_clk_voltage_dep_table(
740					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
741					dep_table);
742			if (ret) {
743				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
744				return ret;
745			}
746		}
747	}
748
749	return 0;
750}
751
752void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
753{
754	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
755
756	kfree(dyn_state->vddc_dependency_on_sclk.entries);
757	kfree(dyn_state->vddci_dependency_on_mclk.entries);
758	kfree(dyn_state->vddc_dependency_on_mclk.entries);
759	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
760	kfree(dyn_state->cac_leakage_table.entries);
761	kfree(dyn_state->phase_shedding_limits_table.entries);
762	kfree(dyn_state->ppm_table);
763	kfree(dyn_state->cac_tdp_table);
764	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
765	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
766	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
767	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
768	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
769}
770
/*
 * Human-readable names for the thermal controller types; indexed by the
 * ATOM_PP_THERMALCONTROLLER_* id, so the entry order is positional and
 * must not be changed.
 */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
793
794void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
795{
796	struct amdgpu_mode_info *mode_info = &adev->mode_info;
797	ATOM_PPLIB_POWERPLAYTABLE *power_table;
798	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
799	ATOM_PPLIB_THERMALCONTROLLER *controller;
800	struct amdgpu_i2c_bus_rec i2c_bus;
801	u16 data_offset;
802	u8 frev, crev;
803
804	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
805				   &frev, &crev, &data_offset))
806		return;
807	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
808		(mode_info->atom_context->bios + data_offset);
809	controller = &power_table->sThermalController;
810
811	/* add the i2c bus for thermal/fan chip */
812	if (controller->ucType > 0) {
813		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
814			adev->pm.no_fan = true;
815		adev->pm.fan_pulses_per_revolution =
816			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
817		if (adev->pm.fan_pulses_per_revolution) {
818			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
819			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
820		}
821		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
822			DRM_INFO("Internal thermal controller %s fan control\n",
823				 (controller->ucFanParameters &
824				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
825			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
826		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
827			DRM_INFO("Internal thermal controller %s fan control\n",
828				 (controller->ucFanParameters &
829				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
830			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
831		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
832			DRM_INFO("Internal thermal controller %s fan control\n",
833				 (controller->ucFanParameters &
834				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
835			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
836		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
837			DRM_INFO("Internal thermal controller %s fan control\n",
838				 (controller->ucFanParameters &
839				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
840			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
841		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
842			DRM_INFO("Internal thermal controller %s fan control\n",
843				 (controller->ucFanParameters &
844				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
845			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
846		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
847			DRM_INFO("Internal thermal controller %s fan control\n",
848				 (controller->ucFanParameters &
849				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
850			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
851		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
852			DRM_INFO("Internal thermal controller %s fan control\n",
853				 (controller->ucFanParameters &
854				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
855			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
856		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
857			DRM_INFO("Internal thermal controller %s fan control\n",
858				 (controller->ucFanParameters &
859				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
860			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
861		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
862			DRM_INFO("External GPIO thermal controller %s fan control\n",
863				 (controller->ucFanParameters &
864				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
865			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
866		} else if (controller->ucType ==
867			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
868			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
869				 (controller->ucFanParameters &
870				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
871			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
872		} else if (controller->ucType ==
873			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
874			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
875				 (controller->ucFanParameters &
876				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
877			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
878		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
879			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
880				 pp_lib_thermal_controller_names[controller->ucType],
881				 controller->ucI2cAddress >> 1,
882				 (controller->ucFanParameters &
883				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
884			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
885			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
886			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
887			if (adev->pm.i2c_bus) {
888				struct i2c_board_info info = { };
889				const char *name = pp_lib_thermal_controller_names[controller->ucType];
890				info.addr = controller->ucI2cAddress >> 1;
891				strlcpy(info.type, name, sizeof(info.type));
892				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
893			}
894		} else {
895			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
896				 controller->ucType,
897				 controller->ucI2cAddress >> 1,
898				 (controller->ucFanParameters &
899				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
900		}
901	}
902}
903
904enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
905						 u32 sys_mask,
906						 enum amdgpu_pcie_gen asic_gen,
907						 enum amdgpu_pcie_gen default_gen)
908{
909	switch (asic_gen) {
910	case AMDGPU_PCIE_GEN1:
911		return AMDGPU_PCIE_GEN1;
912	case AMDGPU_PCIE_GEN2:
913		return AMDGPU_PCIE_GEN2;
914	case AMDGPU_PCIE_GEN3:
915		return AMDGPU_PCIE_GEN3;
916	default:
917		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
918			return AMDGPU_PCIE_GEN3;
919		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
920			return AMDGPU_PCIE_GEN2;
921		else
922			return AMDGPU_PCIE_GEN1;
923	}
924	return AMDGPU_PCIE_GEN1;
925}
926
927u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
928				 u16 asic_lanes,
929				 u16 default_lanes)
930{
931	switch (asic_lanes) {
932	case 0:
933	default:
934		return default_lanes;
935	case 1:
936		return 1;
937	case 2:
938		return 2;
939	case 4:
940		return 4;
941	case 8:
942		return 8;
943	case 12:
944		return 12;
945	case 16:
946		return 16;
947	}
948}
949
950u8 amdgpu_encode_pci_lane_width(u32 lanes)
951{
952	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
953
954	if (lanes > 16)
955		return 0;
956
957	return encoded_lanes[lanes];
958}
959
960struct amd_vce_state*
961amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
962{
 
 
963	if (idx < adev->pm.dpm.num_of_vce_states)
964		return &adev->pm.dpm.vce_states[idx];
965
966	return NULL;
967}
v4.17
  1/*
  2 * Copyright 2011 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Alex Deucher
 23 */
 24
 25#include <drm/drmP.h>
 26#include "amdgpu.h"
 27#include "amdgpu_atombios.h"
 28#include "amdgpu_i2c.h"
 29#include "amdgpu_dpm.h"
 30#include "atom.h"
 31
 32void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 33{
 34	const char *s;
 35
 36	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
 37	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
 38	default:
 39		s = "none";
 40		break;
 41	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
 42		s = "battery";
 43		break;
 44	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
 45		s = "balanced";
 46		break;
 47	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
 48		s = "performance";
 49		break;
 50	}
 51	printk("\tui class: %s\n", s);
 52	printk("\tinternal class:");
 53	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
 54	    (class2 == 0))
 55		pr_cont(" none");
 56	else {
 57		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
 58			pr_cont(" boot");
 59		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
 60			pr_cont(" thermal");
 61		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
 62			pr_cont(" limited_pwr");
 63		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
 64			pr_cont(" rest");
 65		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
 66			pr_cont(" forced");
 67		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
 68			pr_cont(" 3d_perf");
 69		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
 70			pr_cont(" ovrdrv");
 71		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
 72			pr_cont(" uvd");
 73		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
 74			pr_cont(" 3d_low");
 75		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
 76			pr_cont(" acpi");
 77		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
 78			pr_cont(" uvd_hd2");
 79		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
 80			pr_cont(" uvd_hd");
 81		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
 82			pr_cont(" uvd_sd");
 83		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
 84			pr_cont(" limited_pwr2");
 85		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
 86			pr_cont(" ulv");
 87		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
 88			pr_cont(" uvd_mvc");
 89	}
 90	pr_cont("\n");
 91}
 92
 93void amdgpu_dpm_print_cap_info(u32 caps)
 94{
 95	printk("\tcaps:");
 96	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
 97		pr_cont(" single_disp");
 98	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
 99		pr_cont(" video");
100	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
101		pr_cont(" no_dc");
102	pr_cont("\n");
103}
104
105void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
106				struct amdgpu_ps *rps)
107{
108	printk("\tstatus:");
109	if (rps == adev->pm.dpm.current_ps)
110		pr_cont(" c");
111	if (rps == adev->pm.dpm.requested_ps)
112		pr_cont(" r");
113	if (rps == adev->pm.dpm.boot_ps)
114		pr_cont(" b");
115	pr_cont("\n");
116}
117
118
119u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
120{
121	struct drm_device *dev = adev->ddev;
122	struct drm_crtc *crtc;
123	struct amdgpu_crtc *amdgpu_crtc;
124	u32 vblank_in_pixels;
125	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
126
127	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
128		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
129			amdgpu_crtc = to_amdgpu_crtc(crtc);
130			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
131				vblank_in_pixels =
132					amdgpu_crtc->hw_mode.crtc_htotal *
133					(amdgpu_crtc->hw_mode.crtc_vblank_end -
134					amdgpu_crtc->hw_mode.crtc_vdisplay +
135					(amdgpu_crtc->v_border * 2));
136
137				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
138				break;
139			}
140		}
141	}
142
143	return vblank_time_us;
144}
145
146u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
147{
148	struct drm_device *dev = adev->ddev;
149	struct drm_crtc *crtc;
150	struct amdgpu_crtc *amdgpu_crtc;
151	u32 vrefresh = 0;
152
153	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
154		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
155			amdgpu_crtc = to_amdgpu_crtc(crtc);
156			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
157				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
158				break;
159			}
160		}
161	}
162
163	return vrefresh;
164}
165
166void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
167			      u32 *p, u32 *u)
168{
169	u32 b_c = 0;
170	u32 i_c;
171	u32 tmp;
172
173	i_c = (i * r_c) / 100;
174	tmp = i_c >> p_b;
175
176	while (tmp) {
177		b_c++;
178		tmp >>= 1;
179	}
180
181	*u = (b_c + 1) / 2;
182	*p = i_c / (1 << (2 * (*u)));
183}
184
/*
 * Compute an asymmetric hysteresis band (*tl, *th) around target t from the
 * high/low frequencies fh/fl and parameter h, using scaled integer
 * (fixed-point) arithmetic throughout.  NOTE(review): the exact scaling
 * factors (100/1000/10000) and rounding (+5, +5000) appear tuned for the
 * dpm state-transition thresholds — presumably t/h are in the same units as
 * the callers' activity targets; confirm against the callers before reuse.
 *
 * Returns 0 on success, -EINVAL if either frequency is 0 or fl > fh.
 */
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	/* reject degenerate or inverted frequency ranges */
	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	/* k = fh/fl in percent (>= 100 after the guard above) */
	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	/* round to nearest when dropping the last decimal digit */
	a = (a + 5) / 10;
	/* split a into the high-side (ah) and low-side (al) margins */
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
205
206bool amdgpu_is_uvd_state(u32 class, u32 class2)
207{
208	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
209		return true;
210	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
211		return true;
212	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
213		return true;
214	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
215		return true;
216	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
217		return true;
218	return false;
219}
220
221bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
222{
223	switch (sensor) {
224	case THERMAL_TYPE_RV6XX:
225	case THERMAL_TYPE_RV770:
226	case THERMAL_TYPE_EVERGREEN:
227	case THERMAL_TYPE_SUMO:
228	case THERMAL_TYPE_NI:
229	case THERMAL_TYPE_SI:
230	case THERMAL_TYPE_CI:
231	case THERMAL_TYPE_KV:
232		return true;
233	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
234	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
235		return false; /* need special handling */
236	case THERMAL_TYPE_NONE:
237	case THERMAL_TYPE_EXTERNAL:
238	case THERMAL_TYPE_EXTERNAL_GPIO:
239	default:
240		return false;
241	}
242}
243
/*
 * Overlay of every PowerPlay table revision that may appear in the VBIOS;
 * callers select the member to read based on the table's size/revision
 * fields (see the usTableSize checks in amdgpu_parse_extended_power_table()).
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
254
/*
 * Overlay of the fan table revisions; the member to read is chosen from
 * fan.ucFanTableFormat (see the fan-table parsing in
 * amdgpu_parse_extended_power_table()).
 */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
260
261static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
262					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
263{
264	u32 size = atom_table->ucNumEntries *
265		sizeof(struct amdgpu_clock_voltage_dependency_entry);
266	int i;
267	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
268
269	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
270	if (!amdgpu_table->entries)
271		return -ENOMEM;
272
273	entry = &atom_table->entries[0];
274	for (i = 0; i < atom_table->ucNumEntries; i++) {
275		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
276			(entry->ucClockHigh << 16);
277		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
278		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
279			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
280	}
281	amdgpu_table->count = atom_table->ucNumEntries;
282
283	return 0;
284}
285
286int amdgpu_get_platform_caps(struct amdgpu_device *adev)
287{
288	struct amdgpu_mode_info *mode_info = &adev->mode_info;
289	union power_info *power_info;
290	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
291	u16 data_offset;
292	u8 frev, crev;
293
294	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
295				   &frev, &crev, &data_offset))
296		return -EINVAL;
297	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
298
299	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
300	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
301	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
302
303	return 0;
304}
305
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each header revision.  Each newer
 * revision appends one 16-bit table-offset field, so comparing
 * le16_to_cpu(ext_hdr->usSize) against these constants tells whether a
 * given optional table offset exists in the header.
 */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
315
316int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
317{
318	struct amdgpu_mode_info *mode_info = &adev->mode_info;
319	union power_info *power_info;
320	union fan_info *fan_info;
321	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
322	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
323	u16 data_offset;
324	u8 frev, crev;
325	int ret, i;
326
327	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
328				   &frev, &crev, &data_offset))
329		return -EINVAL;
330	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
331
332	/* fan table */
333	if (le16_to_cpu(power_info->pplib.usTableSize) >=
334	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
335		if (power_info->pplib3.usFanTableOffset) {
336			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
337						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
338			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
339			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
340			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
341			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
342			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
343			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
344			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
345			if (fan_info->fan.ucFanTableFormat >= 2)
346				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
347			else
348				adev->pm.dpm.fan.t_max = 10900;
349			adev->pm.dpm.fan.cycle_delay = 100000;
350			if (fan_info->fan.ucFanTableFormat >= 3) {
351				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
352				adev->pm.dpm.fan.default_max_fan_pwm =
353					le16_to_cpu(fan_info->fan3.usFanPWMMax);
354				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
355				adev->pm.dpm.fan.fan_output_sensitivity =
356					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
357			}
358			adev->pm.dpm.fan.ucode_fan_control = true;
359		}
360	}
361
362	/* clock dependancy tables, shedding tables */
363	if (le16_to_cpu(power_info->pplib.usTableSize) >=
364	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
365		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
366			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
367				(mode_info->atom_context->bios + data_offset +
368				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
369			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
370								 dep_table);
371			if (ret) {
372				amdgpu_free_extended_power_table(adev);
373				return ret;
374			}
375		}
376		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
377			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
378				(mode_info->atom_context->bios + data_offset +
379				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
380			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
381								 dep_table);
382			if (ret) {
383				amdgpu_free_extended_power_table(adev);
384				return ret;
385			}
386		}
387		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
388			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
389				(mode_info->atom_context->bios + data_offset +
390				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
391			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
392								 dep_table);
393			if (ret) {
394				amdgpu_free_extended_power_table(adev);
395				return ret;
396			}
397		}
398		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
399			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
400				(mode_info->atom_context->bios + data_offset +
401				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
402			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
403								 dep_table);
404			if (ret) {
405				amdgpu_free_extended_power_table(adev);
406				return ret;
407			}
408		}
409		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
410			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
411				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
412				(mode_info->atom_context->bios + data_offset +
413				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
414			if (clk_v->ucNumEntries) {
415				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
416					le16_to_cpu(clk_v->entries[0].usSclkLow) |
417					(clk_v->entries[0].ucSclkHigh << 16);
418				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
419					le16_to_cpu(clk_v->entries[0].usMclkLow) |
420					(clk_v->entries[0].ucMclkHigh << 16);
421				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
422					le16_to_cpu(clk_v->entries[0].usVddc);
423				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
424					le16_to_cpu(clk_v->entries[0].usVddci);
425			}
426		}
427		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
428			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
429				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
430				(mode_info->atom_context->bios + data_offset +
431				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
432			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
433
434			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
435				kzalloc(psl->ucNumEntries *
436					sizeof(struct amdgpu_phase_shedding_limits_entry),
437					GFP_KERNEL);
438			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
439				amdgpu_free_extended_power_table(adev);
440				return -ENOMEM;
441			}
442
443			entry = &psl->entries[0];
444			for (i = 0; i < psl->ucNumEntries; i++) {
445				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
446					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
447				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
448					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
449				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
450					le16_to_cpu(entry->usVoltage);
451				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
452					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
453			}
454			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
455				psl->ucNumEntries;
456		}
457	}
458
459	/* cac data */
460	if (le16_to_cpu(power_info->pplib.usTableSize) >=
461	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
462		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
463		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
464		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
465		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
466		if (adev->pm.dpm.tdp_od_limit)
467			adev->pm.dpm.power_control = true;
468		else
469			adev->pm.dpm.power_control = false;
470		adev->pm.dpm.tdp_adjustment = 0;
471		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
472		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
473		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
474		if (power_info->pplib5.usCACLeakageTableOffset) {
475			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
476				(ATOM_PPLIB_CAC_Leakage_Table *)
477				(mode_info->atom_context->bios + data_offset +
478				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
479			ATOM_PPLIB_CAC_Leakage_Record *entry;
480			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
481			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
482			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
483				amdgpu_free_extended_power_table(adev);
484				return -ENOMEM;
485			}
486			entry = &cac_table->entries[0];
487			for (i = 0; i < cac_table->ucNumEntries; i++) {
488				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
489					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
490						le16_to_cpu(entry->usVddc1);
491					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
492						le16_to_cpu(entry->usVddc2);
493					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
494						le16_to_cpu(entry->usVddc3);
495				} else {
496					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
497						le16_to_cpu(entry->usVddc);
498					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
499						le32_to_cpu(entry->ulLeakageValue);
500				}
501				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
502					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
503			}
504			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
505		}
506	}
507
508	/* ext tables */
509	if (le16_to_cpu(power_info->pplib.usTableSize) >=
510	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
511		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
512			(mode_info->atom_context->bios + data_offset +
513			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
514		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
515			ext_hdr->usVCETableOffset) {
516			VCEClockInfoArray *array = (VCEClockInfoArray *)
517				(mode_info->atom_context->bios + data_offset +
518				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
519			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
520				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
521				(mode_info->atom_context->bios + data_offset +
522				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
523				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
524			ATOM_PPLIB_VCE_State_Table *states =
525				(ATOM_PPLIB_VCE_State_Table *)
526				(mode_info->atom_context->bios + data_offset +
527				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
528				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
529				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
530			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
531			ATOM_PPLIB_VCE_State_Record *state_entry;
532			VCEClockInfo *vce_clk;
533			u32 size = limits->numEntries *
534				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
535			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
536				kzalloc(size, GFP_KERNEL);
537			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
538				amdgpu_free_extended_power_table(adev);
539				return -ENOMEM;
540			}
541			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
542				limits->numEntries;
543			entry = &limits->entries[0];
544			state_entry = &states->entries[0];
545			for (i = 0; i < limits->numEntries; i++) {
546				vce_clk = (VCEClockInfo *)
547					((u8 *)&array->entries[0] +
548					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
549				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
550					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
551				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
552					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
553				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
554					le16_to_cpu(entry->usVoltage);
555				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
556					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
557			}
558			adev->pm.dpm.num_of_vce_states =
559					states->numEntries > AMD_MAX_VCE_LEVELS ?
560					AMD_MAX_VCE_LEVELS : states->numEntries;
561			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
562				vce_clk = (VCEClockInfo *)
563					((u8 *)&array->entries[0] +
564					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
565				adev->pm.dpm.vce_states[i].evclk =
566					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
567				adev->pm.dpm.vce_states[i].ecclk =
568					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
569				adev->pm.dpm.vce_states[i].clk_idx =
570					state_entry->ucClockInfoIndex & 0x3f;
571				adev->pm.dpm.vce_states[i].pstate =
572					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
573				state_entry = (ATOM_PPLIB_VCE_State_Record *)
574					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
575			}
576		}
577		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
578			ext_hdr->usUVDTableOffset) {
579			UVDClockInfoArray *array = (UVDClockInfoArray *)
580				(mode_info->atom_context->bios + data_offset +
581				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
582			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
583				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
584				(mode_info->atom_context->bios + data_offset +
585				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
586				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
587			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
588			u32 size = limits->numEntries *
589				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
590			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
591				kzalloc(size, GFP_KERNEL);
592			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
593				amdgpu_free_extended_power_table(adev);
594				return -ENOMEM;
595			}
596			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
597				limits->numEntries;
598			entry = &limits->entries[0];
599			for (i = 0; i < limits->numEntries; i++) {
600				UVDClockInfo *uvd_clk = (UVDClockInfo *)
601					((u8 *)&array->entries[0] +
602					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
603				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
604					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
605				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
606					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
607				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
608					le16_to_cpu(entry->usVoltage);
609				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
610					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
611			}
612		}
613		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
614			ext_hdr->usSAMUTableOffset) {
615			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
616				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
617				(mode_info->atom_context->bios + data_offset +
618				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
619			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
620			u32 size = limits->numEntries *
621				sizeof(struct amdgpu_clock_voltage_dependency_entry);
622			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
623				kzalloc(size, GFP_KERNEL);
624			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
625				amdgpu_free_extended_power_table(adev);
626				return -ENOMEM;
627			}
628			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
629				limits->numEntries;
630			entry = &limits->entries[0];
631			for (i = 0; i < limits->numEntries; i++) {
632				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
633					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
634				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
635					le16_to_cpu(entry->usVoltage);
636				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
637					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
638			}
639		}
640		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
641		    ext_hdr->usPPMTableOffset) {
642			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
643				(mode_info->atom_context->bios + data_offset +
644				 le16_to_cpu(ext_hdr->usPPMTableOffset));
645			adev->pm.dpm.dyn_state.ppm_table =
646				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
647			if (!adev->pm.dpm.dyn_state.ppm_table) {
648				amdgpu_free_extended_power_table(adev);
649				return -ENOMEM;
650			}
651			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
652			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
653				le16_to_cpu(ppm->usCpuCoreNumber);
654			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
655				le32_to_cpu(ppm->ulPlatformTDP);
656			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
657				le32_to_cpu(ppm->ulSmallACPlatformTDP);
658			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
659				le32_to_cpu(ppm->ulPlatformTDC);
660			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
661				le32_to_cpu(ppm->ulSmallACPlatformTDC);
662			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
663				le32_to_cpu(ppm->ulApuTDP);
664			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
665				le32_to_cpu(ppm->ulDGpuTDP);
666			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
667				le32_to_cpu(ppm->ulDGpuUlvPower);
668			adev->pm.dpm.dyn_state.ppm_table->tj_max =
669				le32_to_cpu(ppm->ulTjmax);
670		}
671		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
672			ext_hdr->usACPTableOffset) {
673			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
674				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
675				(mode_info->atom_context->bios + data_offset +
676				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
677			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
678			u32 size = limits->numEntries *
679				sizeof(struct amdgpu_clock_voltage_dependency_entry);
680			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
681				kzalloc(size, GFP_KERNEL);
682			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
683				amdgpu_free_extended_power_table(adev);
684				return -ENOMEM;
685			}
686			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
687				limits->numEntries;
688			entry = &limits->entries[0];
689			for (i = 0; i < limits->numEntries; i++) {
690				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
691					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
692				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
693					le16_to_cpu(entry->usVoltage);
694				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
695					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
696			}
697		}
698		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
699			ext_hdr->usPowerTuneTableOffset) {
700			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
701					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
702			ATOM_PowerTune_Table *pt;
703			adev->pm.dpm.dyn_state.cac_tdp_table =
704				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
705			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
706				amdgpu_free_extended_power_table(adev);
707				return -ENOMEM;
708			}
709			if (rev > 0) {
710				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
711					(mode_info->atom_context->bios + data_offset +
712					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
713				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
714					ppt->usMaximumPowerDeliveryLimit;
715				pt = &ppt->power_tune_table;
716			} else {
717				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
718					(mode_info->atom_context->bios + data_offset +
719					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
720				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
721				pt = &ppt->power_tune_table;
722			}
723			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
724			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
725				le16_to_cpu(pt->usConfigurableTDP);
726			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
727			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
728				le16_to_cpu(pt->usBatteryPowerLimit);
729			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
730				le16_to_cpu(pt->usSmallPowerLimit);
731			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
732				le16_to_cpu(pt->usLowCACLeakage);
733			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
734				le16_to_cpu(pt->usHighCACLeakage);
735		}
736		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
737				ext_hdr->usSclkVddgfxTableOffset) {
738			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
739				(mode_info->atom_context->bios + data_offset +
740				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
741			ret = amdgpu_parse_clk_voltage_dep_table(
742					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
743					dep_table);
744			if (ret) {
745				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
746				return ret;
747			}
748		}
749	}
750
751	return 0;
752}
753
754void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
755{
756	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
757
758	kfree(dyn_state->vddc_dependency_on_sclk.entries);
759	kfree(dyn_state->vddci_dependency_on_mclk.entries);
760	kfree(dyn_state->vddc_dependency_on_mclk.entries);
761	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
762	kfree(dyn_state->cac_leakage_table.entries);
763	kfree(dyn_state->phase_shedding_limits_table.entries);
764	kfree(dyn_state->ppm_table);
765	kfree(dyn_state->cac_tdp_table);
766	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
767	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
768	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
769	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
770	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
771}
772
/*
 * Human-readable names for thermal controller chips, indexed by the
 * ucType field of ATOM_PPLIB_THERMALCONTROLLER (used by
 * amdgpu_add_thermal_controller() for log messages and i2c device
 * names).  Both the array and the strings it points to are immutable,
 * hence const char * const.
 */
static const char * const pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
795
796void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
797{
798	struct amdgpu_mode_info *mode_info = &adev->mode_info;
799	ATOM_PPLIB_POWERPLAYTABLE *power_table;
800	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
801	ATOM_PPLIB_THERMALCONTROLLER *controller;
802	struct amdgpu_i2c_bus_rec i2c_bus;
803	u16 data_offset;
804	u8 frev, crev;
805
806	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
807				   &frev, &crev, &data_offset))
808		return;
809	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
810		(mode_info->atom_context->bios + data_offset);
811	controller = &power_table->sThermalController;
812
813	/* add the i2c bus for thermal/fan chip */
814	if (controller->ucType > 0) {
815		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
816			adev->pm.no_fan = true;
817		adev->pm.fan_pulses_per_revolution =
818			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
819		if (adev->pm.fan_pulses_per_revolution) {
820			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
821			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
822		}
823		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
824			DRM_INFO("Internal thermal controller %s fan control\n",
825				 (controller->ucFanParameters &
826				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
827			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
828		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
829			DRM_INFO("Internal thermal controller %s fan control\n",
830				 (controller->ucFanParameters &
831				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
832			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
833		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
834			DRM_INFO("Internal thermal controller %s fan control\n",
835				 (controller->ucFanParameters &
836				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
837			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
838		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
839			DRM_INFO("Internal thermal controller %s fan control\n",
840				 (controller->ucFanParameters &
841				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
842			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
843		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
844			DRM_INFO("Internal thermal controller %s fan control\n",
845				 (controller->ucFanParameters &
846				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
847			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
848		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
849			DRM_INFO("Internal thermal controller %s fan control\n",
850				 (controller->ucFanParameters &
851				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
852			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
853		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
854			DRM_INFO("Internal thermal controller %s fan control\n",
855				 (controller->ucFanParameters &
856				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
857			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
858		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
859			DRM_INFO("Internal thermal controller %s fan control\n",
860				 (controller->ucFanParameters &
861				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
862			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
863		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
864			DRM_INFO("External GPIO thermal controller %s fan control\n",
865				 (controller->ucFanParameters &
866				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
867			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
868		} else if (controller->ucType ==
869			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
870			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
871				 (controller->ucFanParameters &
872				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
873			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
874		} else if (controller->ucType ==
875			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
876			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
877				 (controller->ucFanParameters &
878				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
879			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
880		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
881			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
882				 pp_lib_thermal_controller_names[controller->ucType],
883				 controller->ucI2cAddress >> 1,
884				 (controller->ucFanParameters &
885				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
886			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
887			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
888			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
889			if (adev->pm.i2c_bus) {
890				struct i2c_board_info info = { };
891				const char *name = pp_lib_thermal_controller_names[controller->ucType];
892				info.addr = controller->ucI2cAddress >> 1;
893				strlcpy(info.type, name, sizeof(info.type));
894				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
895			}
896		} else {
897			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
898				 controller->ucType,
899				 controller->ucI2cAddress >> 1,
900				 (controller->ucFanParameters &
901				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
902		}
903	}
904}
905
906enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
907						 u32 sys_mask,
908						 enum amdgpu_pcie_gen asic_gen,
909						 enum amdgpu_pcie_gen default_gen)
910{
911	switch (asic_gen) {
912	case AMDGPU_PCIE_GEN1:
913		return AMDGPU_PCIE_GEN1;
914	case AMDGPU_PCIE_GEN2:
915		return AMDGPU_PCIE_GEN2;
916	case AMDGPU_PCIE_GEN3:
917		return AMDGPU_PCIE_GEN3;
918	default:
919		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
920			return AMDGPU_PCIE_GEN3;
921		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
922			return AMDGPU_PCIE_GEN2;
923		else
924			return AMDGPU_PCIE_GEN1;
925	}
926	return AMDGPU_PCIE_GEN1;
927}
928
929u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
930				 u16 asic_lanes,
931				 u16 default_lanes)
932{
933	switch (asic_lanes) {
934	case 0:
935	default:
936		return default_lanes;
937	case 1:
938		return 1;
939	case 2:
940		return 2;
941	case 4:
942		return 4;
943	case 8:
944		return 8;
945	case 12:
946		return 12;
947	case 16:
948		return 16;
949	}
950}
951
952u8 amdgpu_encode_pci_lane_width(u32 lanes)
953{
954	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
955
956	if (lanes > 16)
957		return 0;
958
959	return encoded_lanes[lanes];
960}
961
962struct amd_vce_state*
963amdgpu_get_vce_clock_state(void *handle, u32 idx)
964{
965	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
966
967	if (idx < adev->pm.dpm.num_of_vce_states)
968		return &adev->pm.dpm.vce_states[idx];
969
970	return NULL;
971}