/* Linux v4.17 */
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

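/*
 * Decode the power state classification bits from the ATOM PowerPlay
 * tables and print them in human-readable form: the UI class is a
 * single enumerated value, while the internal class is a set of flag
 * bits spread across the class and class2 words.
 */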
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

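/*
 * Return the vblank period of the first active display in
 * microseconds, or 0xffffffff when no display is active. Worked
 * example with illustrative numbers (not taken from this file): a
 * CEA 1080p60 mode has crtc_htotal = 2200, 45 vblank lines and a
 * 148500 kHz pixel clock, so vblank_in_pixels = 2200 * 45 = 99000
 * and vblank_time_us = 99000 * 1000 / 148500 = 666 us.
 */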
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

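/*
 * Split a percentage setting into the (p, u) fixed-point pair
 * consumed by the hardware-specific dpm code: i_c = i * r_c / 100 is
 * the scaled input, b_c is the bit length of i_c >> p_b,
 * u = (b_c + 1) / 2, and p = i_c / 4^u, so that p << (2 * u)
 * approximates i_c.
 */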
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

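/*
 * Compute an asymmetric window around the target t from the
 * high/low frequencies fh/fl and the hysteresis input h: on success
 * *th = t - ah and *tl = t + al, where ah + al equals the scaled
 * adjustment a. Returns -EINVAL when fl or fh is zero or fl > fh.
 */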
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

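/*
 * The PowerPlay data in the VBIOS comes in several table revisions;
 * these unions overlay the possible layouts at the same data offset
 * so the parser can select fields based on usTableSize and
 * ucFanTableFormat.
 */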
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

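/*
 * Copy an ATOM clock/voltage dependency table into driver memory.
 * Clocks are stored as a little-endian 16-bit low word plus an 8-bit
 * high byte (24-bit values), and the records are advanced byte-wise
 * because the ATOM structures are packed.
 */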
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

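/*
 * Locate the PowerPlayInfo data table in the VBIOS and cache the
 * platform capability flags and the backbias/voltage response times.
 * Returns -EINVAL if the table header cannot be found.
 */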
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

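/*
 * Parse the optional parts of the PowerPlay table: the fan table
 * (V3+), the clock/voltage dependency and phase shedding tables
 * (V4+), the CAC leakage data (V5+), and the extended-header
 * sub-tables (VCE, UVD, SAMU, PPM, ACP, PowerTune, vddgfx) gated by
 * the header size defines above.
 */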
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

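/*
 * Release everything amdgpu_parse_extended_power_table() may have
 * allocated. Safe to call on a partially parsed table, since
 * kfree(NULL) is a no-op.
 */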
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

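/*
 * Read the thermal controller descriptor from the PowerPlay table,
 * record the fan parameters and internal sensor type, and register
 * an i2c device for recognized external controllers so that a hwmon
 * driver can bind to it.
 */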
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

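/*
 * Resolve the PCIE gen to use: an explicit ASIC setting wins;
 * otherwise fall back to default_gen when the system speed mask
 * (DRM_PCIE_SPEED_*) shows the link supports it, degrading to gen1
 * as the safe floor.
 */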
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

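/*
 * Encode a lane count: 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 12 -> 5,
 * 16 -> 6; any other count, including values above 16, encodes as 0.
 */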
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}
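
/*
 * Illustrative sketch, not part of the original file: one way a
 * caller might walk the VCE states exposed by
 * amdgpu_get_vce_clock_state(). The helper name and the DRM_DEBUG
 * output format are assumptions made for this example.
 */
static void amdgpu_dump_vce_clock_states(struct amdgpu_device *adev)
{
	u32 i;

	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		/* returns NULL once idx goes out of range */
		struct amd_vce_state *state =
			amdgpu_get_vce_clock_state(adev, i);

		if (!state)
			break;
		DRM_DEBUG("vce state %u: evclk %u ecclk %u\n",
			  i, state->evclk, state->ecclk);
	}
}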
v4.6
  1/*
  2 * Copyright 2011 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Alex Deucher
 23 */
 24
 25#include "drmP.h"
 26#include "amdgpu.h"
 27#include "amdgpu_atombios.h"
 28#include "amdgpu_i2c.h"
 29#include "amdgpu_dpm.h"
 30#include "atom.h"
 31
 32void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 33{
 34	printk("\tui class: ");
 
 35	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
 36	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
 37	default:
 38		printk("none\n");
 39		break;
 40	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
 41		printk("battery\n");
 42		break;
 43	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
 44		printk("balanced\n");
 45		break;
 46	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
 47		printk("performance\n");
 48		break;
 49	}
 50	printk("\tinternal class: ");
 
 51	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
 52	    (class2 == 0))
 53		printk("none");
 54	else {
 55		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
 56			printk("boot ");
 57		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
 58			printk("thermal ");
 59		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
 60			printk("limited_pwr ");
 61		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
 62			printk("rest ");
 63		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
 64			printk("forced ");
 65		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
 66			printk("3d_perf ");
 67		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
 68			printk("ovrdrv ");
 69		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
 70			printk("uvd ");
 71		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
 72			printk("3d_low ");
 73		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
 74			printk("acpi ");
 75		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
 76			printk("uvd_hd2 ");
 77		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
 78			printk("uvd_hd ");
 79		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
 80			printk("uvd_sd ");
 81		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
 82			printk("limited_pwr2 ");
 83		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
 84			printk("ulv ");
 85		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
 86			printk("uvd_mvc ");
 87	}
 88	printk("\n");
 89}
 90
 91void amdgpu_dpm_print_cap_info(u32 caps)
 92{
 93	printk("\tcaps: ");
 94	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
 95		printk("single_disp ");
 96	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
 97		printk("video ");
 98	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
 99		printk("no_dc ");
100	printk("\n");
101}
102
103void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
104				struct amdgpu_ps *rps)
105{
106	printk("\tstatus: ");
107	if (rps == adev->pm.dpm.current_ps)
108		printk("c ");
109	if (rps == adev->pm.dpm.requested_ps)
110		printk("r ");
111	if (rps == adev->pm.dpm.boot_ps)
112		printk("b ");
113	printk("\n");
114}
115
 
116u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
117{
118	struct drm_device *dev = adev->ddev;
119	struct drm_crtc *crtc;
120	struct amdgpu_crtc *amdgpu_crtc;
121	u32 line_time_us, vblank_lines;
122	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
123
124	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
125		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
126			amdgpu_crtc = to_amdgpu_crtc(crtc);
127			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
128				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
129					amdgpu_crtc->hw_mode.clock;
130				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
131					amdgpu_crtc->hw_mode.crtc_vdisplay +
132					(amdgpu_crtc->v_border * 2);
133				vblank_time_us = vblank_lines * line_time_us;
 
134				break;
135			}
136		}
137	}
138
139	return vblank_time_us;
140}
141
142u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
143{
144	struct drm_device *dev = adev->ddev;
145	struct drm_crtc *crtc;
146	struct amdgpu_crtc *amdgpu_crtc;
147	u32 vrefresh = 0;
148
149	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
150		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
151			amdgpu_crtc = to_amdgpu_crtc(crtc);
152			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
153				vrefresh = amdgpu_crtc->hw_mode.vrefresh;
154				break;
155			}
156		}
157	}
158
159	return vrefresh;
160}
161
162void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
163			      u32 *p, u32 *u)
164{
165	u32 b_c = 0;
166	u32 i_c;
167	u32 tmp;
168
169	i_c = (i * r_c) / 100;
170	tmp = i_c >> p_b;
171
172	while (tmp) {
173		b_c++;
174		tmp >>= 1;
175	}
176
177	*u = (b_c + 1) / 2;
178	*p = i_c / (1 << (2 * (*u)));
179}
180
181int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
182{
183	u32 k, a, ah, al;
184	u32 t1;
185
186	if ((fl == 0) || (fh == 0) || (fl > fh))
187		return -EINVAL;
188
189	k = (100 * fh) / fl;
190	t1 = (t * (k - 100));
191	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
192	a = (a + 5) / 10;
193	ah = ((a * t) + 5000) / 10000;
194	al = a - ah;
195
196	*th = t - ah;
197	*tl = t + al;
198
199	return 0;
200}
201
202bool amdgpu_is_uvd_state(u32 class, u32 class2)
203{
204	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
205		return true;
206	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
207		return true;
208	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
209		return true;
210	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
211		return true;
212	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
213		return true;
214	return false;
215}
216
217bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
218{
219	switch (sensor) {
220	case THERMAL_TYPE_RV6XX:
221	case THERMAL_TYPE_RV770:
222	case THERMAL_TYPE_EVERGREEN:
223	case THERMAL_TYPE_SUMO:
224	case THERMAL_TYPE_NI:
225	case THERMAL_TYPE_SI:
226	case THERMAL_TYPE_CI:
227	case THERMAL_TYPE_KV:
228		return true;
229	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
230	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
231		return false; /* need special handling */
232	case THERMAL_TYPE_NONE:
233	case THERMAL_TYPE_EXTERNAL:
234	case THERMAL_TYPE_EXTERNAL_GPIO:
235	default:
236		return false;
237	}
238}
239
240union power_info {
241	struct _ATOM_POWERPLAY_INFO info;
242	struct _ATOM_POWERPLAY_INFO_V2 info_2;
243	struct _ATOM_POWERPLAY_INFO_V3 info_3;
244	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
245	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
246	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
247	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
248	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
249};
250
251union fan_info {
252	struct _ATOM_PPLIB_FANTABLE fan;
253	struct _ATOM_PPLIB_FANTABLE2 fan2;
254	struct _ATOM_PPLIB_FANTABLE3 fan3;
255};
256
257static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
258					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
259{
260	u32 size = atom_table->ucNumEntries *
261		sizeof(struct amdgpu_clock_voltage_dependency_entry);
262	int i;
263	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
264
265	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
266	if (!amdgpu_table->entries)
267		return -ENOMEM;
268
269	entry = &atom_table->entries[0];
270	for (i = 0; i < atom_table->ucNumEntries; i++) {
271		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
272			(entry->ucClockHigh << 16);
273		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
274		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
275			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
276	}
277	amdgpu_table->count = atom_table->ucNumEntries;
278
279	return 0;
280}
281
282int amdgpu_get_platform_caps(struct amdgpu_device *adev)
283{
284	struct amdgpu_mode_info *mode_info = &adev->mode_info;
285	union power_info *power_info;
286	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
287	u16 data_offset;
288	u8 frev, crev;
289
290	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
291				   &frev, &crev, &data_offset))
292		return -EINVAL;
293	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
294
295	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
296	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
297	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
298
299	return 0;
300}
301
302/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
303#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
304#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
305#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
306#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
307#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
308#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
309#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
310#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
311
312int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
313{
314	struct amdgpu_mode_info *mode_info = &adev->mode_info;
315	union power_info *power_info;
316	union fan_info *fan_info;
317	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
318	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
319	u16 data_offset;
320	u8 frev, crev;
321	int ret, i;
322
323	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
324				   &frev, &crev, &data_offset))
325		return -EINVAL;
326	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
327
328	/* fan table */
329	if (le16_to_cpu(power_info->pplib.usTableSize) >=
330	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
331		if (power_info->pplib3.usFanTableOffset) {
332			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
333						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
334			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
335			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
336			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
337			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
338			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
339			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
340			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
341			if (fan_info->fan.ucFanTableFormat >= 2)
342				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
343			else
344				adev->pm.dpm.fan.t_max = 10900;
345			adev->pm.dpm.fan.cycle_delay = 100000;
346			if (fan_info->fan.ucFanTableFormat >= 3) {
347				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
348				adev->pm.dpm.fan.default_max_fan_pwm =
349					le16_to_cpu(fan_info->fan3.usFanPWMMax);
350				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
351				adev->pm.dpm.fan.fan_output_sensitivity =
352					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
353			}
354			adev->pm.dpm.fan.ucode_fan_control = true;
355		}
356	}
357
358	/* clock dependancy tables, shedding tables */
359	if (le16_to_cpu(power_info->pplib.usTableSize) >=
360	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
361		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
362			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
363				(mode_info->atom_context->bios + data_offset +
364				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
365			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
366								 dep_table);
367			if (ret) {
368				amdgpu_free_extended_power_table(adev);
369				return ret;
370			}
371		}
372		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
373			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
374				(mode_info->atom_context->bios + data_offset +
375				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
376			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
377								 dep_table);
378			if (ret) {
379				amdgpu_free_extended_power_table(adev);
380				return ret;
381			}
382		}
383		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
384			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
385				(mode_info->atom_context->bios + data_offset +
386				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
387			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
388								 dep_table);
389			if (ret) {
390				amdgpu_free_extended_power_table(adev);
391				return ret;
392			}
393		}
394		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
395			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
396				(mode_info->atom_context->bios + data_offset +
397				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
398			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
399								 dep_table);
400			if (ret) {
401				amdgpu_free_extended_power_table(adev);
402				return ret;
403			}
404		}
405		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
406			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
407				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
408				(mode_info->atom_context->bios + data_offset +
409				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
410			if (clk_v->ucNumEntries) {
411				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
412					le16_to_cpu(clk_v->entries[0].usSclkLow) |
413					(clk_v->entries[0].ucSclkHigh << 16);
414				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
415					le16_to_cpu(clk_v->entries[0].usMclkLow) |
416					(clk_v->entries[0].ucMclkHigh << 16);
417				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
418					le16_to_cpu(clk_v->entries[0].usVddc);
419				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
420					le16_to_cpu(clk_v->entries[0].usVddci);
421			}
422		}
423		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
424			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
425				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
426				(mode_info->atom_context->bios + data_offset +
427				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
428			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
429
430			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
431				kzalloc(psl->ucNumEntries *
432					sizeof(struct amdgpu_phase_shedding_limits_entry),
433					GFP_KERNEL);
434			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
435				amdgpu_free_extended_power_table(adev);
436				return -ENOMEM;
437			}
438
439			entry = &psl->entries[0];
440			for (i = 0; i < psl->ucNumEntries; i++) {
441				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
442					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
443				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
444					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
445				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
446					le16_to_cpu(entry->usVoltage);
447				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
448					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
449			}
450			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
451				psl->ucNumEntries;
452		}
453	}
454
455	/* cac data */
456	if (le16_to_cpu(power_info->pplib.usTableSize) >=
457	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
458		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
459		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
460		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
461		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
462		if (adev->pm.dpm.tdp_od_limit)
463			adev->pm.dpm.power_control = true;
464		else
465			adev->pm.dpm.power_control = false;
	adev->pm.dpm.tdp_adjustment = 0;
	adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
	adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
	adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
	if (power_info->pplib5.usCACLeakageTableOffset) {
		ATOM_PPLIB_CAC_Leakage_Table *cac_table =
			(ATOM_PPLIB_CAC_Leakage_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
		ATOM_PPLIB_CAC_Leakage_Record *entry;
		u32 size = cac_table->ucNumEntries * sizeof(union amdgpu_cac_leakage_entry);
		adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		entry = &cac_table->entries[0];
		for (i = 0; i < cac_table->ucNumEntries; i++) {
			if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
				adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
					le16_to_cpu(entry->usVddc1);
				adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
					le16_to_cpu(entry->usVddc2);
				adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
					le16_to_cpu(entry->usVddc3);
			} else {
				adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
					le16_to_cpu(entry->usVddc);
				adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
					le32_to_cpu(entry->ulLeakageValue);
			}
			entry = (ATOM_PPLIB_CAC_Leakage_Record *)
				((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
		}
		adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
	}
}

/* ext tables */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
	ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
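	/*
	 * Each sub-table below is located through a 16-bit offset in the
	 * extended header.  The VCE/UVD/SAMU/ACP tables begin with a
	 * one-byte table revision, and the embedded arrays carry their own
	 * one-byte entry counts, which is what the "+ 1" terms in the
	 * address arithmetic below step over.
	 */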
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
	    ext_hdr->usVCETableOffset) {
		VCEClockInfoArray *array = (VCEClockInfoArray *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
		ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
			(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
			 1 + array->ucNumEntries * sizeof(VCEClockInfo));
		ATOM_PPLIB_VCE_State_Table *states =
			(ATOM_PPLIB_VCE_State_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
			 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
			 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
		ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
		ATOM_PPLIB_VCE_State_Record *state_entry;
		VCEClockInfo *vce_clk;
		u32 size = limits->numEntries *
			sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
			kzalloc(size, GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
			limits->numEntries;
		entry = &limits->entries[0];
		state_entry = &states->entries[0];
		for (i = 0; i < limits->numEntries; i++) {
			vce_clk = (VCEClockInfo *)
				((u8 *)&array->entries[0] +
				 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
				le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
				le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
				le16_to_cpu(entry->usVoltage);
			entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
				((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
		}
		for (i = 0; i < states->numEntries; i++) {
			if (i >= AMDGPU_MAX_VCE_LEVELS)
				break;
			vce_clk = (VCEClockInfo *)
				((u8 *)&array->entries[0] +
				 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
			adev->pm.dpm.vce_states[i].evclk =
				le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
			adev->pm.dpm.vce_states[i].ecclk =
				le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
			adev->pm.dpm.vce_states[i].clk_idx =
				state_entry->ucClockInfoIndex & 0x3f;
			adev->pm.dpm.vce_states[i].pstate =
				(state_entry->ucClockInfoIndex & 0xc0) >> 6;
			state_entry = (ATOM_PPLIB_VCE_State_Record *)
				((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
		}
	}
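	/* UVD clock/voltage dependency table (extended header v3+). */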
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
	    ext_hdr->usUVDTableOffset) {
		UVDClockInfoArray *array = (UVDClockInfoArray *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
		ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
			(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
			 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
		ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
		u32 size = limits->numEntries *
			sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
			kzalloc(size, GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
			limits->numEntries;
		entry = &limits->entries[0];
		for (i = 0; i < limits->numEntries; i++) {
			UVDClockInfo *uvd_clk = (UVDClockInfo *)
				((u8 *)&array->entries[0] +
				 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
				le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
				le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
				le16_to_cpu(entry->usVoltage);
			entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
				((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
		}
	}
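	/* SAMU clock/voltage dependency table (extended header v4+). */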
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
	    ext_hdr->usSAMUTableOffset) {
		ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
			(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
		ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
		u32 size = limits->numEntries *
			sizeof(struct amdgpu_clock_voltage_dependency_entry);
		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
			kzalloc(size, GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
			limits->numEntries;
		entry = &limits->entries[0];
		for (i = 0; i < limits->numEntries; i++) {
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
				le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
				le16_to_cpu(entry->usVoltage);
			entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
				((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
		}
	}
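	/* PPM table with platform/APU TDP and TDC data (extended header v5+). */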
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
	    ext_hdr->usPPMTableOffset) {
		ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usPPMTableOffset));
		adev->pm.dpm.dyn_state.ppm_table =
			kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.ppm_table) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
		adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
			le16_to_cpu(ppm->usCpuCoreNumber);
		adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
			le32_to_cpu(ppm->ulPlatformTDP);
		adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
			le32_to_cpu(ppm->ulSmallACPlatformTDP);
		adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
			le32_to_cpu(ppm->ulPlatformTDC);
		adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
			le32_to_cpu(ppm->ulSmallACPlatformTDC);
		adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
			le32_to_cpu(ppm->ulApuTDP);
		adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
			le32_to_cpu(ppm->ulDGpuTDP);
		adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
			le32_to_cpu(ppm->ulDGpuUlvPower);
		adev->pm.dpm.dyn_state.ppm_table->tj_max =
			le32_to_cpu(ppm->ulTjmax);
	}
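	/* ACP clock/voltage dependency table (extended header v6+). */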
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
	    ext_hdr->usACPTableOffset) {
		ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
			(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
		ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
		u32 size = limits->numEntries *
			sizeof(struct amdgpu_clock_voltage_dependency_entry);
		adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
			kzalloc(size, GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
			limits->numEntries;
		entry = &limits->entries[0];
		for (i = 0; i < limits->numEntries; i++) {
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
				le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
				le16_to_cpu(entry->usVoltage);
			entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
				((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
		}
	}
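	/*
	 * PowerTune table (extended header v7+).  The first byte at the
	 * offset is the table revision: rev 0 lacks the maximum power
	 * delivery limit field, so a default of 255 is used instead.
	 */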
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
	    ext_hdr->usPowerTuneTableOffset) {
		u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
		ATOM_PowerTune_Table *pt;
		adev->pm.dpm.dyn_state.cac_tdp_table =
			kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
		if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
			amdgpu_free_extended_power_table(adev);
			return -ENOMEM;
		}
		if (rev > 0) {
			ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
				le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
			pt = &ppt->power_tune_table;
		} else {
			ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
			pt = &ppt->power_tune_table;
		}
		adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
		adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
			le16_to_cpu(pt->usConfigurableTDP);
		adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
		adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
			le16_to_cpu(pt->usBatteryPowerLimit);
		adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
			le16_to_cpu(pt->usSmallPowerLimit);
		adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
			le16_to_cpu(pt->usLowCACLeakage);
		adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
			le16_to_cpu(pt->usHighCACLeakage);
	}
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
	    ext_hdr->usSclkVddgfxTableOffset) {
		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
		ret = amdgpu_parse_clk_voltage_dep_table(
				&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
				dep_table);
		if (ret) {
			/* clean up everything parsed so far, not just this table */
			amdgpu_free_extended_power_table(adev);
			return ret;
		}
	}
}

return 0;
}

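/**
 * amdgpu_free_extended_power_table - free the parsed dpm tables
 * @adev: amdgpu device pointer
 *
 * Frees everything allocated by amdgpu_parse_extended_power_table().
 * kfree() ignores NULL pointers, so this is safe to call on a partially
 * parsed (or never parsed) set of tables.
 */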
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

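/* Indexed by the ATOM_PP_THERMALCONTROLLER_* type byte from the vbios. */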
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

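/**
 * amdgpu_add_thermal_controller - set up the board's thermal controller
 * @adev: amdgpu device pointer
 *
 * Reads the thermal controller entry of the PowerPlay table, records the
 * fan parameters, and selects the matching internal thermal sensor type.
 * For supported external chips, the controller is instead registered as
 * an i2c device on the bus named in the vbios.
 */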
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

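/**
 * amdgpu_get_pcie_gen_support - pick a supported PCIe gen
 * @adev: amdgpu device pointer
 * @sys_mask: mask of PCIe speeds supported by the platform
 * @asic_gen: PCIe gen explicitly requested for the ASIC, if any
 * @default_gen: PCIe gen to aim for when no explicit request is made
 *
 * Returns @asic_gen when it names a specific gen; otherwise returns the
 * fastest gen that both @sys_mask and @default_gen allow.
 */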
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
}

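/**
 * amdgpu_get_pcie_lane_support - pick a supported PCIe lane count
 * @adev: amdgpu device pointer
 * @asic_lanes: lane count requested for the ASIC, if any
 * @default_lanes: lane count to fall back to
 *
 * Returns @asic_lanes when it is a valid PCIe width (1/2/4/8/12/16),
 * otherwise @default_lanes.
 */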
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

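/**
 * amdgpu_encode_pci_lane_width - encode a lane count for the link registers
 * @lanes: number of PCIe lanes
 *
 * Maps a lane count to the 3-bit hardware link-width encoding
 * (1->1, 2->2, 4->3, 8->4, 12->5, 16->6); returns 0 for any
 * unsupported width.
 */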
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	static const u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}