Linux v6.9.4 - drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: nominator
 * @den: denominator
 * @nom_min: minimum value for nominator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it to both nominator and
 * denominator, but make sure the nominator and denominator are at least as
 * large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

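/*
 * Worked example (hypothetical numbers, not taken from real hardware):
 * with *nom = 100, *den = 10000, nom_min = 4 and den_min = 100, the gcd is
 * 100, so the ratio is first reduced to 1/100.  Since 1 < nom_min, both
 * values are scaled by DIV_ROUND_UP(4, 1) = 4, giving 4/400.  The
 * denominator 400 already exceeds den_min, so the result is 4/400: the
 * same ratio as 100/10000, but with both terms inside their limits.
 */
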
/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @adev: amdgpu_device pointer
 * @nom: nominator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
				      unsigned int den, unsigned int post_div,
				      unsigned int fb_div_max, unsigned int ref_div_max,
				      unsigned int *fb_div, unsigned int *ref_div)
{

	/* limit reference * post divider to a maximum */
	if (adev->family == AMDGPU_FAMILY_SI)
		ref_div_max = min(100 / post_div, ref_div_max);
	else
		ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

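/*
 * Worked example (hypothetical numbers): on a non-SI part with nom = 65,
 * den = 27, post_div = 4 and generous limits, ref_div_max is first capped
 * to 128 / 4 = 32.  Then ref_div = DIV_ROUND_CLOSEST(27, 4) = 7 and
 * fb_div = DIV_ROUND_CLOSEST(65 * 7 * 4, 27) = 67, so the VCO would run at
 * roughly ref_freq * 67 / (7 * 4), close to, but not exactly, the
 * requested 65/27 ratio.  The caller tries each allowed post divider and
 * keeps the one with the smallest such error.
 */
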
/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @adev: amdgpu_device pointer
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_device *adev,
			struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

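/*
 * Hypothetical usage sketch (illustration only, not part of the kernel):
 * shows the calling convention of amdgpu_pll_compute().  The helper name
 * and the "pll"/"adjusted_clock" parameters are placeholders for whatever
 * a real modesetting path already has at hand.
 */
static u32 example_compute_dividers(struct amdgpu_device *adev,
				    struct amdgpu_pll *pll, u32 adjusted_clock)
{
	u32 dot_clock, fb_div, frac_fb_div, ref_div, post_div;

	amdgpu_pll_compute(adev, pll, adjusted_clock, &dot_clock,
			   &fb_div, &frac_fb_div, &ref_div, &post_div);

	/* dot_clock is the pixel clock actually achievable with these dividers */
	return dot_clock;
}
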
/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode.  For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
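
/*
 * Hypothetical sketch (illustration only, not part of the kernel): how the
 * three lookup helpers above could be combined by display code when
 * assigning a PPLL to a CRTC, in the spirit of the dce_v*_0 "pick pll"
 * paths.  The ATOM_PPLL1/ATOM_PPLL2 candidates are assumptions; real code
 * consults the ASIC-specific set of available PLLs.
 */
static int example_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	u32 pll_in_use;
	int pll;

	if (amdgpu_crtc->encoder &&
	    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		/* DP streams can share a single PPLL, so reuse an active one */
		pll = amdgpu_pll_get_shared_dp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	} else {
		/* non-DP: reuse a PPLL only if the clock setup matches exactly */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* otherwise fall back to any PPLL no other CRTC is using */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	return ATOM_PPLL_INVALID;
}
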
Linux v4.6 - drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: nominator
 * @den: denominator
 * @nom_min: minimum value for nominator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it to both nominator and
 * denominator, but make sure the nominator and denominator are at least as
 * large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: nominator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode.  For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}