1/*
2 * Copyright © 2006-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include <linux/math.h>
25#include <linux/string_helpers.h>
26
27#include "i915_reg.h"
28#include "intel_de.h"
29#include "intel_display_types.h"
30#include "intel_dkl_phy.h"
31#include "intel_dkl_phy_regs.h"
32#include "intel_dpio_phy.h"
33#include "intel_dpll.h"
34#include "intel_dpll_mgr.h"
35#include "intel_hti.h"
36#include "intel_mg_phy_regs.h"
37#include "intel_pch_refclk.h"
38#include "intel_tc.h"
39
40/**
41 * DOC: Display PLLs
42 *
43 * Display PLLs used for driving outputs vary by platform. While some have
44 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45 * from a pool. In the latter scenario, it is possible that multiple pipes
46 * share a PLL if their configurations match.
47 *
48 * This file provides an abstraction over display PLLs. The function
49 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
50 * users of a PLL are tracked and that tracking is integrated with the atomic
51 * modset interface. During an atomic operation, required PLLs can be reserved
52 * for a given CRTC and encoder configuration by calling
53 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54 * with intel_release_shared_dplls().
55 * Changes to the users are first staged in the atomic state, and then made
56 * effective by calling intel_shared_dpll_swap_state() during the atomic
57 * commit phase.
58 */
59
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true iff the pll is
	 * currently enabled.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. The HSW implementations return the frequency in kHz.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
94
/*
 * Per-platform DPLL management: the table of available DPLLs plus the
 * hooks the atomic code uses to compute, reserve and release them.
 */
struct intel_dpll_mgr {
	/* platform's DPLL list, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/* compute the DPLL state for @crtc/@encoder into the atomic state */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the DPLL(s) needed by @crtc/@encoder */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the DPLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional (may be NULL): update which pll is actively driving the port */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional (may be NULL): refresh cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state);
};
113
114static void
115intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
116 struct intel_shared_dpll_state *shared_dpll)
117{
118 struct intel_shared_dpll *pll;
119 int i;
120
121 /* Copy shared dpll state */
122 for_each_shared_dpll(i915, pll, i)
123 shared_dpll[pll->index] = pll->state;
124}
125
126static struct intel_shared_dpll_state *
127intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
128{
129 struct intel_atomic_state *state = to_intel_atomic_state(s);
130
131 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
132
133 if (!state->dpll_set) {
134 state->dpll_set = true;
135
136 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
137 state->shared_dpll);
138 }
139
140 return state->shared_dpll;
141}
142
143/**
144 * intel_get_shared_dpll_by_id - get a DPLL given its id
145 * @i915: i915 device instance
146 * @id: pll id
147 *
148 * Returns:
149 * A pointer to the DPLL with @id
150 */
151struct intel_shared_dpll *
152intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
153 enum intel_dpll_id id)
154{
155 struct intel_shared_dpll *pll;
156 int i;
157
158 for_each_shared_dpll(i915, pll, i) {
159 if (pll->info->id == id)
160 return pll;
161 }
162
163 MISSING_CASE(id);
164 return NULL;
165}
166
/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Catch callers asserting on a pll that was never allocated. */
	if (drm_WARN(&i915->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Compare the real hw enable state against the expected @state. */
	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
185
186static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
187{
188 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
189}
190
191enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
192{
193 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
194}
195
196static i915_reg_t
197intel_combo_pll_enable_reg(struct drm_i915_private *i915,
198 struct intel_shared_dpll *pll)
199{
200 if (IS_DG1(i915))
201 return DG1_DPLL_ENABLE(pll->info->id);
202 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
203 (pll->info->id == DPLL_ID_EHL_DPLL4))
204 return MG_PLL_ENABLE(0);
205
206 return ICL_DPLL_ENABLE(pll->info->id);
207}
208
209static i915_reg_t
210intel_tc_pll_enable_reg(struct drm_i915_private *i915,
211 struct intel_shared_dpll *pll)
212{
213 const enum intel_dpll_id id = pll->info->id;
214 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
215
216 if (IS_ALDERLAKE_P(i915))
217 return ADLP_PORTTC_PLL_ENABLE(tc_port);
218
219 return MG_PLL_ENABLE(tc_port);
220}
221
/* Power up (if needed) and enable the pll hw; caller holds dpll.lock. */
static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
				      struct intel_shared_dpll *pll)
{
	/* Grab the pll's power domain before touching its registers. */
	if (pll->info->power_domain)
		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);

	pll->info->funcs->enable(i915, pll);
	pll->on = true;
}
231
/* Disable the pll hw, then release its power domain; caller holds dpll.lock. */
static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
				       struct intel_shared_dpll *pll)
{
	pll->info->funcs->disable(i915, pll);
	pll->on = false;

	/* Drop the wakeref taken in _intel_enable_shared_dpll(). */
	if (pll->info->power_domain)
		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
}
241
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&i915->drm, pll == NULL))
		return;

	mutex_lock(&i915->display.dpll.lock);
	old_mask = pll->active_mask;

	/* The pipe must have reserved the pll, and not yet have enabled it. */
	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	if (old_mask) {
		/* Another pipe already enabled the pll; just sanity check. */
		drm_WARN_ON(&i915->drm, !pll->on);
		assert_shared_dpll_enabled(i915, pll);
		goto out;
	}
	drm_WARN_ON(&i915->drm, pll->on);

	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);

	/* First user: actually power up and enable the pll hw. */
	_intel_enable_shared_dpll(i915, pll);

out:
	mutex_unlock(&i915->display.dpll.lock);
}
287
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(i915) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&i915->display.dpll.lock);
	/* The pll must actually be enabled for this pipe. */
	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&i915->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(i915, pll);
	drm_WARN_ON(&i915->drm, !pll->on);

	/* Only power down the hw once the last pipe stops using the pll. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);

	_intel_disable_shared_dpll(i915, pll);

out:
	mutex_unlock(&i915->display.dpll.lock);
}
333
334static unsigned long
335intel_dpll_mask_all(struct drm_i915_private *i915)
336{
337 struct intel_shared_dpll *pll;
338 unsigned long dpll_mask = 0;
339 int i;
340
341 for_each_shared_dpll(i915, pll, i) {
342 drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
343
344 dpll_mask |= BIT(pll->info->id);
345 }
346
347 return dpll_mask;
348}
349
/*
 * Find a pll for @crtc among @dpll_mask: prefer a pll that already has
 * users and whose programmed state matches @pll_state exactly (so it
 * can be shared), otherwise fall back to the first currently unused
 * pll.  Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *unused_pll = NULL;
	enum intel_dpll_id id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* @dpll_mask may only refer to plls that exist on this platform. */
	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);

	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
		struct intel_shared_dpll *pll;

		pll = intel_get_shared_dpll_by_id(i915, id);
		if (!pll)
			continue;

		/* Only want to check enabled timings first */
		if (shared_dpll[pll->index].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Exact hw state match -> the pll can be shared as-is. */
		if (memcmp(pll_state,
			   &shared_dpll[pll->index].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[pll->index].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
403
/**
 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is taken
 * @pll: DPLL for which the reference is taken
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Take a reference for @pll tracking the use of it by @crtc.
 */
static void
intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
				 const struct intel_shared_dpll *pll,
				 struct intel_shared_dpll_state *shared_dpll_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* Taking the same reference twice would corrupt the tracking. */
	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);

	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
426
/* Reserve @pll for @crtc in the atomic state, recording @pll_state. */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct intel_shared_dpll_state *shared_dpll;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* The first user of the pll records the hw state to program. */
	if (shared_dpll[pll->index].pipe_mask == 0)
		shared_dpll[pll->index].hw_state = *pll_state;

	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
442
/**
 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is dropped
 * @pll: DPLL for which the reference is dropped
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Drop a reference for @pll tracking the end of use of it by @crtc.
 */
void
intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
				   const struct intel_shared_dpll *pll,
				   struct intel_shared_dpll_state *shared_dpll_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* Dropping a reference we never took would corrupt the tracking. */
	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);

	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
465
466static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
467 const struct intel_crtc *crtc,
468 const struct intel_shared_dpll *pll)
469{
470 struct intel_shared_dpll_state *shared_dpll;
471
472 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
473
474 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
475}
476
/* Release @crtc's old pll reference and clear the pll in its new state. */
static void intel_put_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->shared_dpll = NULL;

	/* Nothing to release if the crtc had no pll. */
	if (!old_crtc_state->shared_dpll)
		return;

	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
}
492
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
	struct intel_shared_dpll *pll;
	int i;

	/* Nothing to do unless the dpll state was duplicated into @state. */
	if (!state->dpll_set)
		return;

	for_each_shared_dpll(i915, pll, i)
		swap(pll->state, shared_dpll[pll->index]);
}
517
/*
 * Read back the PCH DPLL control and FP divider registers into
 * @hw_state.  Returns true iff the pll is enabled (DPLL_VCO_ENABLE).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the display core power well is down; nothing to read. */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));

	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
540
541static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
542{
543 u32 val;
544 bool enabled;
545
546 val = intel_de_read(i915, PCH_DREF_CONTROL);
547 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
548 DREF_SUPERSPREAD_SOURCE_MASK));
549 I915_STATE_WARN(i915, !enabled,
550 "PCH refclk assertion failure, should be active but is disabled\n");
551}
552
/*
 * Program and enable a PCH DPLL from pll->state.hw_state.  The write
 * sequence (FP dividers first, DPLL written twice with settle delays)
 * must be preserved.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(i915);

	/* Program the FP dividers before enabling the DPLL itself. */
	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(i915, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(i915, PCH_DPLL(id));
	udelay(200);
}
579
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(i915, PCH_DPLL(id), 0);
	/* Flush the write and give the pll time to shut down. */
	intel_de_posting_read(i915, PCH_DPLL(id));
	udelay(200);
}
589
/*
 * No pll state to compute here; dpll_hw_state is presumably filled in
 * elsewhere for PCH plls — NOTE(review): confirm against the ILK mode
 * set path.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
596
597static int ibx_get_dpll(struct intel_atomic_state *state,
598 struct intel_crtc *crtc,
599 struct intel_encoder *encoder)
600{
601 struct intel_crtc_state *crtc_state =
602 intel_atomic_get_new_crtc_state(state, crtc);
603 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
604 struct intel_shared_dpll *pll;
605 enum intel_dpll_id id;
606
607 if (HAS_PCH_IBX(i915)) {
608 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
609 id = (enum intel_dpll_id) crtc->pipe;
610 pll = intel_get_shared_dpll_by_id(i915, id);
611
612 drm_dbg_kms(&i915->drm,
613 "[CRTC:%d:%s] using pre-allocated %s\n",
614 crtc->base.base.id, crtc->base.name,
615 pll->info->name);
616 } else {
617 pll = intel_find_shared_dpll(state, crtc,
618 &crtc_state->dpll_hw_state,
619 BIT(DPLL_ID_PCH_PLL_B) |
620 BIT(DPLL_ID_PCH_PLL_A));
621 }
622
623 if (!pll)
624 return -EINVAL;
625
626 /* reference the pll */
627 intel_reference_shared_dpll(state, crtc,
628 pll, &crtc_state->dpll_hw_state);
629
630 crtc_state->shared_dpll = pll;
631
632 return 0;
633}
634
/* Log the ibx/PCH dpll hw state for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&i915->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
646
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs available on ibx-class PCHs. */
static const struct dpll_info pch_plls[] = {
	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
	{} /* sentinel */
};

/* DPLL manager for ibx-class PCH platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
666
/* Enable a HSW WRPLL by writing its precomputed control value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	/* Flush the write and give the pll time to settle. */
	intel_de_posting_read(i915, WRPLL_CTL(id));
	udelay(20);
}
676
/* Enable the HSW SPLL by writing its precomputed control value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
	/* Flush the write and give the pll time to settle. */
	intel_de_posting_read(i915, SPLL_CTL);
	udelay(20);
}
684
static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Clear only the enable bit, leaving the divider config intact. */
	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(i915, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (i915->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(i915);
}
700
static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;

	/* Clear only the enable bit, leaving the rest of the config intact. */
	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
	intel_de_posting_read(i915, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (i915->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(i915);
}
716
/*
 * Read back the WRPLL control register into @hw_state.  Returns true
 * iff the pll is enabled (WRPLL_PLL_ENABLE set).
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the display core power well is down; nothing to read. */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
737
/*
 * Read back the SPLL control register into @hw_state.  Returns true
 * iff the pll is enabled (SPLL_PLL_ENABLE set).
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the display core power well is down; nothing to read. */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
757
/* LC PLL reference clock, in MHz (2.7 GHz) */
#define LC_FREQ 2700
/* LC_FREQ scaled into the same units as freq2k (pixel clock / 100) */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider P range; even values only (P_INC == 2) */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL dividers; n2/r2 are 2x the actual N/R values */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
774
/*
 * Return the WRPLL ppm error budget for @clock (pixel clock in Hz).
 * Frequencies not in the table get the default budget of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int freq;
		unsigned budget;
	} budgets[] = {
		{ 25175000, 0 }, { 25200000, 0 }, { 27000000, 0 },
		{ 27027000, 0 }, { 37762500, 0 }, { 37800000, 0 },
		{ 40500000, 0 }, { 40541000, 0 }, { 54000000, 0 },
		{ 54054000, 0 }, { 59341000, 0 }, { 59400000, 0 },
		{ 72000000, 0 }, { 74176000, 0 }, { 74250000, 0 },
		{ 81000000, 0 }, { 81081000, 0 }, { 89012000, 0 },
		{ 89100000, 0 }, { 108000000, 0 }, { 108108000, 0 },
		{ 111264000, 0 }, { 111375000, 0 }, { 148352000, 0 },
		{ 148500000, 0 }, { 162000000, 0 }, { 162162000, 0 },
		{ 222525000, 0 }, { 222750000, 0 }, { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].freq == clock)
			return budgets[i].budget;
	}

	return 1000;
}
838
/* Update *best with (r2, n2, p) if the candidate is a better WRPLL fit. */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * a/b: the ppm budget scaled by the candidate's/best's dividers;
	 * c/d: the absolute frequency error scaled by 1e6.  With both
	 * sides multiplied by freq2k * p * r2, "a >= c" is exactly
	 * "delta <= budget", i.e. the candidate is within budget.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
898
/*
 * Find WRPLL dividers for @clock (in Hz), minimizing the frequency
 * error within the ppm budget.  The outputs are the doubled divider
 * values (r2 = 2*R, n2 = 2*N) used by the WRPLL_DIVIDER_* fields.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* freq2k is the target clock in units of 100 Hz. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 MHz pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Exhaustively try every even post divider. */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
963
/* Calculate the WRPLL output frequency (in kHz) from its control value. */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	/* Pick the reference clock based on the WRPLL reference select. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
			refclk = i915->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = i915->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control value. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
1003
/* Compute the WRPLL control value for @crtc's new state. */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	/* port_clock is in kHz; the divider search wants Hz. */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/*
	 * Replace the requested port clock with the frequency the chosen
	 * dividers actually produce.
	 */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1025
/* Reserve one of the two WRPLLs for @crtc based on its computed state. */
static struct intel_shared_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_find_shared_dpll(state, crtc,
				      &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_WRPLL2) |
				      BIT(DPLL_ID_WRPLL1));
}
1038
1039static int
1040hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1041{
1042 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1043 int clock = crtc_state->port_clock;
1044
1045 switch (clock / 2) {
1046 case 81000:
1047 case 135000:
1048 case 270000:
1049 return 0;
1050 default:
1051 drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1052 clock);
1053 return -EINVAL;
1054 }
1055}
1056
1057static struct intel_shared_dpll *
1058hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1059{
1060 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1061 struct intel_shared_dpll *pll;
1062 enum intel_dpll_id pll_id;
1063 int clock = crtc_state->port_clock;
1064
1065 switch (clock / 2) {
1066 case 81000:
1067 pll_id = DPLL_ID_LCPLL_810;
1068 break;
1069 case 135000:
1070 pll_id = DPLL_ID_LCPLL_1350;
1071 break;
1072 case 270000:
1073 pll_id = DPLL_ID_LCPLL_2700;
1074 break;
1075 default:
1076 MISSING_CASE(clock / 2);
1077 return NULL;
1078 }
1079
1080 pll = intel_get_shared_dpll_by_id(i915, pll_id);
1081
1082 if (!pll)
1083 return NULL;
1084
1085 return pll;
1086}
1087
1088static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1089 const struct intel_shared_dpll *pll,
1090 const struct intel_dpll_hw_state *pll_state)
1091{
1092 int link_clock = 0;
1093
1094 switch (pll->info->id) {
1095 case DPLL_ID_LCPLL_810:
1096 link_clock = 81000;
1097 break;
1098 case DPLL_ID_LCPLL_1350:
1099 link_clock = 135000;
1100 break;
1101 case DPLL_ID_LCPLL_2700:
1102 link_clock = 270000;
1103 break;
1104 default:
1105 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1106 break;
1107 }
1108
1109 return link_clock * 2;
1110}
1111
1112static int
1113hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1114 struct intel_crtc *crtc)
1115{
1116 struct intel_crtc_state *crtc_state =
1117 intel_atomic_get_new_crtc_state(state, crtc);
1118
1119 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1120 return -EINVAL;
1121
1122 crtc_state->dpll_hw_state.spll =
1123 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1124
1125 return 0;
1126}
1127
1128static struct intel_shared_dpll *
1129hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1130 struct intel_crtc *crtc)
1131{
1132 struct intel_crtc_state *crtc_state =
1133 intel_atomic_get_new_crtc_state(state, crtc);
1134
1135 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1136 BIT(DPLL_ID_SPLL));
1137}
1138
1139static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1140 const struct intel_shared_dpll *pll,
1141 const struct intel_dpll_hw_state *pll_state)
1142{
1143 int link_clock = 0;
1144
1145 switch (pll_state->spll & SPLL_FREQ_MASK) {
1146 case SPLL_FREQ_810MHz:
1147 link_clock = 81000;
1148 break;
1149 case SPLL_FREQ_1350MHz:
1150 link_clock = 135000;
1151 break;
1152 case SPLL_FREQ_2700MHz:
1153 link_clock = 270000;
1154 break;
1155 default:
1156 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1157 break;
1158 }
1159
1160 return link_clock * 2;
1161}
1162
1163static int hsw_compute_dpll(struct intel_atomic_state *state,
1164 struct intel_crtc *crtc,
1165 struct intel_encoder *encoder)
1166{
1167 struct intel_crtc_state *crtc_state =
1168 intel_atomic_get_new_crtc_state(state, crtc);
1169
1170 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1171 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1172 else if (intel_crtc_has_dp_encoder(crtc_state))
1173 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1174 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1175 return hsw_ddi_spll_compute_dpll(state, crtc);
1176 else
1177 return -EINVAL;
1178}
1179
1180static int hsw_get_dpll(struct intel_atomic_state *state,
1181 struct intel_crtc *crtc,
1182 struct intel_encoder *encoder)
1183{
1184 struct intel_crtc_state *crtc_state =
1185 intel_atomic_get_new_crtc_state(state, crtc);
1186 struct intel_shared_dpll *pll = NULL;
1187
1188 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1189 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1190 else if (intel_crtc_has_dp_encoder(crtc_state))
1191 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1192 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1193 pll = hsw_ddi_spll_get_dpll(state, crtc);
1194
1195 if (!pll)
1196 return -EINVAL;
1197
1198 intel_reference_shared_dpll(state, crtc,
1199 pll, &crtc_state->dpll_hw_state);
1200
1201 crtc_state->shared_dpll = pll;
1202
1203 return 0;
1204}
1205
1206static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1207{
1208 i915->display.dpll.ref_clks.ssc = 135000;
1209 /* Non-SSC is only used on non-ULT HSW. */
1210 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1211 i915->display.dpll.ref_clks.nssc = 24000;
1212 else
1213 i915->display.dpll.ref_clks.nssc = 135000;
1214}
1215
/* Log the HSW PLL hw state (WRPLL and SPLL control words) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1222
/* Ops for the HSW WRPLLs (used for HDMI outputs, see hsw_compute_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1229
/* Ops for the HSW SPLL (used for analog/CRT, see hsw_compute_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1236
/* The HSW LCPLLs are always on (INTEL_DPLL_ALWAYS_ON); nothing to enable. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
}
1241
/* The HSW LCPLLs are always on (INTEL_DPLL_ALWAYS_ON); nothing to disable. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
}
1246
/*
 * The always-on LCPLLs are unconditionally reported as enabled; there is
 * no per-PLL state worth reading back into @hw_state.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1253
/* Ops for the always-on HSW LCPLLs: enable/disable are no-op stubs. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1260
/* HSW PLL pool: two WRPLLs, one SPLL and three fixed-rate, always-on LCPLLs. */
static const struct dpll_info hsw_plls[] = {
	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
	  .flags = INTEL_DPLL_ALWAYS_ON, },
	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
	  .flags = INTEL_DPLL_ALWAYS_ON, },
	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
	  .flags = INTEL_DPLL_ALWAYS_ON, },
	{}
};
1273
/* Platform vtable wiring the HSW PLL table and management callbacks. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1282
/* Per-PLL control and config register offsets for the SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1286
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode, so no cfgcr1/cfgcr2 */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 — control lives in the WRPLL_CTL(0) register */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 — control lives in the WRPLL_CTL(1) register */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1313
/*
 * Program this PLL's field group in the shared DPLL_CTRL1 register.
 * Each PLL owns a 6-bit field at bit offset id * 6 (HDMI mode, SSC and
 * link rate), which is why the cached ctrl1 value is shifted by id * 6.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(i915, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
		     pll->state.hw_state.ctrl1 << (id * 6));
	/* read back to post the write before callers touch the PLL itself */
	intel_de_posting_read(i915, DPLL_CTRL1);
}
1324
/* Program CTRL1 and CFGCR1/2 for a SKL shared PLL, then enable and wait for lock. */
static void skl_ddi_pll_enable(struct drm_i915_private *i915,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(i915, pll);

	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	/* read back to post the config writes before flipping the enable bit */
	intel_de_posting_read(i915, regs[id].cfgcr1);
	intel_de_posting_read(i915, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&i915->drm, "DPLL %d not locked\n", id);
}
1344
/*
 * DPLL0 is never turned on/off here (it is always on, see
 * skl_ddi_dpll0_get_hw_state()); only its DPLL_CTRL1 fields are programmed.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(i915, pll);
}
1350
/* Clear the PLL enable bit; config registers are left untouched. */
static void skl_ddi_pll_disable(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(i915, regs[id].ctl);
}
1361
/* DPLL0 is always enabled (it drives CDCLK); disabling is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
}
1366
/*
 * Read back CTRL1 (and CFGCR1/2 when in HDMI mode) for an enabled SKL PLL.
 * Returns false if display power is off or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* don't wake the device just to read out state */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(i915, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each PLL owns a 6-bit field in the shared DPLL_CTRL1 register */
	val = intel_de_read(i915, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1403
/*
 * Read back DPLL0's CTRL1 fields. Unlike the other SKL PLLs, DPLL0 must
 * always be enabled, so a disabled PLL here triggers a WARN.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	/* don't wake the device just to read out state */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(i915, regs[id].ctl);
	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* each PLL owns a 6-bit field in the shared DPLL_CTRL1 register */
	val = intel_de_read(i915, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1436
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1443
1444/* DCO freq must be within +1%/-6% of the DCO central freq */
1445#define SKL_DCO_MAX_PDEVIATION 100
1446#define SKL_DCO_MAX_NDEVIATION 600
1447
1448static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1449 u64 central_freq,
1450 u64 dco_freq,
1451 unsigned int divider)
1452{
1453 u64 deviation;
1454
1455 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1456 central_freq);
1457
1458 /* positive deviation */
1459 if (dco_freq >= central_freq) {
1460 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1461 deviation < ctx->min_deviation) {
1462 ctx->min_deviation = deviation;
1463 ctx->central_freq = central_freq;
1464 ctx->dco_freq = dco_freq;
1465 ctx->p = divider;
1466 }
1467 /* negative deviation */
1468 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1469 deviation < ctx->min_deviation) {
1470 ctx->min_deviation = deviation;
1471 ctx->central_freq = central_freq;
1472 ctx->dco_freq = dco_freq;
1473 ctx->p = divider;
1474 }
1475}
1476
/*
 * Decompose the overall divider p into the hardware's p0 (pdiv), p1 (qdiv)
 * and p2 (kdiv) factors. Unsupported values of p leave the outputs
 * untouched (callers pre-initialize them to 0).
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* small halves map directly onto p2 */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1525
/* Register-field encodings for a computed SKL WRPLL configuration. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO freq */
	u32 dco_integer;	/* integer part of the DCO freq */
	u32 qdiv_ratio;		/* p1 */
	u32 qdiv_mode;		/* 0 = qdiv bypass (ratio 1), 1 = qdiv active */
	u32 kdiv;		/* encoded p2 */
	u32 pdiv;		/* encoded p0 */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1535
/*
 * Translate the chosen central frequency and p0/p1/p2 dividers into the
 * encoded register fields of @params, and compute the integer/fractional
 * DCO programming from @afe_clock.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* encode the DCO central frequency selection */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* encode p0 as the pdiv field */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* encode p2 as the kdiv field */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* qdiv is bypassed when the ratio is 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	/* NOTE(review): assumes ref_clock is in kHz and afe_clock in Hz — confirm */
	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1603
/*
 * Exhaustively search the (divider, DCO central frequency) space for the
 * combination whose DCO frequency deviates least from the central
 * frequency, preferring even dividers, then populate @wrpll_params.
 * Returns 0 on success, -EINVAL if no divider satisfies the deviation
 * limits.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	/* even dividers first (d == 0), odd dividers second (d == 1) */
	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1675
/*
 * Decode CFGCR1/2 back into the WRPLL output clock — the inverse of
 * skl_wrpll_params_populate(). Returns 0 on an undecodable register value.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv ratio only applies when qdiv mode is enabled */
	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* decode the pdiv field into the actual p0 divider */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	/* decode the kdiv field into the actual p2 divider */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* reconstruct the DCO frequency from its integer + 15-bit fraction */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x the port clock, hence the extra factor of 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1746
/*
 * Compute the WRPLL configuration for an HDMI port clock and store the
 * resulting CTRL1/CFGCR1/CFGCR2 values in the crtc state. The port clock
 * is rounded to what the PLL can actually produce via the readback path.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, skl_ddi_calculate_wrpll() wants Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		 wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		 wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* round port_clock to what the computed PLL state really produces */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1786
/*
 * Encode the DP/eDP link rate into the CTRL1 value stored in the crtc
 * state. NOTE(review): an unlisted port_clock falls through silently,
 * leaving only the override bit set — no default case here.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1823
1824static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1825 const struct intel_shared_dpll *pll,
1826 const struct intel_dpll_hw_state *pll_state)
1827{
1828 int link_clock = 0;
1829
1830 switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1831 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1832 case DPLL_CTRL1_LINK_RATE_810:
1833 link_clock = 81000;
1834 break;
1835 case DPLL_CTRL1_LINK_RATE_1080:
1836 link_clock = 108000;
1837 break;
1838 case DPLL_CTRL1_LINK_RATE_1350:
1839 link_clock = 135000;
1840 break;
1841 case DPLL_CTRL1_LINK_RATE_1620:
1842 link_clock = 162000;
1843 break;
1844 case DPLL_CTRL1_LINK_RATE_2160:
1845 link_clock = 216000;
1846 break;
1847 case DPLL_CTRL1_LINK_RATE_2700:
1848 link_clock = 270000;
1849 break;
1850 default:
1851 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1852 break;
1853 }
1854
1855 return link_clock * 2;
1856}
1857
1858static int skl_compute_dpll(struct intel_atomic_state *state,
1859 struct intel_crtc *crtc,
1860 struct intel_encoder *encoder)
1861{
1862 struct intel_crtc_state *crtc_state =
1863 intel_atomic_get_new_crtc_state(state, crtc);
1864
1865 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1866 return skl_ddi_hdmi_pll_dividers(crtc_state);
1867 else if (intel_crtc_has_dp_encoder(crtc_state))
1868 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1869 else
1870 return -EINVAL;
1871}
1872
1873static int skl_get_dpll(struct intel_atomic_state *state,
1874 struct intel_crtc *crtc,
1875 struct intel_encoder *encoder)
1876{
1877 struct intel_crtc_state *crtc_state =
1878 intel_atomic_get_new_crtc_state(state, crtc);
1879 struct intel_shared_dpll *pll;
1880
1881 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1882 pll = intel_find_shared_dpll(state, crtc,
1883 &crtc_state->dpll_hw_state,
1884 BIT(DPLL_ID_SKL_DPLL0));
1885 else
1886 pll = intel_find_shared_dpll(state, crtc,
1887 &crtc_state->dpll_hw_state,
1888 BIT(DPLL_ID_SKL_DPLL3) |
1889 BIT(DPLL_ID_SKL_DPLL2) |
1890 BIT(DPLL_ID_SKL_DPLL1));
1891 if (!pll)
1892 return -EINVAL;
1893
1894 intel_reference_shared_dpll(state, crtc,
1895 pll, &crtc_state->dpll_hw_state);
1896
1897 crtc_state->shared_dpll = pll;
1898
1899 return 0;
1900}
1901
1902static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1903 const struct intel_shared_dpll *pll,
1904 const struct intel_dpll_hw_state *pll_state)
1905{
1906 /*
1907 * ctrl1 register is already shifted for each pll, just use 0 to get
1908 * the internal shift for each field
1909 */
1910 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1911 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1912 else
1913 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1914}
1915
/* The SKL PLL reference is the CDCLK reference clock; there is no SSC ref. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1921
/* Log the SKL PLL hw state (CTRL1 + CFGCR1/2 values) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&i915->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1931
/* Ops for the on-demand SKL DPLLs 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1938
/* Ops for the always-on DPLL0: enable only programs CTRL1, disable is a no-op. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1945
/* SKL PLL pool: always-on DPLL0 plus three on-demand DPLLs. */
static const struct dpll_info skl_plls[] = {
	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
	  .flags = INTEL_DPLL_ALWAYS_ON, },
	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
	{}
};
1954
/* Platform vtable wiring the SKL PLL table and management callbacks. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1963
/*
 * Program and enable a BXT/GLK port PLL: power up (GLK only), write the
 * divider/coefficient registers from the cached hw_state, trigger a
 * recalibration, enable the PLL, wait for lock, then program the lane
 * stagger configuration.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
			       struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(i915, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK needs the PLL power enabled explicitly before programming */
	if (IS_GEMINILAKE(i915)) {
		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&i915->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N */
	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Write coeff */
	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&i915->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(i915)) {
		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2064
/* Disable a BXT/GLK port PLL; on GLK also remove PLL power afterwards. */
static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(i915)) {
		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&i915->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2083
/*
 * Read back the per-PHY PLL registers into @hw_state, masking each value
 * down to the fields this driver programs. Returns false if display power
 * is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(i915, port, &phy, &ch);

	/* don't wake the device just to read out state */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(i915,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&i915->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(i915,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2163
/*
 * pre-calculated values for DP linkrates, looked up by exact .dot match;
 * entry 0 (162000) doubles as the fallback (see bxt_ddi_dp_pll_dividers())
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2175
2176static int
2177bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2178 struct dpll *clk_div)
2179{
2180 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2181
2182 /* Calculate HDMI div */
2183 /*
2184 * FIXME: tie the following calculation into
2185 * i9xx_crtc_compute_clock
2186 */
2187 if (!bxt_find_best_dpll(crtc_state, clk_div))
2188 return -EINVAL;
2189
2190 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2191
2192 return 0;
2193}
2194
2195static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2196 struct dpll *clk_div)
2197{
2198 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2199 int i;
2200
2201 *clk_div = bxt_dp_clk_val[0];
2202 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2203 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2204 *clk_div = bxt_dp_clk_val[i];
2205 break;
2206 }
2207 }
2208
2209 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2210
2211 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2212 clk_div->dot != crtc_state->port_clock);
2213}
2214
/*
 * Translate pre-validated PLL dividers into the BXT port PLL register values
 * stored in crtc_state->dpll_hw_state. Returns 0 or -EINVAL for a VCO
 * frequency outside the supported bands.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Loop filter coefficients depend on the VCO frequency band (kHz). */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger delay scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	/* m2 is a .22 fixed point value: integer in pll0, fraction in pll2. */
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	/* Only enable the fractional divider when there is a fraction. */
	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2282
2283static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2284 const struct intel_shared_dpll *pll,
2285 const struct intel_dpll_hw_state *pll_state)
2286{
2287 struct dpll clock;
2288
2289 clock.m1 = 2;
2290 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2291 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2292 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2293 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2294 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2295 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2296
2297 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2298}
2299
2300static int
2301bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2302{
2303 struct dpll clk_div = {};
2304
2305 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2306
2307 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2308}
2309
2310static int
2311bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2312{
2313 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2314 struct dpll clk_div = {};
2315 int ret;
2316
2317 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2318
2319 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2320 if (ret)
2321 return ret;
2322
2323 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2324 &crtc_state->dpll_hw_state);
2325
2326 return 0;
2327}
2328
2329static int bxt_compute_dpll(struct intel_atomic_state *state,
2330 struct intel_crtc *crtc,
2331 struct intel_encoder *encoder)
2332{
2333 struct intel_crtc_state *crtc_state =
2334 intel_atomic_get_new_crtc_state(state, crtc);
2335
2336 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2337 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2338 else if (intel_crtc_has_dp_encoder(crtc_state))
2339 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2340 else
2341 return -EINVAL;
2342}
2343
2344static int bxt_get_dpll(struct intel_atomic_state *state,
2345 struct intel_crtc *crtc,
2346 struct intel_encoder *encoder)
2347{
2348 struct intel_crtc_state *crtc_state =
2349 intel_atomic_get_new_crtc_state(state, crtc);
2350 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2351 struct intel_shared_dpll *pll;
2352 enum intel_dpll_id id;
2353
2354 /* 1:1 mapping between ports and PLLs */
2355 id = (enum intel_dpll_id) encoder->port;
2356 pll = intel_get_shared_dpll_by_id(i915, id);
2357
2358 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2359 crtc->base.base.id, crtc->base.name, pll->info->name);
2360
2361 intel_reference_shared_dpll(state, crtc,
2362 pll, &crtc_state->dpll_hw_state);
2363
2364 crtc_state->shared_dpll = pll;
2365
2366 return 0;
2367}
2368
/* BXT uses a 100 MHz reference for both the SSC and non-SSC PLL paths. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2375
2376static void bxt_dump_hw_state(struct drm_i915_private *i915,
2377 const struct intel_dpll_hw_state *hw_state)
2378{
2379 drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2380 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2381 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2382 hw_state->ebb0,
2383 hw_state->ebb4,
2384 hw_state->pll0,
2385 hw_state->pll1,
2386 hw_state->pll2,
2387 hw_state->pll3,
2388 hw_state->pll6,
2389 hw_state->pll8,
2390 hw_state->pll9,
2391 hw_state->pll10,
2392 hw_state->pcsdw12);
2393}
2394
/* Hardware access callbacks for the BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2401
/* BXT exposes one port PLL per DDI port (A/B/C); the list is NULL-terminated. */
static const struct dpll_info bxt_plls[] = {
	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{}
};
2408
/* Platform DPLL manager glue for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2417
/*
 * Decompose a WRPLL post divider into its pdiv/qdiv/kdiv factors such that
 * bestdiv == pdiv * qdiv * kdiv. Callers only pass dividers from the fixed
 * candidate list in icl_calc_wrpll().
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			/* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers; the order of the divisibility checks matters */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2456
/*
 * Encode the logical pdiv/qdiv/kdiv dividers and the target DCO frequency
 * into the register-field representation stored in @params.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* kdiv register encoding: 1 -> 1, 2 -> 2, 3 -> 4 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* pdiv register encoding: 2 -> 1, 3 -> 2, 5 -> 4, 7 -> 8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv != 1 is only meaningful when kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO divider ratio as a .15 binary fixed point value */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2504
2505/*
2506 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2507 * Program half of the nominal DCO divider fraction value.
2508 */
2509static bool
2510ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2511{
2512 return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2513 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2514 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2515 i915->display.dpll.ref_clks.nssc == 38400;
2516}
2517
/* Pre-computed combo PHY PLL parameters for a given port clock. */
struct icl_combo_pll_params {
	/* port clock in kHz this entry applies to */
	int clock;
	/* matching, already register-encoded PLL dividers */
	struct skl_wrpll_params wrpll;
};
2522
2523/*
2524 * These values alrea already adjusted: they're the bits we write to the
2525 * registers, not the logical values.
2526 */
2527static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2528 { 540000,
2529 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2530 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2531 { 270000,
2532 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2533 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2534 { 162000,
2535 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2536 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2537 { 324000,
2538 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2539 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2540 { 216000,
2541 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2542 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2543 { 432000,
2544 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2545 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2546 { 648000,
2547 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2548 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2549 { 810000,
2550 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2551 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2552};
2553
2554
/* Also used for 38.4 MHz values (the DPLL halves a 38.4 MHz reference). */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2582
/* Fixed TBT PLL parameters, selected by reference clock and display version. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2603
2604static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2605 struct skl_wrpll_params *pll_params)
2606{
2607 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2608 const struct icl_combo_pll_params *params =
2609 i915->display.dpll.ref_clks.nssc == 24000 ?
2610 icl_dp_combo_pll_24MHz_values :
2611 icl_dp_combo_pll_19_2MHz_values;
2612 int clock = crtc_state->port_clock;
2613 int i;
2614
2615 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2616 if (clock == params[i].clock) {
2617 *pll_params = params[i].wrpll;
2618 return 0;
2619 }
2620 }
2621
2622 MISSING_CASE(clock);
2623 return -EINVAL;
2624}
2625
2626static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2627 struct skl_wrpll_params *pll_params)
2628{
2629 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2630
2631 if (DISPLAY_VER(i915) >= 12) {
2632 switch (i915->display.dpll.ref_clks.nssc) {
2633 default:
2634 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2635 fallthrough;
2636 case 19200:
2637 case 38400:
2638 *pll_params = tgl_tbt_pll_19_2MHz_values;
2639 break;
2640 case 24000:
2641 *pll_params = tgl_tbt_pll_24MHz_values;
2642 break;
2643 }
2644 } else {
2645 switch (i915->display.dpll.ref_clks.nssc) {
2646 default:
2647 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2648 fallthrough;
2649 case 19200:
2650 case 38400:
2651 *pll_params = icl_tbt_pll_19_2MHz_values;
2652 break;
2653 case 24000:
2654 *pll_params = icl_tbt_pll_24MHz_values;
2655 break;
2656 }
2657 }
2658
2659 return 0;
2660}
2661
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level. There is thus no single meaningful
	 * frequency to report here; warn if anyone ever asks.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2674
2675static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2676{
2677 int ref_clock = i915->display.dpll.ref_clks.nssc;
2678
2679 /*
2680 * For ICL+, the spec states: if reference frequency is 38.4,
2681 * use 19.2 because the DPLL automatically divides that by 2.
2682 */
2683 if (ref_clock == 38400)
2684 ref_clock = 19200;
2685
2686 return ref_clock;
2687}
2688
2689static int
2690icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2691 struct skl_wrpll_params *wrpll_params)
2692{
2693 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2694 int ref_clock = icl_wrpll_ref_clock(i915);
2695 u32 afe_clock = crtc_state->port_clock * 5;
2696 u32 dco_min = 7998000;
2697 u32 dco_max = 10000000;
2698 u32 dco_mid = (dco_min + dco_max) / 2;
2699 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2700 18, 20, 24, 28, 30, 32, 36, 40,
2701 42, 44, 48, 50, 52, 54, 56, 60,
2702 64, 66, 68, 70, 72, 76, 78, 80,
2703 84, 88, 90, 92, 96, 98, 100, 102,
2704 3, 5, 7, 9, 15, 21 };
2705 u32 dco, best_dco = 0, dco_centrality = 0;
2706 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2707 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2708
2709 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2710 dco = afe_clock * dividers[d];
2711
2712 if (dco <= dco_max && dco >= dco_min) {
2713 dco_centrality = abs(dco - dco_mid);
2714
2715 if (dco_centrality < best_dco_centrality) {
2716 best_dco_centrality = dco_centrality;
2717 best_div = dividers[d];
2718 best_dco = dco;
2719 }
2720 }
2721 }
2722
2723 if (best_div == 0)
2724 return -EINVAL;
2725
2726 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2727 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2728 pdiv, qdiv, kdiv);
2729
2730 return 0;
2731}
2732
/*
 * Decode the combo PHY PLL register state back into the resulting port clock
 * in kHz. Returns 0 (with a warning) if a divider decodes to zero.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv only divides when qdiv_mode is enabled */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* translate the pdiv register encoding back to the logical divider */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* translate the kdiv register encoding back to the logical divider */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo display WA #22010492432: the register holds half the fraction */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* dco_fraction is a .15 fixed point value */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock = DCO / (p0 * p1 * p2); port clock = AFE / 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2793
/*
 * Convert computed WRPLL parameters into the CFGCR0/CFGCR1 (and optionally
 * DIV0) register values of the combo PHY PLL state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* Display WA #22010492432: program half the DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* the central frequency / clock select field differs per generation */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* optional VBT override of the AFC startup value */
	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2819
/*
 * Search for MG/DKL PHY PLL dividers (div1 * div2) that place the DCO
 * frequency inside the valid range for the given port clock. On success the
 * resulting DCO frequency is returned in @target_dco_khz and the clktop
 * register fields are filled in @state; returns -EINVAL if no combination
 * fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP is locked to 8.1 GHz DCO; HDMI may use the whole range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* encode div1 into the HSDIV register field */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2895
2896/*
2897 * The specification for this function uses real numbers, so the math had to be
2898 * adapted to integer-only calculation, that's why it looks so different.
2899 */
2900static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2901 struct intel_dpll_hw_state *pll_state)
2902{
2903 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2904 int refclk_khz = i915->display.dpll.ref_clks.nssc;
2905 int clock = crtc_state->port_clock;
2906 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2907 u32 iref_ndiv, iref_trim, iref_pulse_w;
2908 u32 prop_coeff, int_coeff;
2909 u32 tdc_targetcnt, feedfwgain;
2910 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2911 u64 tmp;
2912 bool use_ssc = false;
2913 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2914 bool is_dkl = DISPLAY_VER(i915) >= 12;
2915 int ret;
2916
2917 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2918 pll_state, is_dkl);
2919 if (ret)
2920 return ret;
2921
2922 m1div = 2;
2923 m2div_int = dco_khz / (refclk_khz * m1div);
2924 if (m2div_int > 255) {
2925 if (!is_dkl) {
2926 m1div = 4;
2927 m2div_int = dco_khz / (refclk_khz * m1div);
2928 }
2929
2930 if (m2div_int > 255)
2931 return -EINVAL;
2932 }
2933 m2div_rem = dco_khz % (refclk_khz * m1div);
2934
2935 tmp = (u64)m2div_rem * (1 << 22);
2936 do_div(tmp, refclk_khz * m1div);
2937 m2div_frac = tmp;
2938
2939 switch (refclk_khz) {
2940 case 19200:
2941 iref_ndiv = 1;
2942 iref_trim = 28;
2943 iref_pulse_w = 1;
2944 break;
2945 case 24000:
2946 iref_ndiv = 1;
2947 iref_trim = 25;
2948 iref_pulse_w = 2;
2949 break;
2950 case 38400:
2951 iref_ndiv = 2;
2952 iref_trim = 28;
2953 iref_pulse_w = 1;
2954 break;
2955 default:
2956 MISSING_CASE(refclk_khz);
2957 return -EINVAL;
2958 }
2959
2960 /*
2961 * tdc_res = 0.000003
2962 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2963 *
2964 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2965 * was supposed to be a division, but we rearranged the operations of
2966 * the formula to avoid early divisions so we don't multiply the
2967 * rounding errors.
2968 *
2969 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2970 * we also rearrange to work with integers.
2971 *
2972 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2973 * last division by 10.
2974 */
2975 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2976
2977 /*
2978 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2979 * 32 bits. That's not a problem since we round the division down
2980 * anyway.
2981 */
2982 feedfwgain = (use_ssc || m2div_rem > 0) ?
2983 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2984
2985 if (dco_khz >= 9000000) {
2986 prop_coeff = 5;
2987 int_coeff = 10;
2988 } else {
2989 prop_coeff = 4;
2990 int_coeff = 8;
2991 }
2992
2993 if (use_ssc) {
2994 tmp = mul_u32_u32(dco_khz, 47 * 32);
2995 do_div(tmp, refclk_khz * m1div * 10000);
2996 ssc_stepsize = tmp;
2997
2998 tmp = mul_u32_u32(dco_khz, 1000);
2999 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3000 } else {
3001 ssc_stepsize = 0;
3002 ssc_steplen = 0;
3003 }
3004 ssc_steplog = 4;
3005
3006 /* write pll_state calculations */
3007 if (is_dkl) {
3008 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3009 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3010 DKL_PLL_DIV0_FBPREDIV(m1div) |
3011 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3012 if (i915->display.vbt.override_afc_startup) {
3013 u8 val = i915->display.vbt.override_afc_startup_val;
3014
3015 pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3016 }
3017
3018 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3019 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3020
3021 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3022 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3023 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3024 (use_ssc ? DKL_PLL_SSC_EN : 0);
3025
3026 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3027 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3028
3029 pll_state->mg_pll_tdc_coldst_bias =
3030 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3031 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3032
3033 } else {
3034 pll_state->mg_pll_div0 =
3035 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3036 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3037 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3038
3039 pll_state->mg_pll_div1 =
3040 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3041 MG_PLL_DIV1_DITHER_DIV_2 |
3042 MG_PLL_DIV1_NDIVRATIO(1) |
3043 MG_PLL_DIV1_FBPREDIV(m1div);
3044
3045 pll_state->mg_pll_lf =
3046 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3047 MG_PLL_LF_AFCCNTSEL_512 |
3048 MG_PLL_LF_GAINCTRL(1) |
3049 MG_PLL_LF_INT_COEFF(int_coeff) |
3050 MG_PLL_LF_PROP_COEFF(prop_coeff);
3051
3052 pll_state->mg_pll_frac_lock =
3053 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3054 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3055 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3056 MG_PLL_FRAC_LOCK_DCODITHEREN |
3057 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3058 if (use_ssc || m2div_rem > 0)
3059 pll_state->mg_pll_frac_lock |=
3060 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3061
3062 pll_state->mg_pll_ssc =
3063 (use_ssc ? MG_PLL_SSC_EN : 0) |
3064 MG_PLL_SSC_TYPE(2) |
3065 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3066 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3067 MG_PLL_SSC_FLLEN |
3068 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3069
3070 pll_state->mg_pll_tdc_coldst_bias =
3071 MG_PLL_TDC_COLDST_COLDSTART |
3072 MG_PLL_TDC_COLDST_IREFINT_EN |
3073 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3074 MG_PLL_TDC_TDCOVCCORR_EN |
3075 MG_PLL_TDC_TDCSEL(3);
3076
3077 pll_state->mg_pll_bias =
3078 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3079 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3080 MG_PLL_BIAS_BIAS_BONUS(10) |
3081 MG_PLL_BIAS_BIASCAL_EN |
3082 MG_PLL_BIAS_CTRIM(12) |
3083 MG_PLL_BIAS_VREF_RDAC(4) |
3084 MG_PLL_BIAS_IREFTRIM(iref_trim);
3085
3086 if (refclk_khz == 38400) {
3087 pll_state->mg_pll_tdc_coldst_bias_mask =
3088 MG_PLL_TDC_COLDST_COLDSTART;
3089 pll_state->mg_pll_bias_mask = 0;
3090 } else {
3091 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3092 pll_state->mg_pll_bias_mask = -1U;
3093 }
3094
3095 pll_state->mg_pll_tdc_coldst_bias &=
3096 pll_state->mg_pll_tdc_coldst_bias_mask;
3097 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3098 }
3099
3100 return 0;
3101}
3102
/*
 * Decode the MG/DKL PHY PLL register state back into the resulting port
 * clock in kHz. Returns 0 (with a warning) on an unexpected HSDIV encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = i915->display.dpll.ref_clks.nssc;

	/* The feedback divider fields live in different registers on DKL. */
	if (DISPLAY_VER(i915) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* translate the HSDIV register encoding back to the logical divider */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3174
3175/**
3176 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3177 * @crtc_state: state for the CRTC to select the DPLL for
3178 * @port_dpll_id: the active @port_dpll_id to select
3179 *
3180 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3181 * CRTC.
3182 */
3183void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3184 enum icl_port_dpll_id port_dpll_id)
3185{
3186 struct icl_port_dpll *port_dpll =
3187 &crtc_state->icl_port_dplls[port_dpll_id];
3188
3189 crtc_state->shared_dpll = port_dpll->pll;
3190 crtc_state->dpll_hw_state = port_dpll->hw_state;
3191}
3192
3193static void icl_update_active_dpll(struct intel_atomic_state *state,
3194 struct intel_crtc *crtc,
3195 struct intel_encoder *encoder)
3196{
3197 struct intel_crtc_state *crtc_state =
3198 intel_atomic_get_new_crtc_state(state, crtc);
3199 struct intel_digital_port *primary_port;
3200 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3201
3202 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3203 enc_to_mst(encoder)->primary :
3204 enc_to_dig_port(encoder);
3205
3206 if (primary_port &&
3207 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3208 intel_tc_port_in_legacy_mode(primary_port)))
3209 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3210
3211 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3212}
3213
/*
 * Compute the combo PHY PLL state for the CRTC's default port DPLL slot and
 * update the port clock from the actual (rounded) PLL frequency.
 */
static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct skl_wrpll_params pll_params = {};
	int ret;

	/* HDMI/DSI compute their dividers; DP uses a pre-computed table. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (ret)
		return ret;

	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);

	/* this is mainly for the fastset check */
	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);

	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
							    &port_dpll->hw_state);

	return 0;
}
3244
/*
 * Reserve a combo PHY DPLL for the CRTC. The pool of candidate DPLLs
 * depends on the platform (and, on DG1 and JSL/EHL, on the port), minus
 * any DPLLs reserved by HTI. Returns 0 on success, -EINVAL if no suitable
 * DPLL could be found.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

	if (IS_ALDERLAKE_S(i915)) {
		/* ADL-S: all four combo DPLLs are candidates for any port. */
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(i915)) {
		/* DG1: DPLL2/3 serve ports D/E, DPLL0/1 the rest. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(i915)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		   port != PORT_A) {
		/* JSL/EHL: DPLL4 is a candidate for all ports except A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_hti_dpll_mask(i915);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3304
3305static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3306 struct intel_crtc *crtc)
3307{
3308 struct drm_i915_private *i915 = to_i915(state->base.dev);
3309 struct intel_crtc_state *crtc_state =
3310 intel_atomic_get_new_crtc_state(state, crtc);
3311 struct icl_port_dpll *port_dpll =
3312 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3313 struct skl_wrpll_params pll_params = {};
3314 int ret;
3315
3316 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3317 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3318 if (ret)
3319 return ret;
3320
3321 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3322
3323 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3324 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3325 if (ret)
3326 return ret;
3327
3328 /* this is mainly for the fastset check */
3329 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3330
3331 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3332 &port_dpll->hw_state);
3333
3334 return 0;
3335}
3336
3337static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3338 struct intel_crtc *crtc,
3339 struct intel_encoder *encoder)
3340{
3341 struct drm_i915_private *i915 = to_i915(state->base.dev);
3342 struct intel_crtc_state *crtc_state =
3343 intel_atomic_get_new_crtc_state(state, crtc);
3344 struct icl_port_dpll *port_dpll =
3345 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3346 enum intel_dpll_id dpll_id;
3347 int ret;
3348
3349 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3350 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3351 &port_dpll->hw_state,
3352 BIT(DPLL_ID_ICL_TBTPLL));
3353 if (!port_dpll->pll)
3354 return -EINVAL;
3355 intel_reference_shared_dpll(state, crtc,
3356 port_dpll->pll, &port_dpll->hw_state);
3357
3358
3359 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3360 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3361 encoder->port));
3362 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3363 &port_dpll->hw_state,
3364 BIT(dpll_id));
3365 if (!port_dpll->pll) {
3366 ret = -EINVAL;
3367 goto err_unreference_tbt_pll;
3368 }
3369 intel_reference_shared_dpll(state, crtc,
3370 port_dpll->pll, &port_dpll->hw_state);
3371
3372 icl_update_active_dpll(state, crtc, encoder);
3373
3374 return 0;
3375
3376err_unreference_tbt_pll:
3377 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3378 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3379
3380 return ret;
3381}
3382
3383static int icl_compute_dplls(struct intel_atomic_state *state,
3384 struct intel_crtc *crtc,
3385 struct intel_encoder *encoder)
3386{
3387 struct drm_i915_private *i915 = to_i915(state->base.dev);
3388 enum phy phy = intel_port_to_phy(i915, encoder->port);
3389
3390 if (intel_phy_is_combo(i915, phy))
3391 return icl_compute_combo_phy_dpll(state, crtc);
3392 else if (intel_phy_is_tc(i915, phy))
3393 return icl_compute_tc_phy_dplls(state, crtc);
3394
3395 MISSING_CASE(phy);
3396
3397 return 0;
3398}
3399
3400static int icl_get_dplls(struct intel_atomic_state *state,
3401 struct intel_crtc *crtc,
3402 struct intel_encoder *encoder)
3403{
3404 struct drm_i915_private *i915 = to_i915(state->base.dev);
3405 enum phy phy = intel_port_to_phy(i915, encoder->port);
3406
3407 if (intel_phy_is_combo(i915, phy))
3408 return icl_get_combo_phy_dpll(state, crtc, encoder);
3409 else if (intel_phy_is_tc(i915, phy))
3410 return icl_get_tc_phy_dplls(state, crtc, encoder);
3411
3412 MISSING_CASE(phy);
3413
3414 return -EINVAL;
3415}
3416
3417static void icl_put_dplls(struct intel_atomic_state *state,
3418 struct intel_crtc *crtc)
3419{
3420 const struct intel_crtc_state *old_crtc_state =
3421 intel_atomic_get_old_crtc_state(state, crtc);
3422 struct intel_crtc_state *new_crtc_state =
3423 intel_atomic_get_new_crtc_state(state, crtc);
3424 enum icl_port_dpll_id id;
3425
3426 new_crtc_state->shared_dpll = NULL;
3427
3428 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3429 const struct icl_port_dpll *old_port_dpll =
3430 &old_crtc_state->icl_port_dplls[id];
3431 struct icl_port_dpll *new_port_dpll =
3432 &new_crtc_state->icl_port_dplls[id];
3433
3434 new_port_dpll->pll = NULL;
3435
3436 if (!old_port_dpll->pll)
3437 continue;
3438
3439 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3440 }
3441}
3442
/*
 * Read out the current hardware state of an ICL MG PHY PLL (TC ports).
 *
 * Returns true and fills @hw_state if the PLL is powered and enabled,
 * false otherwise. Each value is masked down to the fields the driver
 * owns, mirroring the RMW masks used on the write side
 * (icl_mg_pll_write()), so readout can be compared against computed state.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);

	/* The registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(i915,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(i915,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The BIAS/TDC_COLDST_BIAS masks depend on the reference clock:
	 * with a 38.4 MHz non-SSC refclk only the COLDSTART bit is
	 * tracked, otherwise all bits are. The masks are stored in
	 * @hw_state so the write side can reuse them for its RMW cycles.
	 */
	if (i915->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3508
/*
 * Read out the current hardware state of a Dekel (TGL+ TC) PHY PLL.
 *
 * Returns true and fills @hw_state if the PLL is powered and enabled,
 * false otherwise. Each value is masked down to the fields the driver
 * owns, mirroring the masks used on the write side (dkl_pll_write()).
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* The registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* The AFC startup field is only tracked when the VBT overrides it. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (i915->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3579
/*
 * Common hw state readout for combo and TBT PLLs: read back CFGCR0/CFGCR1
 * (and, on TGL+ with a VBT AFC startup override, the DIV0 AFC field) from
 * the platform-specific register locations.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* The registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(i915)) {
		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(i915)) {
		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(i915)) {
		hw_state->cfgcr0 = intel_de_read(i915,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(i915,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(i915) >= 12) {
		hw_state->cfgcr0 = intel_de_read(i915,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(i915,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only tracked when the VBT overrides AFC startup. */
		if (i915->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* ICL/JSL/EHL; EHL DPLL4 uses the CFGCR pair at index 4. */
		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(i915,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(i915,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(i915,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(i915,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3639
3640static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3641 struct intel_shared_dpll *pll,
3642 struct intel_dpll_hw_state *hw_state)
3643{
3644 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3645
3646 return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3647}
3648
/* TBT PLL readout uses the single fixed TBT_PLL_ENABLE register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
}
3655
/*
 * Program the CFGCR0/CFGCR1 registers (and, on TGL+, the AFC startup
 * field of DIV0) of a combo/TBT PLL from the software PLL state. The
 * register locations are platform-specific.
 */
static void icl_dpll_write(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(i915)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(i915)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(i915)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(i915) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* ICL/JSL/EHL; EHL DPLL4 uses the CFGCR pair at index 4. */
		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
	/* A VBT AFC startup override without a DIV0 register would be a bug. */
	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (i915->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(i915, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(i915, cfgcr1_reg);
}
3697
/* Program an ICL MG PHY PLL from the software PLL state. */
static void icl_mg_pll_write(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are written in full, no reserved fields to keep. */
	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC_COLDST_BIAS masks are refclk-dependent, from readout/calc. */
	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3740
/* Program a Dekel (TGL+ TC) PHY PLL from the software PLL state. */
static void dkl_pll_write(struct drm_i915_private *i915,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only touched when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (i915->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3805
/* Power up the PLL and wait for its power state bit to assert. */
static void icl_pll_power_enable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&i915->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3820
/* Set PLL_ENABLE and wait for the PLL to report lock. */
static void icl_pll_enable(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
}
3831
/*
 * Wa_16011069516: disable CMTG (DPT) clock gating on ADL-P A0 steppings.
 * Only applies while DPLL0 is being enabled; a no-op elsewhere.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	/*
	 * NOTE(review): the sanity check below relies on intel_de_rmw()
	 * returning the pre-RMW register value — confirm that contract.
	 */
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3855
/*
 * Full enable sequence for a combo PHY PLL: power it up, program the
 * dividers, enable and wait for lock, then apply the ADL-P CMTG clock
 * gating workaround.
 */
static void combo_pll_enable(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);

	icl_pll_power_enable(i915, pll, enable_reg);

	icl_dpll_write(i915, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(i915, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(i915, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3877
/*
 * Full enable sequence for the TBT PLL: power it up, program the dividers,
 * then enable and wait for lock, all via the fixed TBT_PLL_ENABLE register.
 */
static void tbt_pll_enable(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);

	icl_dpll_write(i915, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3895
/*
 * Full enable sequence for a TC PHY PLL: power it up, program it via the
 * Dekel (TGL+) or MG (ICL) path, then enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *i915,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);

	icl_pll_power_enable(i915, pll, enable_reg);

	/* TGL+ TC ports use Dekel PHYs, ICL uses MG PHYs. */
	if (DISPLAY_VER(i915) >= 12)
		dkl_pll_write(i915, pll);
	else
		icl_mg_pll_write(i915, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(i915, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3918
/*
 * Common disable sequence for ICL-style PLLs: clear PLL_ENABLE and wait
 * for lock to drop, then power the PLL down and wait for its power state
 * bit to clear.
 */
static void icl_pll_disable(struct drm_i915_private *i915,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&i915->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3949
/* Disable a combo PHY PLL through its per-PLL enable register. */
static void combo_pll_disable(struct drm_i915_private *i915,
			      struct intel_shared_dpll *pll)
{
	icl_pll_disable(i915, pll, intel_combo_pll_enable_reg(i915, pll));
}
3957
/* Disable the TBT PLL through the fixed TBT_PLL_ENABLE register. */
static void tbt_pll_disable(struct drm_i915_private *i915,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
}
3963
/* Disable a TC PHY PLL through its per-PLL enable register. */
static void mg_pll_disable(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(i915, pll, intel_tc_pll_enable_reg(i915, pll));
}
3971
/* Refresh the cached non-SSC DPLL reference clock from the CDCLK refclk. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3977
3978static void icl_dump_hw_state(struct drm_i915_private *i915,
3979 const struct intel_dpll_hw_state *hw_state)
3980{
3981 drm_dbg_kms(&i915->drm,
3982 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3983 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3984 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3985 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3986 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3987 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3988 hw_state->cfgcr0, hw_state->cfgcr1,
3989 hw_state->div0,
3990 hw_state->mg_refclkin_ctl,
3991 hw_state->mg_clktop2_coreclkctl1,
3992 hw_state->mg_clktop2_hsclkctl,
3993 hw_state->mg_pll_div0,
3994 hw_state->mg_pll_div1,
3995 hw_state->mg_pll_lf,
3996 hw_state->mg_pll_frac_lock,
3997 hw_state->mg_pll_ssc,
3998 hw_state->mg_pll_bias,
3999 hw_state->mg_pll_tdc_coldst_bias);
4000}
4001
/* Ops vtable for combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Ops vtable for the TBT PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Ops vtable for ICL MG PHY (TC port) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4022
/* ICL: two combo DPLLs, the TBT PLL and four MG PHY (TC port) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4043
/* EHL/JSL: three combo DPLLs; DPLL4 requires the DC_OFF power domain. */
static const struct dpll_info ehl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4060
/* Ops vtable for Dekel PHY (TGL+ TC port) PLLs. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4067
/* TGL: two combo DPLLs, the TBT PLL and six Dekel TC PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
	{}
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4090
/* RKL: three combo DPLLs, no TC ports. */
static const struct dpll_info rkl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
	{}
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4106
/* DG1: four combo DPLLs, no TC ports. */
static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4123
/* ADL-S: four combo DPLLs, no TC ports. */
static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4140
/* Alder Lake-P: two combo PLLs, TBT PLL and four Type-C (Dekel PHY) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4161
4162/**
4163 * intel_shared_dpll_init - Initialize shared DPLLs
4164 * @i915: i915 device
4165 *
4166 * Initialize shared DPLLs for @i915.
4167 */
4168void intel_shared_dpll_init(struct drm_i915_private *i915)
4169{
4170 const struct intel_dpll_mgr *dpll_mgr = NULL;
4171 const struct dpll_info *dpll_info;
4172 int i;
4173
4174 mutex_init(&i915->display.dpll.lock);
4175
4176 if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4177 /* No shared DPLLs on DG2; port PLLs are part of the PHY */
4178 dpll_mgr = NULL;
4179 else if (IS_ALDERLAKE_P(i915))
4180 dpll_mgr = &adlp_pll_mgr;
4181 else if (IS_ALDERLAKE_S(i915))
4182 dpll_mgr = &adls_pll_mgr;
4183 else if (IS_DG1(i915))
4184 dpll_mgr = &dg1_pll_mgr;
4185 else if (IS_ROCKETLAKE(i915))
4186 dpll_mgr = &rkl_pll_mgr;
4187 else if (DISPLAY_VER(i915) >= 12)
4188 dpll_mgr = &tgl_pll_mgr;
4189 else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4190 dpll_mgr = &ehl_pll_mgr;
4191 else if (DISPLAY_VER(i915) >= 11)
4192 dpll_mgr = &icl_pll_mgr;
4193 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4194 dpll_mgr = &bxt_pll_mgr;
4195 else if (DISPLAY_VER(i915) == 9)
4196 dpll_mgr = &skl_pll_mgr;
4197 else if (HAS_DDI(i915))
4198 dpll_mgr = &hsw_pll_mgr;
4199 else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4200 dpll_mgr = &pch_pll_mgr;
4201
4202 if (!dpll_mgr)
4203 return;
4204
4205 dpll_info = dpll_mgr->dpll_info;
4206
4207 for (i = 0; dpll_info[i].name; i++) {
4208 if (drm_WARN_ON(&i915->drm,
4209 i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4210 break;
4211
4212 /* must fit into unsigned long bitmask on 32bit */
4213 if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4214 break;
4215
4216 i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4217 i915->display.dpll.shared_dplls[i].index = i;
4218 }
4219
4220 i915->display.dpll.mgr = dpll_mgr;
4221 i915->display.dpll.num_shared_dpll = i;
4222}
4223
4224/**
4225 * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4226 * @state: atomic state
4227 * @crtc: CRTC to compute DPLLs for
4228 * @encoder: encoder
4229 *
4230 * This function computes the DPLL state for the given CRTC and encoder.
4231 *
4232 * The new configuration in the atomic commit @state is made effective by
4233 * calling intel_shared_dpll_swap_state().
4234 *
4235 * Returns:
4236 * 0 on success, negative error code on falure.
4237 */
4238int intel_compute_shared_dplls(struct intel_atomic_state *state,
4239 struct intel_crtc *crtc,
4240 struct intel_encoder *encoder)
4241{
4242 struct drm_i915_private *i915 = to_i915(state->base.dev);
4243 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4244
4245 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4246 return -EINVAL;
4247
4248 return dpll_mgr->compute_dplls(state, crtc, encoder);
4249}
4250
4251/**
4252 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4253 * @state: atomic state
4254 * @crtc: CRTC to reserve DPLLs for
4255 * @encoder: encoder
4256 *
4257 * This function reserves all required DPLLs for the given CRTC and encoder
4258 * combination in the current atomic commit @state and the new @crtc atomic
4259 * state.
4260 *
4261 * The new configuration in the atomic commit @state is made effective by
4262 * calling intel_shared_dpll_swap_state().
4263 *
4264 * The reserved DPLLs should be released by calling
4265 * intel_release_shared_dplls().
4266 *
4267 * Returns:
4268 * 0 if all required DPLLs were successfully reserved,
4269 * negative error code otherwise.
4270 */
4271int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4272 struct intel_crtc *crtc,
4273 struct intel_encoder *encoder)
4274{
4275 struct drm_i915_private *i915 = to_i915(state->base.dev);
4276 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4277
4278 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4279 return -EINVAL;
4280
4281 return dpll_mgr->get_dplls(state, crtc, encoder);
4282}
4283
4284/**
4285 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4286 * @state: atomic state
4287 * @crtc: crtc from which the DPLLs are to be released
4288 *
4289 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4290 * from the current atomic commit @state and the old @crtc atomic state.
4291 *
4292 * The new configuration in the atomic commit @state is made effective by
4293 * calling intel_shared_dpll_swap_state().
4294 */
4295void intel_release_shared_dplls(struct intel_atomic_state *state,
4296 struct intel_crtc *crtc)
4297{
4298 struct drm_i915_private *i915 = to_i915(state->base.dev);
4299 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4300
4301 /*
4302 * FIXME: this function is called for every platform having a
4303 * compute_clock hook, even though the platform doesn't yet support
4304 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4305 * called on those.
4306 */
4307 if (!dpll_mgr)
4308 return;
4309
4310 dpll_mgr->put_dplls(state, crtc);
4311}
4312
4313/**
4314 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4315 * @state: atomic state
4316 * @crtc: the CRTC for which to update the active DPLL
4317 * @encoder: encoder determining the type of port DPLL
4318 *
4319 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4320 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4321 * DPLL selected will be based on the current mode of the encoder's port.
4322 */
4323void intel_update_active_dpll(struct intel_atomic_state *state,
4324 struct intel_crtc *crtc,
4325 struct intel_encoder *encoder)
4326{
4327 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4328 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4329
4330 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4331 return;
4332
4333 dpll_mgr->update_active_dpll(state, crtc, encoder);
4334}
4335
4336/**
4337 * intel_dpll_get_freq - calculate the DPLL's output frequency
4338 * @i915: i915 device
4339 * @pll: DPLL for which to calculate the output frequency
4340 * @pll_state: DPLL state from which to calculate the output frequency
4341 *
4342 * Return the output frequency corresponding to @pll's passed in @pll_state.
4343 */
4344int intel_dpll_get_freq(struct drm_i915_private *i915,
4345 const struct intel_shared_dpll *pll,
4346 const struct intel_dpll_hw_state *pll_state)
4347{
4348 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4349 return 0;
4350
4351 return pll->info->funcs->get_freq(i915, pll, pll_state);
4352}
4353
4354/**
4355 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4356 * @i915: i915 device
4357 * @pll: DPLL for which to calculate the output frequency
4358 * @hw_state: DPLL's hardware state
4359 *
4360 * Read out @pll's hardware state into @hw_state.
4361 */
4362bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4363 struct intel_shared_dpll *pll,
4364 struct intel_dpll_hw_state *hw_state)
4365{
4366 return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4367}
4368
/* Read out @pll's hardware state and rebuild its software tracking. */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/* an enabled PLL with a power domain must hold a wakeref for it */
	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);

	/* rebuild the pipe mask from the active CRTCs using this PLL */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4393
4394void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4395{
4396 if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4397 i915->display.dpll.mgr->update_ref_clks(i915);
4398}
4399
4400void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4401{
4402 struct intel_shared_dpll *pll;
4403 int i;
4404
4405 for_each_shared_dpll(i915, pll, i)
4406 readout_dpll_hw_state(i915, pll);
4407}
4408
/* Disable a PLL that is enabled in hardware but unused by any active pipe. */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* applied to every enabled PLL, whether in use or not */
	adlp_cmtg_clock_gating_wa(i915, pll);

	/* still referenced by an active pipe; leave it enabled */
	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(i915, pll);
}
4426
4427void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4428{
4429 struct intel_shared_dpll *pll;
4430 int i;
4431
4432 for_each_shared_dpll(i915, pll, i)
4433 sanitize_dpll_state(i915, pll);
4434}
4435
4436/**
4437 * intel_dpll_dump_hw_state - write hw_state to dmesg
4438 * @i915: i915 drm device
4439 * @hw_state: hw state to be written to the log
4440 *
4441 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4442 */
4443void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4444 const struct intel_dpll_hw_state *hw_state)
4445{
4446 if (i915->display.dpll.mgr) {
4447 i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4448 } else {
4449 /* fallback for platforms that don't use the shared dpll
4450 * infrastructure
4451 */
4452 drm_dbg_kms(&i915->drm,
4453 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4454 "fp0: 0x%x, fp1: 0x%x\n",
4455 hw_state->dpll,
4456 hw_state->dpll_md,
4457 hw_state->fp0,
4458 hw_state->fp1);
4459 }
4460}
4461
/*
 * Cross-check @pll's software tracking against its hardware state.
 *
 * With @crtc == NULL only the PLL's own bookkeeping is checked
 * (active_mask must be a subset of pipe_mask); otherwise the PLL's
 * masks are also verified against @crtc and @new_crtc_state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *i915,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);

	/* always-on PLLs may legitimately be on without any users */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(i915, pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(i915,
				pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* the crtc's bit in the masks must match the crtc's active state */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(i915,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4517
/* Verify @crtc's new shared DPLL and check that an old, different PLL was released. */
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	/* a PLL the crtc switched away from must no longer reference it */
	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4544
4545void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4546{
4547 struct drm_i915_private *i915 = to_i915(state->base.dev);
4548 struct intel_shared_dpll *pll;
4549 int i;
4550
4551 for_each_shared_dpll(i915, pll, i)
4552 verify_single_dpll_state(i915, pll, NULL, NULL);
4553}
1/*
2 * Copyright © 2006-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include <linux/string_helpers.h>
25
26#include "i915_reg.h"
27#include "intel_de.h"
28#include "intel_display_types.h"
29#include "intel_dkl_phy.h"
30#include "intel_dkl_phy_regs.h"
31#include "intel_dpio_phy.h"
32#include "intel_dpll.h"
33#include "intel_dpll_mgr.h"
34#include "intel_hti.h"
35#include "intel_mg_phy_regs.h"
36#include "intel_pch_refclk.h"
37#include "intel_tc.h"
38
39/**
40 * DOC: Display PLLs
41 *
42 * Display PLLs used for driving outputs vary by platform. While some have
43 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44 * from a pool. In the latter scenario, it is possible that multiple pipes
45 * share a PLL if their configurations match.
46 *
47 * This file provides an abstraction over display PLLs. The function
48 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
49 * users of a PLL are tracked and that tracking is integrated with the atomic
50 * modset interface. During an atomic operation, required PLLs can be reserved
51 * for a given CRTC and encoder configuration by calling
52 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53 * with intel_release_shared_dplls().
54 * Changes to the users are first staged in the atomic state, and then made
55 * effective by calling intel_shared_dpll_swap_state() during the atomic
56 * commit phase.
57 */
58
/*
 * Platform specific hooks for managing DPLLs, invoked through
 * pll->info->funcs.
 */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. Optional; intel_dpll_get_freq() warns when it is missing.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
93
/* Platform specific DPLL management, one instance per platform/PLL layout. */
struct intel_dpll_mgr {
	/* table of the platform's PLLs, terminated by an entry with no name */
	const struct dpll_info *dpll_info;

	/* compute the DPLL state for a crtc/encoder combination */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the DPLLs needed by a crtc/encoder combination */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the DPLLs previously reserved for a crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: pick the active DPLL among those reserved for a port */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh the DPLL reference clocks */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log a DPLL hw state via drm_dbg_kms */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
112
113static void
114intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 struct intel_shared_dpll_state *shared_dpll)
116{
117 enum intel_dpll_id i;
118
119 /* Copy shared dpll state */
120 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
122
123 shared_dpll[i] = pll->state;
124 }
125}
126
/*
 * Return the atomic state's copy of the shared DPLL state, duplicating
 * the current state into it on first use. Requires connection_mutex to
 * be held (asserted below).
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	/* duplicate lazily, once per atomic state */
	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
143
144/**
145 * intel_get_shared_dpll_by_id - get a DPLL given its id
146 * @dev_priv: i915 device instance
147 * @id: pll id
148 *
149 * Returns:
150 * A pointer to the DPLL with @id
151 */
152struct intel_shared_dpll *
153intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
154 enum intel_dpll_id id)
155{
156 return &dev_priv->display.dpll.shared_dplls[id];
157}
158
/* For ILK+ */
/* Warn unless @pll's hardware enable state matches the expected @state. */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
177
178static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
179{
180 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
181}
182
183enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
184{
185 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
186}
187
/* Return the enable register for a combo PHY PLL on the given platform. */
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	if (IS_DG1(i915))
		return DG1_DPLL_ENABLE(pll->info->id);
	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
		/* on JSL/EHL, DPLL4 is enabled via the first MG PLL register */
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
199
200static i915_reg_t
201intel_tc_pll_enable_reg(struct drm_i915_private *i915,
202 struct intel_shared_dpll *pll)
203{
204 const enum intel_dpll_id id = pll->info->id;
205 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
206
207 if (IS_ALDERLAKE_P(i915))
208 return ADLP_PORTTC_PLL_ENABLE(tc_port);
209
210 return MG_PLL_ENABLE(tc_port);
211}
212
213/**
214 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
215 * @crtc_state: CRTC, and its state, which has a shared DPLL
216 *
217 * Enable the shared DPLL used by @crtc.
218 */
219void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
220{
221 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
222 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
223 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
224 unsigned int pipe_mask = BIT(crtc->pipe);
225 unsigned int old_mask;
226
227 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
228 return;
229
230 mutex_lock(&dev_priv->display.dpll.lock);
231 old_mask = pll->active_mask;
232
233 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
234 drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
235 goto out;
236
237 pll->active_mask |= pipe_mask;
238
239 drm_dbg_kms(&dev_priv->drm,
240 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
241 pll->info->name, pll->active_mask, pll->on,
242 crtc->base.base.id, crtc->base.name);
243
244 if (old_mask) {
245 drm_WARN_ON(&dev_priv->drm, !pll->on);
246 assert_shared_dpll_enabled(dev_priv, pll);
247 goto out;
248 }
249 drm_WARN_ON(&dev_priv->drm, pll->on);
250
251 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
252 pll->info->funcs->enable(dev_priv, pll);
253 pll->on = true;
254
255out:
256 mutex_unlock(&dev_priv->display.dpll.lock);
257}
258
259/**
260 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
261 * @crtc_state: CRTC, and its state, which has a shared DPLL
262 *
263 * Disable the shared DPLL used by @crtc.
264 */
265void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
266{
267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
269 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
270 unsigned int pipe_mask = BIT(crtc->pipe);
271
272 /* PCH only available on ILK+ */
273 if (DISPLAY_VER(dev_priv) < 5)
274 return;
275
276 if (pll == NULL)
277 return;
278
279 mutex_lock(&dev_priv->display.dpll.lock);
280 if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
281 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
282 crtc->base.base.id, crtc->base.name))
283 goto out;
284
285 drm_dbg_kms(&dev_priv->drm,
286 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
287 pll->info->name, pll->active_mask, pll->on,
288 crtc->base.base.id, crtc->base.name);
289
290 assert_shared_dpll_enabled(dev_priv, pll);
291 drm_WARN_ON(&dev_priv->drm, !pll->on);
292
293 pll->active_mask &= ~pipe_mask;
294 if (pll->active_mask)
295 goto out;
296
297 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
298 pll->info->funcs->disable(dev_priv, pll);
299 pll->on = false;
300
301out:
302 mutex_unlock(&dev_priv->display.dpll.lock);
303}
304
/*
 * From the PLLs in @dpll_mask, find one whose staged hw state matches
 * @pll_state (allowing it to be shared with its current users), falling
 * back to the first completely unused PLL. Returns NULL if neither
 * exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->display.dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
353
/*
 * Stage a reference to @pll for @crtc in the atomic state; the first
 * reference also records @pll_state as the PLL's staged hw state.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	if (shared_dpll[id].pipe_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	/* double-referencing the same pipe would corrupt the tracking */
	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);

	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
376
/* Drop @crtc's staged reference to @pll from the atomic state. */
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
					  const struct intel_crtc *crtc,
					  const struct intel_shared_dpll *pll)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* releasing a reference that was never taken is a tracking bug */
	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);

	shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
394
395static void intel_put_dpll(struct intel_atomic_state *state,
396 struct intel_crtc *crtc)
397{
398 const struct intel_crtc_state *old_crtc_state =
399 intel_atomic_get_old_crtc_state(state, crtc);
400 struct intel_crtc_state *new_crtc_state =
401 intel_atomic_get_new_crtc_state(state, crtc);
402
403 new_crtc_state->shared_dpll = NULL;
404
405 if (!old_crtc_state->shared_dpll)
406 return;
407
408 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
409}
410
411/**
412 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
413 * @state: atomic state
414 *
415 * This is the dpll version of drm_atomic_helper_swap_state() since the
416 * helper does not handle driver-specific global state.
417 *
418 * For consistency with atomic helpers this function does a complete swap,
419 * i.e. it also puts the current state into @state, even though there is no
420 * need for that at this moment.
421 */
422void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
423{
424 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
425 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
426 enum intel_dpll_id i;
427
428 if (!state->dpll_set)
429 return;
430
431 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
432 struct intel_shared_dpll *pll =
433 &dev_priv->display.dpll.shared_dplls[i];
434
435 swap(pll->state, shared_dpll[i]);
436 }
437}
438
/*
 * Read out the PCH DPLL/FP registers for @pll; returns true if the PLL's
 * VCO is enabled. Fails (returns false) if the display core power domain
 * cannot be grabbed.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
461
/* Warn unless at least one PCH reference clock source is enabled. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* only meaningful on IBX/CPT PCH platforms */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
474
/*
 * Program and enable a PCH DPLL from pll->state.hw_state. The register
 * write/delay sequence below is order-sensitive; do not reorder.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* divisor registers are programmed before enabling the PLL */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
501
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* flush the write, then let the PLL spin down before returning */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
511
/*
 * Nothing to compute here for PCH DPLLs: crtc_state->dpll_hw_state is
 * filled in elsewhere and consumed directly by ibx_get_dpll().
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
518
/*
 * Reserve a PCH DPLL for @crtc: fixed pipe->PLL mapping on IBX, shared
 * allocation from PLL A/B otherwise (CPT).
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->display.dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
556
/* Dump the IBX PCH DPLL register state (debug logging only). */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
568
/* Vfuncs shared by both PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs available on IBX/CPT; sentinel-terminated. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH platforms (no ref-clock update or freq readout). */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
588
/*
 * Enable a HSW/BDW WRPLL by writing its precomputed control value
 * (which includes the enable bit) and waiting briefly for it to settle.
 */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
598
/* Enable the single HSW/BDW SPLL from its precomputed control value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
606
/*
 * Disable a WRPLL by clearing its enable bit. If this PLL was flagged as
 * an SSC reference user, re-run the PCH refclk setup now that it no
 * longer depends on it.
 */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
624
625static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
626 struct intel_shared_dpll *pll)
627{
628 enum intel_dpll_id id = pll->info->id;
629 u32 val;
630
631 val = intel_de_read(dev_priv, SPLL_CTL);
632 intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
633 intel_de_posting_read(dev_priv, SPLL_CTL);
634
635 /*
636 * Try to set up the PCH reference clock once all DPLLs
637 * that depend on it have been shut down.
638 */
639 if (dev_priv->pch_ssc_use & BIT(id))
640 intel_init_pch_refclk(dev_priv);
641}
642
/*
 * Read back a WRPLL's control register into @hw_state. Returns true iff
 * display power was up and the PLL's enable bit is set.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail (PLL treated as off) if the display power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
663
/*
 * Read back the SPLL control register into @hw_state. Returns true iff
 * display power was up and the SPLL's enable bit is set.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail (PLL treated as off) if the display power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
683
/* LC PLL frequency in MHz, and the same value * 2 expressed in kHz. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (p) search range and step — even values from 2 to 64. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/*
 * Best (p, n2, r2) divider triplet found so far; n2 and r2 carry
 * 2 * N and 2 * R respectively (see hsw_ddi_calculate_wrpll()).
 */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
700
/*
 * Return the PPM deviation budget the WRPLL search may use for @clock
 * (pixel clock in Hz). Clocks not listed in any group get the default
 * budget of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int freqs_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int freqs_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int freqs_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int freqs_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int freqs_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *freqs;
		unsigned int num_freqs;
		unsigned int budget;
	} groups[] = {
		{ freqs_0, sizeof(freqs_0) / sizeof(freqs_0[0]), 0 },
		{ freqs_1500, sizeof(freqs_1500) / sizeof(freqs_1500[0]), 1500 },
		{ freqs_2000, sizeof(freqs_2000) / sizeof(freqs_2000[0]), 2000 },
		{ freqs_4000, sizeof(freqs_4000) / sizeof(freqs_4000[0]), 4000 },
		{ freqs_5000, sizeof(freqs_5000) / sizeof(freqs_5000[0]), 5000 },
	};
	unsigned int g, i;

	for (g = 0; g < sizeof(groups) / sizeof(groups[0]); g++) {
		for (i = 0; i < groups[g].num_freqs; i++) {
			if (groups[g].freqs[i] == clock)
				return groups[g].budget;
		}
	}

	return 1000;
}
764
/*
 * Evaluate candidate dividers (r2, n2, p) for the target @freq2k against
 * the current best in @best, updating @best if the candidate is better.
 * All comparisons are done multiplied out to avoid divisions.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* a, b: budget scaled to each candidate; c, d: actual deviations */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
824
/*
 * Find WRPLL dividers for @clock (in Hz), returning doubled reference
 * divider (*r2_out), doubled feedback divider (*n2_out) and post divider
 * (*p_out). Exhaustively walks the (r2, n2, p) space within the REF/VCO
 * constraints, keeping the best candidate per hsw_wrpll_update_rnp().
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
889
/*
 * Decode a WRPLL control value back into the resulting port clock (kHz).
 * Picks the reference clock based on the REF field, then extracts the
 * r/p/n divider fields. Returns 0 for an unknown reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
929
/*
 * Compute the WRPLL hardware state for @crtc's (HDMI) port clock and
 * then round-trip the value through hsw_ddi_wrpll_get_freq() so
 * port_clock reflects the frequency the PLL will actually generate.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	/* port_clock is in kHz; the calculator expects Hz. */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
951
952static struct intel_shared_dpll *
953hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
954 struct intel_crtc *crtc)
955{
956 struct intel_crtc_state *crtc_state =
957 intel_atomic_get_new_crtc_state(state, crtc);
958
959 return intel_find_shared_dpll(state, crtc,
960 &crtc_state->dpll_hw_state,
961 BIT(DPLL_ID_WRPLL2) |
962 BIT(DPLL_ID_WRPLL1));
963}
964
965static int
966hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
967{
968 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
969 int clock = crtc_state->port_clock;
970
971 switch (clock / 2) {
972 case 81000:
973 case 135000:
974 case 270000:
975 return 0;
976 default:
977 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
978 clock);
979 return -EINVAL;
980 }
981}
982
983static struct intel_shared_dpll *
984hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
985{
986 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
987 struct intel_shared_dpll *pll;
988 enum intel_dpll_id pll_id;
989 int clock = crtc_state->port_clock;
990
991 switch (clock / 2) {
992 case 81000:
993 pll_id = DPLL_ID_LCPLL_810;
994 break;
995 case 135000:
996 pll_id = DPLL_ID_LCPLL_1350;
997 break;
998 case 270000:
999 pll_id = DPLL_ID_LCPLL_2700;
1000 break;
1001 default:
1002 MISSING_CASE(clock / 2);
1003 return NULL;
1004 }
1005
1006 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1007
1008 if (!pll)
1009 return NULL;
1010
1011 return pll;
1012}
1013
1014static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1015 const struct intel_shared_dpll *pll,
1016 const struct intel_dpll_hw_state *pll_state)
1017{
1018 int link_clock = 0;
1019
1020 switch (pll->info->id) {
1021 case DPLL_ID_LCPLL_810:
1022 link_clock = 81000;
1023 break;
1024 case DPLL_ID_LCPLL_1350:
1025 link_clock = 135000;
1026 break;
1027 case DPLL_ID_LCPLL_2700:
1028 link_clock = 270000;
1029 break;
1030 default:
1031 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1032 break;
1033 }
1034
1035 return link_clock * 2;
1036}
1037
/*
 * Compute the SPLL state for an analog (CRT) output. The SPLL only
 * supports the 1350 MHz frequency here, so the port clock must be
 * exactly 270000 kHz (clock / 2 == 135000).
 */
static int
hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return -EINVAL;

	crtc_state->dpll_hw_state.spll =
		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

	return 0;
}
1053
1054static struct intel_shared_dpll *
1055hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1056 struct intel_crtc *crtc)
1057{
1058 struct intel_crtc_state *crtc_state =
1059 intel_atomic_get_new_crtc_state(state, crtc);
1060
1061 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1062 BIT(DPLL_ID_SPLL));
1063}
1064
1065static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1066 const struct intel_shared_dpll *pll,
1067 const struct intel_dpll_hw_state *pll_state)
1068{
1069 int link_clock = 0;
1070
1071 switch (pll_state->spll & SPLL_FREQ_MASK) {
1072 case SPLL_FREQ_810MHz:
1073 link_clock = 81000;
1074 break;
1075 case SPLL_FREQ_1350MHz:
1076 link_clock = 135000;
1077 break;
1078 case SPLL_FREQ_2700MHz:
1079 link_clock = 270000;
1080 break;
1081 default:
1082 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1083 break;
1084 }
1085
1086 return link_clock * 2;
1087}
1088
1089static int hsw_compute_dpll(struct intel_atomic_state *state,
1090 struct intel_crtc *crtc,
1091 struct intel_encoder *encoder)
1092{
1093 struct intel_crtc_state *crtc_state =
1094 intel_atomic_get_new_crtc_state(state, crtc);
1095
1096 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1097 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1098 else if (intel_crtc_has_dp_encoder(crtc_state))
1099 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1100 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1101 return hsw_ddi_spll_compute_dpll(state, crtc);
1102 else
1103 return -EINVAL;
1104}
1105
1106static int hsw_get_dpll(struct intel_atomic_state *state,
1107 struct intel_crtc *crtc,
1108 struct intel_encoder *encoder)
1109{
1110 struct intel_crtc_state *crtc_state =
1111 intel_atomic_get_new_crtc_state(state, crtc);
1112 struct intel_shared_dpll *pll = NULL;
1113
1114 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1115 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1116 else if (intel_crtc_has_dp_encoder(crtc_state))
1117 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1118 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1119 pll = hsw_ddi_spll_get_dpll(state, crtc);
1120
1121 if (!pll)
1122 return -EINVAL;
1123
1124 intel_reference_shared_dpll(state, crtc,
1125 pll, &crtc_state->dpll_hw_state);
1126
1127 crtc_state->shared_dpll = pll;
1128
1129 return 0;
1130}
1131
/* Cache the SSC/non-SSC reference clock frequencies (kHz) for HSW/BDW. */
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 135000;
	/* Non-SSC is only used on non-ULT HSW. */
	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
		i915->display.dpll.ref_clks.nssc = 24000;
	else
		i915->display.dpll.ref_clks.nssc = 135000;
}
1141
/* Dump the HSW/BDW WRPLL/SPLL register state (debug logging only). */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1148
/* Vfuncs for the two WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

/* Vfuncs for the single SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1162
/*
 * The LCPLLs are always on (marked INTEL_DPLL_ALWAYS_ON below), so
 * enable/disable are no-ops and the hw state is unconditionally "on".
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1179
/* Vfuncs for the always-on LCPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};

/* All HSW/BDW display PLLs; sentinel-terminated. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

/* DPLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1205
/* Per-PLL register addresses for the SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1236
/*
 * Program this PLL's slice of the shared DPLL_CTRL1 register: clear the
 * HDMI-mode, SSC and link-rate fields belonging to @pll and merge in the
 * precomputed ctrl1 value (each PLL owns a 6-bit field, hence id * 6).
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1253
/*
 * Enable a SKL DPLL (other than DPLL0): program its DPLL_CTRL1 slice and
 * cfgcr1/cfgcr2, set the enable bit and wait for the lock indication.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* Timeout of 5 — presumably milliseconds; TODO confirm units. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1274
/*
 * DPLL0 is always running (it also drives CDCLK — see
 * skl_ddi_dpll0_get_hw_state()), so "enabling" it only means
 * programming its DPLL_CTRL1 link-rate slice.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1280
/* Disable a SKL DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1292
/* DPLL0 also drives CDCLK, so it is never actually disabled. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1297
/*
 * Read back a SKL DPLL's state (ctrl1 slice plus, in HDMI mode, the
 * cfgcr registers). Returns true iff display power was up and the PLL's
 * enable bit is set.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit field from the shared DPLL_CTRL1. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1334
/*
 * Read back DPLL0's ctrl1 slice. DPLL0 has no cfgcr registers (no HDMI
 * mode) and must always be enabled, hence the WARN if it is not.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1367
/* Running state of the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6% of the DCO central freq */
/* Limits below are expressed in 0.01% units (10000 == 100%). */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1378
1379static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1380 u64 central_freq,
1381 u64 dco_freq,
1382 unsigned int divider)
1383{
1384 u64 deviation;
1385
1386 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1387 central_freq);
1388
1389 /* positive deviation */
1390 if (dco_freq >= central_freq) {
1391 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1392 deviation < ctx->min_deviation) {
1393 ctx->min_deviation = deviation;
1394 ctx->central_freq = central_freq;
1395 ctx->dco_freq = dco_freq;
1396 ctx->p = divider;
1397 }
1398 /* negative deviation */
1399 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1400 deviation < ctx->min_deviation) {
1401 ctx->min_deviation = deviation;
1402 ctx->central_freq = central_freq;
1403 ctx->dco_freq = dco_freq;
1404 ctx->p = divider;
1405 }
1406}
1407
/*
 * Decompose the overall divider @p into the hardware's three cascaded
 * dividers P0 (pdiv), P1 (qdiv) and P2 (kdiv) such that p == p0*p1*p2.
 * Outputs are left untouched for values of p outside the supported set.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	/* even dividers */
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1456
/* Register-level encoding of a SKL WRPLL configuration (cfgcr1/cfgcr2). */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO ratio */
	u32 dco_integer;	/* integer part of the DCO ratio */
	u32 qdiv_ratio;		/* P1 */
	u32 qdiv_mode;		/* 1 iff qdiv_ratio != 1 */
	u32 kdiv;		/* encoded P2 */
	u32 pdiv;		/* encoded P0 */
	u32 central_freq;	/* encoded DCO central frequency */
};
1466
/*
 * Translate a chosen (central_freq, p0, p1, p2) solution into the
 * register-field encodings expected by cfgcr1/cfgcr2. @afe_clock is in
 * Hz, @ref_clock in kHz. The DCO ratio is split into an integer part and
 * a 15-bit fraction (hence the 0x8000 scale factor).
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* Encode the DCO central frequency selection. */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* Encode P0 (pdiv). */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Encode P2 (kdiv). */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1534
/*
 * Find a SKL WRPLL configuration for @clock (in Hz): try all supported
 * dividers against the three DCO central frequencies, preferring even
 * dividers and the smallest DCO deviation, then encode the winning
 * solution into @wrpll_params. Returns 0 on success, -EINVAL if no
 * divider satisfies the DCO deviation constraints.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1606
/*
 * Decode a SKL WRPLL's cfgcr1/cfgcr2 state back into the resulting port
 * clock (kHz). Returns 0 for unrecognized or zero divider encodings.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Qdiv ratio only applies when qdiv mode is enabled. */
	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* Reassemble the DCO frequency: integer part plus 15-bit fraction. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* Undo the 5x AFE clock multiplier on top of the p0*p1*p2 divide. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1677
/*
 * Compute the SKL WRPLL (HDMI-mode) hardware state for @crtc_state and
 * round-trip it through skl_ddi_wrpll_get_freq() so port_clock reflects
 * the frequency the PLL will actually produce.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the calculator expects Hz. */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1717
/*
 * Encode the DP link rate into the ctrl1 hardware state. Unknown rates
 * fall through with only the override bit set; always returns 0.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1754
1755static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1756 const struct intel_shared_dpll *pll,
1757 const struct intel_dpll_hw_state *pll_state)
1758{
1759 int link_clock = 0;
1760
1761 switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1762 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1763 case DPLL_CTRL1_LINK_RATE_810:
1764 link_clock = 81000;
1765 break;
1766 case DPLL_CTRL1_LINK_RATE_1080:
1767 link_clock = 108000;
1768 break;
1769 case DPLL_CTRL1_LINK_RATE_1350:
1770 link_clock = 135000;
1771 break;
1772 case DPLL_CTRL1_LINK_RATE_1620:
1773 link_clock = 162000;
1774 break;
1775 case DPLL_CTRL1_LINK_RATE_2160:
1776 link_clock = 216000;
1777 break;
1778 case DPLL_CTRL1_LINK_RATE_2700:
1779 link_clock = 270000;
1780 break;
1781 default:
1782 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1783 break;
1784 }
1785
1786 return link_clock * 2;
1787}
1788
1789static int skl_compute_dpll(struct intel_atomic_state *state,
1790 struct intel_crtc *crtc,
1791 struct intel_encoder *encoder)
1792{
1793 struct intel_crtc_state *crtc_state =
1794 intel_atomic_get_new_crtc_state(state, crtc);
1795
1796 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1797 return skl_ddi_hdmi_pll_dividers(crtc_state);
1798 else if (intel_crtc_has_dp_encoder(crtc_state))
1799 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1800 else
1801 return -EINVAL;
1802}
1803
1804static int skl_get_dpll(struct intel_atomic_state *state,
1805 struct intel_crtc *crtc,
1806 struct intel_encoder *encoder)
1807{
1808 struct intel_crtc_state *crtc_state =
1809 intel_atomic_get_new_crtc_state(state, crtc);
1810 struct intel_shared_dpll *pll;
1811
1812 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1813 pll = intel_find_shared_dpll(state, crtc,
1814 &crtc_state->dpll_hw_state,
1815 BIT(DPLL_ID_SKL_DPLL0));
1816 else
1817 pll = intel_find_shared_dpll(state, crtc,
1818 &crtc_state->dpll_hw_state,
1819 BIT(DPLL_ID_SKL_DPLL3) |
1820 BIT(DPLL_ID_SKL_DPLL2) |
1821 BIT(DPLL_ID_SKL_DPLL1));
1822 if (!pll)
1823 return -EINVAL;
1824
1825 intel_reference_shared_dpll(state, crtc,
1826 pll, &crtc_state->dpll_hw_state);
1827
1828 crtc_state->shared_dpll = pll;
1829
1830 return 0;
1831}
1832
1833static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1834 const struct intel_shared_dpll *pll,
1835 const struct intel_dpll_hw_state *pll_state)
1836{
1837 /*
1838 * ctrl1 register is already shifted for each pll, just use 0 to get
1839 * the internal shift for each field
1840 */
1841 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1842 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1843 else
1844 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1845}
1846
/* Refresh the cached non-SSC DPLL reference clock from the CDCLK reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1852
/* Log the SKL DPLL register state (ctrl1/cfgcr1/cfgcr2) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1862
/* Ops for SKL DPLL1-3, the PLLs handed out by skl_get_dpll(). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1869
/* Ops for SKL DPLL0, which has its own enable/disable/readout paths. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1876
/* SKL PLL pool; DPLL0 is flagged always-on and reserved for eDP in skl_get_dpll(). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1884
/* SKL DPLL manager vtable wiring the compute/get/put/readout hooks. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1893
/*
 * Enable a BXT/GLK port PLL: program the dividers and loop filter from
 * pll->state.hw_state into the per-PHY registers, then enable the PLL
 * and wait for lock. The MMIO write order mirrors the hardware enable
 * sequence, so the steps below should not be reordered.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK needs an explicit PLL power-up before programming. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_INT_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2014
/*
 * Disable a BXT/GLK port PLL. On GLK the PLL power well is also turned
 * off and the power-state ack is polled (200 us timeout).
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2037
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * hw_state, masking each register down to the bits this driver
 * programs. Returns true if the PLL is currently enabled; false if it
 * is disabled or the display power well could not be acquired.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2117
/*
 * Pre-calculated PLL dividers for the DP link rates; .dot is the link
 * clock in kHz and is matched against crtc_state->port_clock in
 * bxt_ddi_dp_pll_dividers().
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2129
2130static int
2131bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2132 struct dpll *clk_div)
2133{
2134 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2135
2136 /* Calculate HDMI div */
2137 /*
2138 * FIXME: tie the following calculation into
2139 * i9xx_crtc_compute_clock
2140 */
2141 if (!bxt_find_best_dpll(crtc_state, clk_div))
2142 return -EINVAL;
2143
2144 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2145
2146 return 0;
2147}
2148
2149static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2150 struct dpll *clk_div)
2151{
2152 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2153 int i;
2154
2155 *clk_div = bxt_dp_clk_val[0];
2156 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2157 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2158 *clk_div = bxt_dp_clk_val[i];
2159 break;
2160 }
2161 }
2162
2163 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2164
2165 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2166 clk_div->dot != crtc_state->port_clock);
2167}
2168
/*
 * Translate the computed dividers into the BXT port PLL register
 * values stored in crtc_state->dpll_hw_state. The loop-filter
 * coefficients and the lane stagger value are chosen from fixed
 * per-VCO-band / per-clock tables. Returns -EINVAL for a VCO outside
 * the supported bands.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Loop filter coefficients per VCO frequency band */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger scales with the port clock */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	/* Fractional mode only when the .22 fixed-point fraction is non-zero */
	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2236
2237static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2238 const struct intel_shared_dpll *pll,
2239 const struct intel_dpll_hw_state *pll_state)
2240{
2241 struct dpll clock;
2242
2243 clock.m1 = 2;
2244 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2245 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2246 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2247 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2248 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2249 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2250
2251 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2252}
2253
/* Compute and store the BXT PLL state for a DP output. */
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct dpll clk_div = {};

	/* DP rates come from a pre-calculated table; this cannot fail */
	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2263
2264static int
2265bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2266{
2267 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2268 struct dpll clk_div = {};
2269 int ret;
2270
2271 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2272
2273 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2274 if (ret)
2275 return ret;
2276
2277 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2278 &crtc_state->dpll_hw_state);
2279
2280 return 0;
2281}
2282
2283static int bxt_compute_dpll(struct intel_atomic_state *state,
2284 struct intel_crtc *crtc,
2285 struct intel_encoder *encoder)
2286{
2287 struct intel_crtc_state *crtc_state =
2288 intel_atomic_get_new_crtc_state(state, crtc);
2289
2290 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2291 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2292 else if (intel_crtc_has_dp_encoder(crtc_state))
2293 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2294 else
2295 return -EINVAL;
2296}
2297
2298static int bxt_get_dpll(struct intel_atomic_state *state,
2299 struct intel_crtc *crtc,
2300 struct intel_encoder *encoder)
2301{
2302 struct intel_crtc_state *crtc_state =
2303 intel_atomic_get_new_crtc_state(state, crtc);
2304 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2305 struct intel_shared_dpll *pll;
2306 enum intel_dpll_id id;
2307
2308 /* 1:1 mapping between ports and PLLs */
2309 id = (enum intel_dpll_id) encoder->port;
2310 pll = intel_get_shared_dpll_by_id(dev_priv, id);
2311
2312 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2313 crtc->base.base.id, crtc->base.name, pll->info->name);
2314
2315 intel_reference_shared_dpll(state, crtc,
2316 pll, &crtc_state->dpll_hw_state);
2317
2318 crtc_state->shared_dpll = pll;
2319
2320 return 0;
2321}
2322
/* BXT uses a fixed 100 MHz reference for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2329
2330static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2331 const struct intel_dpll_hw_state *hw_state)
2332{
2333 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2334 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2335 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2336 hw_state->ebb0,
2337 hw_state->ebb4,
2338 hw_state->pll0,
2339 hw_state->pll1,
2340 hw_state->pll2,
2341 hw_state->pll3,
2342 hw_state->pll6,
2343 hw_state->pll8,
2344 hw_state->pll9,
2345 hw_state->pll10,
2346 hw_state->pcsdw12);
2347}
2348
/* Ops shared by all BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2355
/* One PLL per port (A-C); ids reuse the SKL DPLL id space. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2362
/* BXT DPLL manager vtable wiring the compute/get/put/readout hooks. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2371
/*
 * Factor a total post divider into the pdiv/qdiv/kdiv stages supported
 * by the ICL combo PHY WRPLL. Only the divider values produced by
 * icl_calc_wrpll()'s table are handled; for any other even value the
 * outputs are left untouched, matching the original behavior.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2410
/*
 * Fill in the WRPLL parameter block from the chosen DCO frequency and
 * post dividers. kdiv/pdiv are translated from their logical values to
 * the register encodings; the DCO ratio is stored as a 15-bit
 * integer/fraction pair (dco_freq / ref_freq in .15 fixed point).
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* register encoding of kdiv: 1->1, 2->2, 3->4 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* register encoding of pdiv: 2->1, 3->2, 5->4, 7->8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv is only meaningful when kdiv == 2 (see icl_wrpll_get_multipliers()) */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2458
2459/*
2460 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2461 * Program half of the nominal DCO divider fraction value.
2462 */
2463static bool
2464ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2465{
2466 return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2467 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2468 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2469 i915->display.dpll.ref_clks.nssc == 38400;
2470}
2471
/* One DP link rate (kHz) paired with its pre-computed WRPLL settings. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2476
2477/*
2478 * These values alrea already adjusted: they're the bits we write to the
2479 * registers, not the logical values.
2480 */
2481static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2482 { 540000,
2483 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2484 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 { 270000,
2486 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2487 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2488 { 162000,
2489 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2490 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2491 { 324000,
2492 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2493 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494 { 216000,
2495 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2496 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2497 { 432000,
2498 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2499 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500 { 648000,
2501 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2502 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2503 { 810000,
2504 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2505 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2506};
2507
2508
/*
 * Also used for 38.4 MHz values (the DPLL halves a 38.4 MHz reference,
 * see icl_wrpll_ref_clock()). Same layout as the 24 MHz table above.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2536
/* ICL TBT PLL settings for a 24 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2541
/* ICL TBT PLL settings for a 19.2 (or 38.4) MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2546
/* TGL+ TBT PLL settings for a 19.2 (or 38.4) MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2552
/* TGL+ TBT PLL settings for a 24 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2557
2558static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2559 struct skl_wrpll_params *pll_params)
2560{
2561 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2562 const struct icl_combo_pll_params *params =
2563 dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2564 icl_dp_combo_pll_24MHz_values :
2565 icl_dp_combo_pll_19_2MHz_values;
2566 int clock = crtc_state->port_clock;
2567 int i;
2568
2569 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2570 if (clock == params[i].clock) {
2571 *pll_params = params[i].wrpll;
2572 return 0;
2573 }
2574 }
2575
2576 MISSING_CASE(clock);
2577 return -EINVAL;
2578}
2579
2580static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2581 struct skl_wrpll_params *pll_params)
2582{
2583 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2584
2585 if (DISPLAY_VER(dev_priv) >= 12) {
2586 switch (dev_priv->display.dpll.ref_clks.nssc) {
2587 default:
2588 MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2589 fallthrough;
2590 case 19200:
2591 case 38400:
2592 *pll_params = tgl_tbt_pll_19_2MHz_values;
2593 break;
2594 case 24000:
2595 *pll_params = tgl_tbt_pll_24MHz_values;
2596 break;
2597 }
2598 } else {
2599 switch (dev_priv->display.dpll.ref_clks.nssc) {
2600 default:
2601 MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2602 fallthrough;
2603 case 19200:
2604 case 38400:
2605 *pll_params = icl_tbt_pll_19_2MHz_values;
2606 break;
2607 case 24000:
2608 *pll_params = icl_tbt_pll_24MHz_values;
2609 break;
2610 }
2611 }
2612
2613 return 0;
2614}
2615
/*
 * TBT PLL frequency readout is intentionally unsupported — warn and
 * return 0 if anyone asks.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2628
2629static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2630{
2631 int ref_clock = i915->display.dpll.ref_clks.nssc;
2632
2633 /*
2634 * For ICL+, the spec states: if reference frequency is 38.4,
2635 * use 19.2 because the DPLL automatically divides that by 2.
2636 */
2637 if (ref_clock == 38400)
2638 ref_clock = 19200;
2639
2640 return ref_clock;
2641}
2642
2643static int
2644icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2645 struct skl_wrpll_params *wrpll_params)
2646{
2647 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2648 int ref_clock = icl_wrpll_ref_clock(i915);
2649 u32 afe_clock = crtc_state->port_clock * 5;
2650 u32 dco_min = 7998000;
2651 u32 dco_max = 10000000;
2652 u32 dco_mid = (dco_min + dco_max) / 2;
2653 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2654 18, 20, 24, 28, 30, 32, 36, 40,
2655 42, 44, 48, 50, 52, 54, 56, 60,
2656 64, 66, 68, 70, 72, 76, 78, 80,
2657 84, 88, 90, 92, 96, 98, 100, 102,
2658 3, 5, 7, 9, 15, 21 };
2659 u32 dco, best_dco = 0, dco_centrality = 0;
2660 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2661 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2662
2663 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2664 dco = afe_clock * dividers[d];
2665
2666 if (dco <= dco_max && dco >= dco_min) {
2667 dco_centrality = abs(dco - dco_mid);
2668
2669 if (dco_centrality < best_dco_centrality) {
2670 best_dco_centrality = dco_centrality;
2671 best_div = dividers[d];
2672 best_dco = dco;
2673 }
2674 }
2675 }
2676
2677 if (best_div == 0)
2678 return -EINVAL;
2679
2680 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2681 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2682 pdiv, qdiv, kdiv);
2683
2684 return 0;
2685}
2686
/*
 * Decode the combo PHY PLL output frequency from cfgcr0/cfgcr1:
 * reconstruct the DCO frequency from the .15 fixed-point integer/
 * fraction pair and divide by the pdiv/qdiv/kdiv post dividers (plus
 * the fixed /5 from symbol clock to AFE clock). Returns 0 with a WARN
 * on an undecodable divider encoding.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv only divides when qdiv_mode is enabled */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* translate register encodings back to logical divider values */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo the halved fraction programmed for WA #22010492432 */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2747
/*
 * Convert computed WRPLL parameters into combo PHY DPLL CFGCR0/CFGCR1
 * (and, on TGL+, DIV0) register values in @pll_state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/*
	 * Store the fraction halved where the EHL combo PLL div/frac
	 * workaround applies; icl_ddi_combo_pll_get_freq() doubles it back
	 * on readout.
	 */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* The clock-select/central-frequency field differs on TGL+. */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* Optional VBT override of the AFC startup value (TGL+ DIV0). */
	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2773
/*
 * Find div1/div2 post dividers for the MG/DKL PHY PLL such that the DCO
 * frequency (div1 * div2 * AFE clock, AFE clock = link clock * 5) lands in
 * the allowed range, and derive the clktop2 register values from them.
 *
 * On success returns 0 and fills @target_dco_khz plus the mg_refclkin_ctl
 * and mg_clktop2_* fields of @state; returns -EINVAL if no divider pair
 * puts the DCO in range.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	/* Candidate hsdiv ratios, tried in this order. */
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP uses a fixed 8.1 GHz DCO; otherwise a range depending on SSC. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Map div1 to its HSDIV register field encoding. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			/* First divider pair in range wins. */
			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2849
/*
 * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation, that's why it looks so different.
 *
 * Compute the full MG (ICL) or DKL (TGL+) PHY PLL state for the given CRTC
 * and store it in @pll_state. Returns 0 on success, -EINVAL when no valid
 * divider/feedback configuration exists for the port clock or the refclk is
 * unexpected.
 */
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				 struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;	/* SSC currently never enabled here */
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
	int ret;

	/* Pick the post dividers and target DCO frequency. */
	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				       pll_state, is_dkl);
	if (ret)
		return ret;

	/*
	 * Feedback divider: DCO = refclk * m1 * (m2_int + m2_frac / 2^22).
	 * m2_int must fit in 8 bits; only the MG PHY (pre-DKL) can fall back
	 * to m1 = 4 when it doesn't.
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255)
			return -EINVAL;
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part of m2, in 1/2^22 units. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* Reference-clock dependent iref parameters. */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return -EINVAL;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop filter coefficients depend on the DCO frequency range. */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	/* SSC parameters; dead today since use_ssc is hardwired to false. */
	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
		/* Optional VBT override of the AFC startup value. */
		if (dev_priv->display.vbt.override_afc_startup) {
			u8 val = dev_priv->display.vbt.override_afc_startup_val;

			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
		}

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
			DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
			DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * With a 38.4 MHz refclk only the COLDSTART bit is programmed
		 * in the tdc_coldst_bias register and the bias register is
		 * left untouched; the masks gate both the RMW write in
		 * icl_mg_pll_write() and the readout comparison.
		 */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return 0;
}
3056
/*
 * Read back the link clock (in kHz) generated by an MG (ICL) or DKL (TGL+)
 * PHY PLL from its register state:
 *
 *   link clock = refclk * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2)
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	/* The feedback divider fields live in different registers on DKL. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV (div1) ratio field. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3128
3129/**
3130 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3131 * @crtc_state: state for the CRTC to select the DPLL for
3132 * @port_dpll_id: the active @port_dpll_id to select
3133 *
3134 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3135 * CRTC.
3136 */
3137void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3138 enum icl_port_dpll_id port_dpll_id)
3139{
3140 struct icl_port_dpll *port_dpll =
3141 &crtc_state->icl_port_dplls[port_dpll_id];
3142
3143 crtc_state->shared_dpll = port_dpll->pll;
3144 crtc_state->dpll_hw_state = port_dpll->hw_state;
3145}
3146
3147static void icl_update_active_dpll(struct intel_atomic_state *state,
3148 struct intel_crtc *crtc,
3149 struct intel_encoder *encoder)
3150{
3151 struct intel_crtc_state *crtc_state =
3152 intel_atomic_get_new_crtc_state(state, crtc);
3153 struct intel_digital_port *primary_port;
3154 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3155
3156 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3157 enc_to_mst(encoder)->primary :
3158 enc_to_dig_port(encoder);
3159
3160 if (primary_port &&
3161 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3162 intel_tc_port_in_legacy_mode(primary_port)))
3163 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3164
3165 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3166}
3167
3168static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3169 struct intel_crtc *crtc)
3170{
3171 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3172 struct intel_crtc_state *crtc_state =
3173 intel_atomic_get_new_crtc_state(state, crtc);
3174 struct icl_port_dpll *port_dpll =
3175 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3176 struct skl_wrpll_params pll_params = {};
3177 int ret;
3178
3179 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3180 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3181 ret = icl_calc_wrpll(crtc_state, &pll_params);
3182 else
3183 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3184
3185 if (ret)
3186 return ret;
3187
3188 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3189
3190 /* this is mainly for the fastset check */
3191 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3192
3193 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3194 &port_dpll->hw_state);
3195
3196 return 0;
3197}
3198
3199static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3200 struct intel_crtc *crtc,
3201 struct intel_encoder *encoder)
3202{
3203 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3204 struct intel_crtc_state *crtc_state =
3205 intel_atomic_get_new_crtc_state(state, crtc);
3206 struct icl_port_dpll *port_dpll =
3207 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3208 enum port port = encoder->port;
3209 unsigned long dpll_mask;
3210
3211 if (IS_ALDERLAKE_S(dev_priv)) {
3212 dpll_mask =
3213 BIT(DPLL_ID_DG1_DPLL3) |
3214 BIT(DPLL_ID_DG1_DPLL2) |
3215 BIT(DPLL_ID_ICL_DPLL1) |
3216 BIT(DPLL_ID_ICL_DPLL0);
3217 } else if (IS_DG1(dev_priv)) {
3218 if (port == PORT_D || port == PORT_E) {
3219 dpll_mask =
3220 BIT(DPLL_ID_DG1_DPLL2) |
3221 BIT(DPLL_ID_DG1_DPLL3);
3222 } else {
3223 dpll_mask =
3224 BIT(DPLL_ID_DG1_DPLL0) |
3225 BIT(DPLL_ID_DG1_DPLL1);
3226 }
3227 } else if (IS_ROCKETLAKE(dev_priv)) {
3228 dpll_mask =
3229 BIT(DPLL_ID_EHL_DPLL4) |
3230 BIT(DPLL_ID_ICL_DPLL1) |
3231 BIT(DPLL_ID_ICL_DPLL0);
3232 } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3233 dpll_mask =
3234 BIT(DPLL_ID_EHL_DPLL4) |
3235 BIT(DPLL_ID_ICL_DPLL1) |
3236 BIT(DPLL_ID_ICL_DPLL0);
3237 } else {
3238 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3239 }
3240
3241 /* Eliminate DPLLs from consideration if reserved by HTI */
3242 dpll_mask &= ~intel_hti_dpll_mask(dev_priv);
3243
3244 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3245 &port_dpll->hw_state,
3246 dpll_mask);
3247 if (!port_dpll->pll)
3248 return -EINVAL;
3249
3250 intel_reference_shared_dpll(state, crtc,
3251 port_dpll->pll, &port_dpll->hw_state);
3252
3253 icl_update_active_dpll(state, crtc, encoder);
3254
3255 return 0;
3256}
3257
3258static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3259 struct intel_crtc *crtc)
3260{
3261 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3262 struct intel_crtc_state *crtc_state =
3263 intel_atomic_get_new_crtc_state(state, crtc);
3264 struct icl_port_dpll *port_dpll =
3265 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3266 struct skl_wrpll_params pll_params = {};
3267 int ret;
3268
3269 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3270 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3271 if (ret)
3272 return ret;
3273
3274 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3275
3276 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3277 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3278 if (ret)
3279 return ret;
3280
3281 /* this is mainly for the fastset check */
3282 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3283
3284 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3285 &port_dpll->hw_state);
3286
3287 return 0;
3288}
3289
3290static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3291 struct intel_crtc *crtc,
3292 struct intel_encoder *encoder)
3293{
3294 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3295 struct intel_crtc_state *crtc_state =
3296 intel_atomic_get_new_crtc_state(state, crtc);
3297 struct icl_port_dpll *port_dpll =
3298 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3299 enum intel_dpll_id dpll_id;
3300 int ret;
3301
3302 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3303 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3304 &port_dpll->hw_state,
3305 BIT(DPLL_ID_ICL_TBTPLL));
3306 if (!port_dpll->pll)
3307 return -EINVAL;
3308 intel_reference_shared_dpll(state, crtc,
3309 port_dpll->pll, &port_dpll->hw_state);
3310
3311
3312 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3313 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3314 encoder->port));
3315 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3316 &port_dpll->hw_state,
3317 BIT(dpll_id));
3318 if (!port_dpll->pll) {
3319 ret = -EINVAL;
3320 goto err_unreference_tbt_pll;
3321 }
3322 intel_reference_shared_dpll(state, crtc,
3323 port_dpll->pll, &port_dpll->hw_state);
3324
3325 icl_update_active_dpll(state, crtc, encoder);
3326
3327 return 0;
3328
3329err_unreference_tbt_pll:
3330 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3331 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3332
3333 return ret;
3334}
3335
3336static int icl_compute_dplls(struct intel_atomic_state *state,
3337 struct intel_crtc *crtc,
3338 struct intel_encoder *encoder)
3339{
3340 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3341 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3342
3343 if (intel_phy_is_combo(dev_priv, phy))
3344 return icl_compute_combo_phy_dpll(state, crtc);
3345 else if (intel_phy_is_tc(dev_priv, phy))
3346 return icl_compute_tc_phy_dplls(state, crtc);
3347
3348 MISSING_CASE(phy);
3349
3350 return 0;
3351}
3352
3353static int icl_get_dplls(struct intel_atomic_state *state,
3354 struct intel_crtc *crtc,
3355 struct intel_encoder *encoder)
3356{
3357 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3358 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3359
3360 if (intel_phy_is_combo(dev_priv, phy))
3361 return icl_get_combo_phy_dpll(state, crtc, encoder);
3362 else if (intel_phy_is_tc(dev_priv, phy))
3363 return icl_get_tc_phy_dplls(state, crtc, encoder);
3364
3365 MISSING_CASE(phy);
3366
3367 return -EINVAL;
3368}
3369
3370static void icl_put_dplls(struct intel_atomic_state *state,
3371 struct intel_crtc *crtc)
3372{
3373 const struct intel_crtc_state *old_crtc_state =
3374 intel_atomic_get_old_crtc_state(state, crtc);
3375 struct intel_crtc_state *new_crtc_state =
3376 intel_atomic_get_new_crtc_state(state, crtc);
3377 enum icl_port_dpll_id id;
3378
3379 new_crtc_state->shared_dpll = NULL;
3380
3381 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3382 const struct icl_port_dpll *old_port_dpll =
3383 &old_crtc_state->icl_port_dplls[id];
3384 struct icl_port_dpll *new_port_dpll =
3385 &new_crtc_state->icl_port_dplls[id];
3386
3387 new_port_dpll->pll = NULL;
3388
3389 if (!old_port_dpll->pll)
3390 continue;
3391
3392 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3393 }
3394}
3395
/*
 * Read an ICL MG PHY PLL's state back from the hardware into @hw_state.
 * Returns false if the display power domain or the PLL itself is disabled.
 * Each register value is masked down to the fields the driver programs so
 * it can be compared against the software state.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Same refclk-dependent masking as in icl_calc_mg_pll_state(). */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3461
/*
 * Read a TGL+ DKL PHY PLL's state back from the hardware into @hw_state.
 * Returns false if the display power domain or the PLL itself is disabled.
 * Each register value is masked down to the fields the driver programs so
 * it can be compared against the software state.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* The AFC startup field only matters when the VBT overrides it. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3532
/*
 * Read a combo/TBT PLL's CFGCR (and, with the VBT AFC override, DIV0)
 * state back from the hardware into @hw_state, using @enable_reg to check
 * whether the PLL is enabled. Returns false if the display power domain or
 * the PLL is off. The CFGCR register offsets vary per platform.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only read out when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL the DPLL4 registers live at the index-4 slot. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3591
3592static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3593 struct intel_shared_dpll *pll,
3594 struct intel_dpll_hw_state *hw_state)
3595{
3596 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3597
3598 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3599}
3600
3601static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3602 struct intel_shared_dpll *pll,
3603 struct intel_dpll_hw_state *hw_state)
3604{
3605 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3606}
3607
/*
 * Program a combo/TBT PLL's CFGCR0/CFGCR1 registers (and, on TGL+ with the
 * VBT AFC override, the AFC startup field of DIV0) from the software state.
 * The CFGCR register offsets vary per platform.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL the DPLL4 registers live at the index-4 slot. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC override with no DIV0 register on this platform is a bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3648
/*
 * Program an ICL MG PHY PLL's registers from the software state for the
 * PLL's TypeC port.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are written in full, no RMW needed. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Bias/coldst use the refclk-dependent masks from the calc phase. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3699
/*
 * Program the Dekel PHY PLL registers of a TGL+ Type-C port with the values
 * in pll->state.hw_state. Counterpart of icl_mg_pll_write() for display
 * version 12+; the intel_dkl_phy_*() helpers handle the HIP index banking.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/*
	 * Only touch the AFC startup field if the VBT asked us to override
	 * it; otherwise leave the hardware default untouched.
	 */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to make sure all writes have landed. */
	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3764
/*
 * Power up an ICL+ PLL: set PLL_POWER_ENABLE in @enable_reg and wait for
 * the hardware to acknowledge via PLL_POWER_STATE. Failure is logged but
 * not propagated; the subsequent enable/lock step will fail loudly too.
 */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(dev_priv, enable_reg);
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3783
/*
 * Enable an ICL+ PLL: set PLL_ENABLE in @enable_reg and wait for PLL_LOCK.
 * Must only be called after icl_pll_power_enable() and after the divider
 * registers have been programmed.
 */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(dev_priv, enable_reg);
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3798
/*
 * Wa_16011069516: disable CMTG clock gating on ADL-P display stepping A0.
 * No-op on other platforms/steppings and for PLLs other than DPLL0, since
 * TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);	/* deliberate double read, see above */
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	/* The write above clobbers the whole register; warn if that dropped any set bits. */
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3823
/*
 * Enable a combo PHY PLL: power up, program cfgcr0/cfgcr1 (and div0), then
 * set PLL_ENABLE and wait for lock. On JSL/EHL, DPLL4 also requires DC
 * states to stay off while it is enabled; the power reference taken here
 * is released in combo_pll_disable().
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3857
/*
 * Enable the Thunderbolt PLL. Unlike the other PLLs it has a fixed enable
 * register (TBT_PLL_ENABLE), otherwise the sequence matches
 * combo_pll_enable().
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3875
/*
 * Enable a Type-C port PLL: Dekel PHY programming on display version 12+,
 * MG PHY programming on ICL (version 11).
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Program the PHY PLL registers between power-up and lock. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3898
/*
 * Disable an ICL+ PLL: clear PLL_ENABLE, wait for the lock to drop, then
 * clear PLL_POWER_ENABLE and wait for the power state to clear. Mirror
 * image of icl_pll_power_enable() + icl_pll_enable().
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3935
3936static void combo_pll_disable(struct drm_i915_private *dev_priv,
3937 struct intel_shared_dpll *pll)
3938{
3939 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3940
3941 icl_pll_disable(dev_priv, pll, enable_reg);
3942
3943 if (IS_JSL_EHL(dev_priv) &&
3944 pll->info->id == DPLL_ID_EHL_DPLL4)
3945 intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3946 pll->wakeref);
3947}
3948
/* Disable the Thunderbolt PLL; its enable register is fixed (TBT_PLL_ENABLE). */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3954
/* Disable a Type-C port PLL via its per-port enable register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3962
/* Cache the non-SSC DPLL reference clock; it equals the cdclk reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3968
3969static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3970 const struct intel_dpll_hw_state *hw_state)
3971{
3972 drm_dbg_kms(&dev_priv->drm,
3973 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3974 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3975 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3976 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3977 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3978 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3979 hw_state->cfgcr0, hw_state->cfgcr1,
3980 hw_state->div0,
3981 hw_state->mg_refclkin_ctl,
3982 hw_state->mg_clktop2_coreclkctl1,
3983 hw_state->mg_clktop2_hsclkctl,
3984 hw_state->mg_pll_div0,
3985 hw_state->mg_pll_div1,
3986 hw_state->mg_pll_lf,
3987 hw_state->mg_pll_frac_lock,
3988 hw_state->mg_pll_ssc,
3989 hw_state->mg_pll_bias,
3990 hw_state->mg_pll_tdc_coldst_bias);
3991}
3992
/* PLL ops for the ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* PLL ops for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* PLL ops for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4013
/* Ice Lake: two combo PLLs, the TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4034
/* Jasper Lake / Elkhart Lake: combo PLLs only, including DPLL4. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* No Type-C ports, hence no update_active_dpll hook. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4050
/*
 * PLL ops for the TGL+ Dekel PHY (Type-C) PLLs; enable/disable are shared
 * with the MG PLLs, which branch on DISPLAY_VER() internally.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4057
/* Tiger Lake: two combo PLLs, the TBT PLL and six Dekel (Type-C) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4080
/* Rocket Lake: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4096
/* DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4113
/* Alder Lake S: four combo PLLs (DPLL2/3 reuse the DG1 ids). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4130
/* Alder Lake P: two combo PLLs, the TBT PLL and four Dekel (Type-C) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4151
4152/**
4153 * intel_shared_dpll_init - Initialize shared DPLLs
4154 * @dev_priv: i915 device
4155 *
4156 * Initialize shared DPLLs for @dev_priv.
4157 */
4158void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4159{
4160 const struct intel_dpll_mgr *dpll_mgr = NULL;
4161 const struct dpll_info *dpll_info;
4162 int i;
4163
4164 mutex_init(&dev_priv->display.dpll.lock);
4165
4166 if (IS_DG2(dev_priv))
4167 /* No shared DPLLs on DG2; port PLLs are part of the PHY */
4168 dpll_mgr = NULL;
4169 else if (IS_ALDERLAKE_P(dev_priv))
4170 dpll_mgr = &adlp_pll_mgr;
4171 else if (IS_ALDERLAKE_S(dev_priv))
4172 dpll_mgr = &adls_pll_mgr;
4173 else if (IS_DG1(dev_priv))
4174 dpll_mgr = &dg1_pll_mgr;
4175 else if (IS_ROCKETLAKE(dev_priv))
4176 dpll_mgr = &rkl_pll_mgr;
4177 else if (DISPLAY_VER(dev_priv) >= 12)
4178 dpll_mgr = &tgl_pll_mgr;
4179 else if (IS_JSL_EHL(dev_priv))
4180 dpll_mgr = &ehl_pll_mgr;
4181 else if (DISPLAY_VER(dev_priv) >= 11)
4182 dpll_mgr = &icl_pll_mgr;
4183 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4184 dpll_mgr = &bxt_pll_mgr;
4185 else if (DISPLAY_VER(dev_priv) == 9)
4186 dpll_mgr = &skl_pll_mgr;
4187 else if (HAS_DDI(dev_priv))
4188 dpll_mgr = &hsw_pll_mgr;
4189 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4190 dpll_mgr = &pch_pll_mgr;
4191
4192 if (!dpll_mgr) {
4193 dev_priv->display.dpll.num_shared_dpll = 0;
4194 return;
4195 }
4196
4197 dpll_info = dpll_mgr->dpll_info;
4198
4199 for (i = 0; dpll_info[i].name; i++) {
4200 if (drm_WARN_ON(&dev_priv->drm,
4201 i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4202 break;
4203
4204 drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4205 dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4206 }
4207
4208 dev_priv->display.dpll.mgr = dpll_mgr;
4209 dev_priv->display.dpll.num_shared_dpll = i;
4210}
4211
4212/**
4213 * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4214 * @state: atomic state
4215 * @crtc: CRTC to compute DPLLs for
4216 * @encoder: encoder
4217 *
4218 * This function computes the DPLL state for the given CRTC and encoder.
4219 *
4220 * The new configuration in the atomic commit @state is made effective by
4221 * calling intel_shared_dpll_swap_state().
4222 *
4223 * Returns:
4224 * 0 on success, negative error code on falure.
4225 */
4226int intel_compute_shared_dplls(struct intel_atomic_state *state,
4227 struct intel_crtc *crtc,
4228 struct intel_encoder *encoder)
4229{
4230 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4231 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4232
4233 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4234 return -EINVAL;
4235
4236 return dpll_mgr->compute_dplls(state, crtc, encoder);
4237}
4238
4239/**
4240 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4241 * @state: atomic state
4242 * @crtc: CRTC to reserve DPLLs for
4243 * @encoder: encoder
4244 *
4245 * This function reserves all required DPLLs for the given CRTC and encoder
4246 * combination in the current atomic commit @state and the new @crtc atomic
4247 * state.
4248 *
4249 * The new configuration in the atomic commit @state is made effective by
4250 * calling intel_shared_dpll_swap_state().
4251 *
4252 * The reserved DPLLs should be released by calling
4253 * intel_release_shared_dplls().
4254 *
4255 * Returns:
4256 * 0 if all required DPLLs were successfully reserved,
4257 * negative error code otherwise.
4258 */
4259int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4260 struct intel_crtc *crtc,
4261 struct intel_encoder *encoder)
4262{
4263 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4264 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4265
4266 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4267 return -EINVAL;
4268
4269 return dpll_mgr->get_dplls(state, crtc, encoder);
4270}
4271
4272/**
4273 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4274 * @state: atomic state
4275 * @crtc: crtc from which the DPLLs are to be released
4276 *
4277 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4278 * from the current atomic commit @state and the old @crtc atomic state.
4279 *
4280 * The new configuration in the atomic commit @state is made effective by
4281 * calling intel_shared_dpll_swap_state().
4282 */
4283void intel_release_shared_dplls(struct intel_atomic_state *state,
4284 struct intel_crtc *crtc)
4285{
4286 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4287 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4288
4289 /*
4290 * FIXME: this function is called for every platform having a
4291 * compute_clock hook, even though the platform doesn't yet support
4292 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4293 * called on those.
4294 */
4295 if (!dpll_mgr)
4296 return;
4297
4298 dpll_mgr->put_dplls(state, crtc);
4299}
4300
4301/**
4302 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4303 * @state: atomic state
4304 * @crtc: the CRTC for which to update the active DPLL
4305 * @encoder: encoder determining the type of port DPLL
4306 *
4307 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4308 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4309 * DPLL selected will be based on the current mode of the encoder's port.
4310 */
4311void intel_update_active_dpll(struct intel_atomic_state *state,
4312 struct intel_crtc *crtc,
4313 struct intel_encoder *encoder)
4314{
4315 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4316 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4317
4318 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4319 return;
4320
4321 dpll_mgr->update_active_dpll(state, crtc, encoder);
4322}
4323
4324/**
4325 * intel_dpll_get_freq - calculate the DPLL's output frequency
4326 * @i915: i915 device
4327 * @pll: DPLL for which to calculate the output frequency
4328 * @pll_state: DPLL state from which to calculate the output frequency
4329 *
4330 * Return the output frequency corresponding to @pll's passed in @pll_state.
4331 */
4332int intel_dpll_get_freq(struct drm_i915_private *i915,
4333 const struct intel_shared_dpll *pll,
4334 const struct intel_dpll_hw_state *pll_state)
4335{
4336 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4337 return 0;
4338
4339 return pll->info->funcs->get_freq(i915, pll, pll_state);
4340}
4341
4342/**
4343 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4344 * @i915: i915 device
4345 * @pll: DPLL for which to calculate the output frequency
4346 * @hw_state: DPLL's hardware state
4347 *
4348 * Read out @pll's hardware state into @hw_state.
4349 */
4350bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4351 struct intel_shared_dpll *pll,
4352 struct intel_dpll_hw_state *hw_state)
4353{
4354 return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4355}
4356
/*
 * Read out the hardware state of a single shared DPLL and rebuild the
 * software tracking (on/off, pipe reference and active masks) from the
 * current CRTC states.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Re-take the DC-off reference for an already-enabled DPLL4 on
	 * JSL/EHL, mirroring what combo_pll_enable() would have done.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Rebuild the pipe mask from the active CRTCs using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4384
/* Refresh the cached DPLL reference clocks, if the platform provides a hook. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}
4390
/* Read out the hardware state of all shared DPLLs into software tracking. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
4398
/*
 * Bring a DPLL found enabled at readout into a consistent state: apply the
 * ADL-P CMTG workaround (which requires DPLL0 to be enabled) and disable
 * the PLL if no pipe is actually using it.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4417
/* Sanitize the state of all shared DPLLs after hardware readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
4425
4426/**
4427 * intel_dpll_dump_hw_state - write hw_state to dmesg
4428 * @dev_priv: i915 drm device
4429 * @hw_state: hw state to be written to the log
4430 *
4431 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4432 */
4433void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4434 const struct intel_dpll_hw_state *hw_state)
4435{
4436 if (dev_priv->display.dpll.mgr) {
4437 dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4438 } else {
4439 /* fallback for platforms that don't use the shared dpll
4440 * infrastructure
4441 */
4442 drm_dbg_kms(&dev_priv->drm,
4443 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4444 "fp0: 0x%x, fp1: 0x%x\n",
4445 hw_state->dpll,
4446 hw_state->dpll_md,
4447 hw_state->fp0,
4448 hw_state->fp1);
4449 }
4450}
4451
/*
 * Cross-check the software tracking of @pll against the hardware state.
 * With a non-NULL @crtc/@new_crtc_state the per-pipe reference and active
 * masks are verified too; with @crtc == NULL only the global PLL state is
 * checked (see intel_shared_dpll_verify_disabled()).
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	/* Zeroed so the memcmp against pll->state.hw_state below is valid. */
	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active pipe must be in the active mask, an inactive one must not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4506
/*
 * Verify the DPLL state related to @crtc: the newly assigned PLL (if any)
 * must track the pipe, and a PLL the pipe just switched away from must no
 * longer reference it.
 */
void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
				    struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4530
/* Verify the global state of every shared DPLL, without per-CRTC checks. */
void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
					 NULL, NULL);
}