   1/*
   2 * Copyright © 2006-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/math.h>
  25#include <linux/string_helpers.h>
  26
  27#include "bxt_dpio_phy_regs.h"
  28#include "i915_reg.h"
  29#include "intel_de.h"
  30#include "intel_display_types.h"
  31#include "intel_dkl_phy.h"
  32#include "intel_dkl_phy_regs.h"
  33#include "intel_dpio_phy.h"
  34#include "intel_dpll.h"
  35#include "intel_dpll_mgr.h"
  36#include "intel_hti.h"
  37#include "intel_mg_phy_regs.h"
  38#include "intel_pch_refclk.h"
  39#include "intel_tc.h"
  40
  41/**
  42 * DOC: Display PLLs
  43 *
  44 * Display PLLs used for driving outputs vary by platform. While some have
  45 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  46 * from a pool. In the latter scenario, it is possible that multiple pipes
  47 * share a PLL if their configurations match.
  48 *
  49 * This file provides an abstraction over display PLLs. The function
  50 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  51 * users of a PLL are tracked and that tracking is integrated with the atomic
   52 * modeset interface. During an atomic operation, required PLLs can be reserved
  53 * for a given CRTC and encoder configuration by calling
  54 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
  55 * with intel_release_shared_dplls().
  56 * Changes to the users are first staged in the atomic state, and then made
  57 * effective by calling intel_shared_dpll_swap_state() during the atomic
  58 * commit phase.
  59 */
  60
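/*
 * Rough sketch of the life cycle described above, assuming the usual atomic
 * check/commit split (exact call sites vary by platform and are elided):
 *
 *   check phase:  intel_reserve_shared_dplls(state, crtc, encoder)
 *                   - a platform hook picks a free PLL, or shares a matching
 *                     in-use one, and stages the new reference
 *                 intel_release_shared_dplls(state, crtc)
 *                   - stages dropping references that are no longer needed
 *
 *   commit phase: intel_shared_dpll_swap_state(state)
 *                   - the staged PLL state becomes the current state
 *                 intel_enable_shared_dpll(crtc_state)
 *                   - powers the PLL up for its first active user
 *                 intel_disable_shared_dpll(crtc_state)
 *                   - powers it down once its last user is gone
 */
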
  61/* platform specific hooks for managing DPLLs */
  62struct intel_shared_dpll_funcs {
  63	/*
  64	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
  65	 * the pll is not already enabled.
  66	 */
  67	void (*enable)(struct drm_i915_private *i915,
  68		       struct intel_shared_dpll *pll,
  69		       const struct intel_dpll_hw_state *dpll_hw_state);
  70
  71	/*
  72	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
  73	 * only when it is safe to disable the pll, i.e., there are no more
  74	 * tracked users for it.
  75	 */
  76	void (*disable)(struct drm_i915_private *i915,
  77			struct intel_shared_dpll *pll);
  78
  79	/*
  80	 * Hook for reading the values currently programmed to the DPLL
  81	 * registers. This is used for initial hw state readout and state
  82	 * verification after a mode set.
  83	 */
  84	bool (*get_hw_state)(struct drm_i915_private *i915,
  85			     struct intel_shared_dpll *pll,
  86			     struct intel_dpll_hw_state *dpll_hw_state);
  87
  88	/*
  89	 * Hook for calculating the pll's output frequency based on its passed
  90	 * in state.
  91	 */
  92	int (*get_freq)(struct drm_i915_private *i915,
  93			const struct intel_shared_dpll *pll,
  94			const struct intel_dpll_hw_state *dpll_hw_state);
  95};
  96
  97struct intel_dpll_mgr {
  98	const struct dpll_info *dpll_info;
  99
 100	int (*compute_dplls)(struct intel_atomic_state *state,
 101			     struct intel_crtc *crtc,
 102			     struct intel_encoder *encoder);
 103	int (*get_dplls)(struct intel_atomic_state *state,
 104			 struct intel_crtc *crtc,
 105			 struct intel_encoder *encoder);
 106	void (*put_dplls)(struct intel_atomic_state *state,
 107			  struct intel_crtc *crtc);
 108	void (*update_active_dpll)(struct intel_atomic_state *state,
 109				   struct intel_crtc *crtc,
 110				   struct intel_encoder *encoder);
 111	void (*update_ref_clks)(struct drm_i915_private *i915);
 112	void (*dump_hw_state)(struct drm_printer *p,
 113			      const struct intel_dpll_hw_state *dpll_hw_state);
 114	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
 115				 const struct intel_dpll_hw_state *b);
 116};
 117
 118static void
 119intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
 120				  struct intel_shared_dpll_state *shared_dpll)
 121{
 122	struct intel_shared_dpll *pll;
 123	int i;
 124
 125	/* Copy shared dpll state */
 126	for_each_shared_dpll(i915, pll, i)
 127		shared_dpll[pll->index] = pll->state;
 128}
 129
 130static struct intel_shared_dpll_state *
 131intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
 132{
 133	struct intel_atomic_state *state = to_intel_atomic_state(s);
 134
 135	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
 136
 137	if (!state->dpll_set) {
 138		state->dpll_set = true;
 139
 140		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
 141						  state->shared_dpll);
 142	}
 143
 144	return state->shared_dpll;
 145}
 146
 147/**
 148 * intel_get_shared_dpll_by_id - get a DPLL given its id
 149 * @i915: i915 device instance
 150 * @id: pll id
 151 *
 152 * Returns:
 153 * A pointer to the DPLL with @id
 154 */
 155struct intel_shared_dpll *
 156intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
 157			    enum intel_dpll_id id)
 158{
 159	struct intel_shared_dpll *pll;
 160	int i;
 161
 162	for_each_shared_dpll(i915, pll, i) {
 163		if (pll->info->id == id)
 164			return pll;
 165	}
 166
 167	MISSING_CASE(id);
 168	return NULL;
 169}
 170
 171/* For ILK+ */
 172void assert_shared_dpll(struct drm_i915_private *i915,
 173			struct intel_shared_dpll *pll,
 174			bool state)
 175{
 176	struct intel_display *display = &i915->display;
 177	bool cur_state;
 178	struct intel_dpll_hw_state hw_state;
 179
 180	if (drm_WARN(display->drm, !pll,
 181		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
 182		return;
 183
 184	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
 185	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
 186				 "%s assertion failure (expected %s, current %s)\n",
 187				 pll->info->name, str_on_off(state),
 188				 str_on_off(cur_state));
 189}
 190
 191static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 192{
 193	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
 194}
 195
 196enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 197{
 198	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
 199}
 200
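/*
 * The two helpers above rely on the MG PLL ids and the TC port enums being
 * consecutive: DPLL_ID_ICL_MGPLL1 maps to TC_PORT_1, the next PLL id to
 * TC_PORT_2, and so on, in both directions.
 */
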
 201static i915_reg_t
 202intel_combo_pll_enable_reg(struct drm_i915_private *i915,
 203			   struct intel_shared_dpll *pll)
 204{
 205	if (IS_DG1(i915))
 206		return DG1_DPLL_ENABLE(pll->info->id);
 207	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
 208		 (pll->info->id == DPLL_ID_EHL_DPLL4))
 209		return MG_PLL_ENABLE(0);
 210
 211	return ICL_DPLL_ENABLE(pll->info->id);
 212}
 213
 214static i915_reg_t
 215intel_tc_pll_enable_reg(struct drm_i915_private *i915,
 216			struct intel_shared_dpll *pll)
 217{
 218	const enum intel_dpll_id id = pll->info->id;
 219	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
 220
 221	if (IS_ALDERLAKE_P(i915))
 222		return ADLP_PORTTC_PLL_ENABLE(tc_port);
 223
 224	return MG_PLL_ENABLE(tc_port);
 225}
 226
 227static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
 228				      struct intel_shared_dpll *pll)
 229{
 230	if (pll->info->power_domain)
 231		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
 232
 233	pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
 234	pll->on = true;
 235}
 236
 237static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
 238				       struct intel_shared_dpll *pll)
 239{
 240	pll->info->funcs->disable(i915, pll);
 241	pll->on = false;
 242
 243	if (pll->info->power_domain)
 244		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
 245}
 246
 247/**
 248 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 249 * @crtc_state: CRTC, and its state, which has a shared DPLL
 250 *
  251 * Enable the shared DPLL used by the CRTC of @crtc_state.
 252 */
 253void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 254{
 255	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 256	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 257	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 258	unsigned int pipe_mask = BIT(crtc->pipe);
 259	unsigned int old_mask;
 260
 261	if (drm_WARN_ON(&i915->drm, pll == NULL))
 262		return;
 263
 264	mutex_lock(&i915->display.dpll.lock);
 265	old_mask = pll->active_mask;
 266
 267	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
 268	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
 269		goto out;
 270
 271	pll->active_mask |= pipe_mask;
 272
 273	drm_dbg_kms(&i915->drm,
 274		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 275		    pll->info->name, pll->active_mask, pll->on,
 276		    crtc->base.base.id, crtc->base.name);
 277
 278	if (old_mask) {
 279		drm_WARN_ON(&i915->drm, !pll->on);
 280		assert_shared_dpll_enabled(i915, pll);
 281		goto out;
 282	}
 283	drm_WARN_ON(&i915->drm, pll->on);
 284
 285	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
 286
 287	_intel_enable_shared_dpll(i915, pll);
 288
 289out:
 290	mutex_unlock(&i915->display.dpll.lock);
 291}
 292
 293/**
 294 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 295 * @crtc_state: CRTC, and its state, which has a shared DPLL
 296 *
  297 * Disable the shared DPLL used by the CRTC of @crtc_state.
 298 */
 299void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 300{
 301	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 302	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 303	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 304	unsigned int pipe_mask = BIT(crtc->pipe);
 305
 306	/* PCH only available on ILK+ */
 307	if (DISPLAY_VER(i915) < 5)
 308		return;
 309
 310	if (pll == NULL)
 311		return;
 312
 313	mutex_lock(&i915->display.dpll.lock);
 314	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
 315		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
 316		     crtc->base.base.id, crtc->base.name))
 317		goto out;
 318
 319	drm_dbg_kms(&i915->drm,
 320		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 321		    pll->info->name, pll->active_mask, pll->on,
 322		    crtc->base.base.id, crtc->base.name);
 323
 324	assert_shared_dpll_enabled(i915, pll);
 325	drm_WARN_ON(&i915->drm, !pll->on);
 326
 327	pll->active_mask &= ~pipe_mask;
 328	if (pll->active_mask)
 329		goto out;
 330
 331	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
 332
 333	_intel_disable_shared_dpll(i915, pll);
 334
 335out:
 336	mutex_unlock(&i915->display.dpll.lock);
 337}
 338
 339static unsigned long
 340intel_dpll_mask_all(struct drm_i915_private *i915)
 341{
 342	struct intel_shared_dpll *pll;
 343	unsigned long dpll_mask = 0;
 344	int i;
 345
 346	for_each_shared_dpll(i915, pll, i) {
 347		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
 348
 349		dpll_mask |= BIT(pll->info->id);
 350	}
 351
 352	return dpll_mask;
 353}
 354
 355static struct intel_shared_dpll *
 356intel_find_shared_dpll(struct intel_atomic_state *state,
 357		       const struct intel_crtc *crtc,
 358		       const struct intel_dpll_hw_state *dpll_hw_state,
 359		       unsigned long dpll_mask)
 360{
 361	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 362	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
 363	struct intel_shared_dpll_state *shared_dpll;
 364	struct intel_shared_dpll *unused_pll = NULL;
 365	enum intel_dpll_id id;
 366
 367	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 368
 369	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
 370
 371	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
 372		struct intel_shared_dpll *pll;
 373
 374		pll = intel_get_shared_dpll_by_id(i915, id);
 375		if (!pll)
 376			continue;
 377
 378		/* Only want to check enabled timings first */
 379		if (shared_dpll[pll->index].pipe_mask == 0) {
 380			if (!unused_pll)
 381				unused_pll = pll;
 382			continue;
 383		}
 384
 385		if (memcmp(dpll_hw_state,
 386			   &shared_dpll[pll->index].hw_state,
 387			   sizeof(*dpll_hw_state)) == 0) {
 388			drm_dbg_kms(&i915->drm,
 389				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
 390				    crtc->base.base.id, crtc->base.name,
 391				    pll->info->name,
 392				    shared_dpll[pll->index].pipe_mask,
 393				    pll->active_mask);
 394			return pll;
 395		}
 396	}
 397
 398	/* Ok no matching timings, maybe there's a free one? */
 399	if (unused_pll) {
 400		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
 401			    crtc->base.base.id, crtc->base.name,
 402			    unused_pll->info->name);
 403		return unused_pll;
 404	}
 405
 406	return NULL;
 407}
 408
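/*
 * Sharing example (illustrative): if pipe A already references a PLL with a
 * given hw_state and pipe B computes a byte-identical hw_state, the memcmp()
 * above matches and pipe B gets the same PLL; the staged pipe_mask then
 * contains both pipes' bits. A PLL with pipe_mask == 0 is only handed out
 * when no in-use PLL matches.
 */
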
 409/**
 410 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
  411 * @crtc: CRTC on whose behalf the reference is taken
 412 * @pll: DPLL for which the reference is taken
 413 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 414 *
  415 * Take a reference on @pll, tracking its use by @crtc.
 416 */
 417static void
 418intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
 419				 const struct intel_shared_dpll *pll,
 420				 struct intel_shared_dpll_state *shared_dpll_state)
 421{
 422	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 423
 424	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
 425
 426	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
 427
 428	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
 429		    crtc->base.base.id, crtc->base.name, pll->info->name);
 430}
 431
 432static void
 433intel_reference_shared_dpll(struct intel_atomic_state *state,
 434			    const struct intel_crtc *crtc,
 435			    const struct intel_shared_dpll *pll,
 436			    const struct intel_dpll_hw_state *dpll_hw_state)
 437{
 438	struct intel_shared_dpll_state *shared_dpll;
 439
 440	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 441
 442	if (shared_dpll[pll->index].pipe_mask == 0)
 443		shared_dpll[pll->index].hw_state = *dpll_hw_state;
 444
 445	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 446}
 447
 448/**
 449 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
  450 * @crtc: CRTC on whose behalf the reference is dropped
 451 * @pll: DPLL for which the reference is dropped
 452 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 453 *
  454 * Drop the reference on @pll, tracking the end of its use by @crtc.
 455 */
 456void
 457intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
 458				   const struct intel_shared_dpll *pll,
 459				   struct intel_shared_dpll_state *shared_dpll_state)
 460{
 461	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 462
 463	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
 464
 465	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
 466
 467	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
 468		    crtc->base.base.id, crtc->base.name, pll->info->name);
 469}
 470
 471static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
 472					  const struct intel_crtc *crtc,
 473					  const struct intel_shared_dpll *pll)
 474{
 475	struct intel_shared_dpll_state *shared_dpll;
 476
 477	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 478
 479	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 480}
 481
 482static void intel_put_dpll(struct intel_atomic_state *state,
 483			   struct intel_crtc *crtc)
 484{
 485	const struct intel_crtc_state *old_crtc_state =
 486		intel_atomic_get_old_crtc_state(state, crtc);
 487	struct intel_crtc_state *new_crtc_state =
 488		intel_atomic_get_new_crtc_state(state, crtc);
 489
 490	new_crtc_state->shared_dpll = NULL;
 491
 492	if (!old_crtc_state->shared_dpll)
 493		return;
 494
 495	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
 496}
 497
 498/**
 499 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 500 * @state: atomic state
 501 *
 502 * This is the dpll version of drm_atomic_helper_swap_state() since the
 503 * helper does not handle driver-specific global state.
 504 *
 505 * For consistency with atomic helpers this function does a complete swap,
 506 * i.e. it also puts the current state into @state, even though there is no
 507 * need for that at this moment.
 508 */
 509void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 510{
 511	struct drm_i915_private *i915 = to_i915(state->base.dev);
 512	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
 513	struct intel_shared_dpll *pll;
 514	int i;
 515
 516	if (!state->dpll_set)
 517		return;
 518
 519	for_each_shared_dpll(i915, pll, i)
 520		swap(pll->state, shared_dpll[pll->index]);
 521}
 522
 523static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
 524				      struct intel_shared_dpll *pll,
 525				      struct intel_dpll_hw_state *dpll_hw_state)
 526{
 527	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
 528	const enum intel_dpll_id id = pll->info->id;
 529	intel_wakeref_t wakeref;
 530	u32 val;
 531
 532	wakeref = intel_display_power_get_if_enabled(i915,
 533						     POWER_DOMAIN_DISPLAY_CORE);
 534	if (!wakeref)
 535		return false;
 536
 537	val = intel_de_read(i915, PCH_DPLL(id));
 538	hw_state->dpll = val;
 539	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
 540	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
 541
 542	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 543
 544	return val & DPLL_VCO_ENABLE;
 545}
 546
 547static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
 548{
 549	struct intel_display *display = &i915->display;
 550	u32 val;
 551	bool enabled;
 552
 553	val = intel_de_read(display, PCH_DREF_CONTROL);
 554	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 555			    DREF_SUPERSPREAD_SOURCE_MASK));
 556	INTEL_DISPLAY_STATE_WARN(display, !enabled,
 557				 "PCH refclk assertion failure, should be active but is disabled\n");
 558}
 559
 560static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
 561				struct intel_shared_dpll *pll,
 562				const struct intel_dpll_hw_state *dpll_hw_state)
 563{
 564	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
 565	const enum intel_dpll_id id = pll->info->id;
 566
 567	/* PCH refclock must be enabled first */
 568	ibx_assert_pch_refclk_enabled(i915);
 569
 570	intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
 571	intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
 572
 573	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
 574
 575	/* Wait for the clocks to stabilize. */
 576	intel_de_posting_read(i915, PCH_DPLL(id));
 577	udelay(150);
 578
 579	/* The pixel multiplier can only be updated once the
 580	 * DPLL is enabled and the clocks are stable.
 581	 *
 582	 * So write it again.
 583	 */
 584	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
 585	intel_de_posting_read(i915, PCH_DPLL(id));
 586	udelay(200);
 587}
 588
 589static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
 590				 struct intel_shared_dpll *pll)
 591{
 592	const enum intel_dpll_id id = pll->info->id;
 593
 594	intel_de_write(i915, PCH_DPLL(id), 0);
 595	intel_de_posting_read(i915, PCH_DPLL(id));
 596	udelay(200);
 597}
 598
 599static int ibx_compute_dpll(struct intel_atomic_state *state,
 600			    struct intel_crtc *crtc,
 601			    struct intel_encoder *encoder)
 602{
 603	return 0;
 604}
 605
 606static int ibx_get_dpll(struct intel_atomic_state *state,
 607			struct intel_crtc *crtc,
 608			struct intel_encoder *encoder)
 609{
 610	struct intel_crtc_state *crtc_state =
 611		intel_atomic_get_new_crtc_state(state, crtc);
 612	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 613	struct intel_shared_dpll *pll;
 614	enum intel_dpll_id id;
 615
 616	if (HAS_PCH_IBX(i915)) {
 617		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 618		id = (enum intel_dpll_id) crtc->pipe;
 619		pll = intel_get_shared_dpll_by_id(i915, id);
 620
 621		drm_dbg_kms(&i915->drm,
 622			    "[CRTC:%d:%s] using pre-allocated %s\n",
 623			    crtc->base.base.id, crtc->base.name,
 624			    pll->info->name);
 625	} else {
 626		pll = intel_find_shared_dpll(state, crtc,
 627					     &crtc_state->dpll_hw_state,
 628					     BIT(DPLL_ID_PCH_PLL_B) |
 629					     BIT(DPLL_ID_PCH_PLL_A));
 630	}
 631
 632	if (!pll)
 633		return -EINVAL;
 634
 635	/* reference the pll */
 636	intel_reference_shared_dpll(state, crtc,
 637				    pll, &crtc_state->dpll_hw_state);
 638
 639	crtc_state->shared_dpll = pll;
 640
 641	return 0;
 642}
 643
 644static void ibx_dump_hw_state(struct drm_printer *p,
 645			      const struct intel_dpll_hw_state *dpll_hw_state)
 646{
 647	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
 648
 649	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 650		   "fp0: 0x%x, fp1: 0x%x\n",
 651		   hw_state->dpll,
 652		   hw_state->dpll_md,
 653		   hw_state->fp0,
 654		   hw_state->fp1);
 655}
 656
 657static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
 658				 const struct intel_dpll_hw_state *_b)
 659{
 660	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
 661	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
 662
 663	return a->dpll == b->dpll &&
 664		a->dpll_md == b->dpll_md &&
 665		a->fp0 == b->fp0 &&
 666		a->fp1 == b->fp1;
 667}
 668
 669static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 670	.enable = ibx_pch_dpll_enable,
 671	.disable = ibx_pch_dpll_disable,
 672	.get_hw_state = ibx_pch_dpll_get_hw_state,
 673};
 674
 675static const struct dpll_info pch_plls[] = {
 676	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
 677	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
 678	{}
 679};
 680
 681static const struct intel_dpll_mgr pch_pll_mgr = {
 682	.dpll_info = pch_plls,
 683	.compute_dplls = ibx_compute_dpll,
 684	.get_dplls = ibx_get_dpll,
 685	.put_dplls = intel_put_dpll,
 686	.dump_hw_state = ibx_dump_hw_state,
 687	.compare_hw_state = ibx_compare_hw_state,
 688};
 689
 690static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
 691				 struct intel_shared_dpll *pll,
 692				 const struct intel_dpll_hw_state *dpll_hw_state)
 693{
 694	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 695	const enum intel_dpll_id id = pll->info->id;
 696
 697	intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
 698	intel_de_posting_read(i915, WRPLL_CTL(id));
 699	udelay(20);
 700}
 701
 702static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
 703				struct intel_shared_dpll *pll,
 704				const struct intel_dpll_hw_state *dpll_hw_state)
 705{
 706	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 707
 708	intel_de_write(i915, SPLL_CTL, hw_state->spll);
 709	intel_de_posting_read(i915, SPLL_CTL);
 710	udelay(20);
 711}
 712
 713static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
 714				  struct intel_shared_dpll *pll)
 715{
 716	const enum intel_dpll_id id = pll->info->id;
 717
 718	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
 719	intel_de_posting_read(i915, WRPLL_CTL(id));
 720
 721	/*
 722	 * Try to set up the PCH reference clock once all DPLLs
 723	 * that depend on it have been shut down.
 724	 */
 725	if (i915->display.dpll.pch_ssc_use & BIT(id))
 726		intel_init_pch_refclk(i915);
 727}
 728
 729static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
 730				 struct intel_shared_dpll *pll)
 731{
 732	enum intel_dpll_id id = pll->info->id;
 733
 734	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
 735	intel_de_posting_read(i915, SPLL_CTL);
 736
 737	/*
 738	 * Try to set up the PCH reference clock once all DPLLs
 739	 * that depend on it have been shut down.
 740	 */
 741	if (i915->display.dpll.pch_ssc_use & BIT(id))
 742		intel_init_pch_refclk(i915);
 743}
 744
 745static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
 746				       struct intel_shared_dpll *pll,
 747				       struct intel_dpll_hw_state *dpll_hw_state)
 748{
 749	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 750	const enum intel_dpll_id id = pll->info->id;
 751	intel_wakeref_t wakeref;
 752	u32 val;
 753
 754	wakeref = intel_display_power_get_if_enabled(i915,
 755						     POWER_DOMAIN_DISPLAY_CORE);
 756	if (!wakeref)
 757		return false;
 758
 759	val = intel_de_read(i915, WRPLL_CTL(id));
 760	hw_state->wrpll = val;
 761
 762	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 763
 764	return val & WRPLL_PLL_ENABLE;
 765}
 766
 767static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
 768				      struct intel_shared_dpll *pll,
 769				      struct intel_dpll_hw_state *dpll_hw_state)
 770{
 771	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 772	intel_wakeref_t wakeref;
 773	u32 val;
 774
 775	wakeref = intel_display_power_get_if_enabled(i915,
 776						     POWER_DOMAIN_DISPLAY_CORE);
 777	if (!wakeref)
 778		return false;
 779
 780	val = intel_de_read(i915, SPLL_CTL);
 781	hw_state->spll = val;
 782
 783	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 784
 785	return val & SPLL_PLL_ENABLE;
 786}
 787
 788#define LC_FREQ 2700
 789#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 790
 791#define P_MIN 2
 792#define P_MAX 64
 793#define P_INC 2
 794
 795/* Constraints for PLL good behavior */
 796#define REF_MIN 48
 797#define REF_MAX 400
 798#define VCO_MIN 2400
 799#define VCO_MAX 4800
 800
 801struct hsw_wrpll_rnp {
 802	unsigned p, n2, r2;
 803};
 804
 805static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 806{
 807	switch (clock) {
 808	case 25175000:
 809	case 25200000:
 810	case 27000000:
 811	case 27027000:
 812	case 37762500:
 813	case 37800000:
 814	case 40500000:
 815	case 40541000:
 816	case 54000000:
 817	case 54054000:
 818	case 59341000:
 819	case 59400000:
 820	case 72000000:
 821	case 74176000:
 822	case 74250000:
 823	case 81000000:
 824	case 81081000:
 825	case 89012000:
 826	case 89100000:
 827	case 108000000:
 828	case 108108000:
 829	case 111264000:
 830	case 111375000:
 831	case 148352000:
 832	case 148500000:
 833	case 162000000:
 834	case 162162000:
 835	case 222525000:
 836	case 222750000:
 837	case 296703000:
 838	case 297000000:
 839		return 0;
 840	case 233500000:
 841	case 245250000:
 842	case 247750000:
 843	case 253250000:
 844	case 298000000:
 845		return 1500;
 846	case 169128000:
 847	case 169500000:
 848	case 179500000:
 849	case 202000000:
 850		return 2000;
 851	case 256250000:
 852	case 262500000:
 853	case 270000000:
 854	case 272500000:
 855	case 273750000:
 856	case 280750000:
 857	case 281250000:
 858	case 286000000:
 859	case 291750000:
 860		return 4000;
 861	case 267250000:
 862	case 268500000:
 863		return 5000;
 864	default:
 865		return 1000;
 866	}
 867}
 868
 869static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
 870				 unsigned int r2, unsigned int n2,
 871				 unsigned int p,
 872				 struct hsw_wrpll_rnp *best)
 873{
 874	u64 a, b, c, d, diff, diff_best;
 875
 876	/* No best (r,n,p) yet */
 877	if (best->p == 0) {
 878		best->p = p;
 879		best->n2 = n2;
 880		best->r2 = r2;
 881		return;
 882	}
 883
 884	/*
 885	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
 886	 * freq2k.
 887	 *
 888	 * delta = 1e6 *
 889	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
 890	 *	   freq2k;
 891	 *
 892	 * and we would like delta <= budget.
 893	 *
 894	 * If the discrepancy is above the PPM-based budget, always prefer to
 895	 * improve upon the previous solution.  However, if you're within the
 896	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
 897	 */
 898	a = freq2k * budget * p * r2;
 899	b = freq2k * budget * best->p * best->r2;
 900	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
 901	diff_best = abs_diff(freq2k * best->p * best->r2,
 902			     LC_FREQ_2K * best->n2);
 903	c = 1000000 * diff;
 904	d = 1000000 * diff_best;
 905
 906	if (a < c && b < d) {
 907		/* If both are above the budget, pick the closer */
 908		if (best->p * best->r2 * diff < p * r2 * diff_best) {
 909			best->p = p;
 910			best->n2 = n2;
 911			best->r2 = r2;
 912		}
 913	} else if (a >= c && b < d) {
  914		/* Candidate is within the budget but the current best is not: update. */
 915		best->p = p;
 916		best->n2 = n2;
 917		best->r2 = r2;
 918	} else if (a >= c && b >= d) {
 919		/* Both are below the limit, so pick the higher n2/(r2*r2) */
 920		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
 921			best->p = p;
 922			best->n2 = n2;
 923			best->r2 = r2;
 924		}
 925	}
 926	/* Otherwise a < c && b >= d, do nothing */
 927}
 928
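/*
 * How the a/b/c/d comparisons above relate to the ppm budget (derived from
 * the delta formula in the comment): multiplying delta <= budget through by
 * freq2k * p * r2 gives
 *
 *   1e6 * |freq2k * p * r2 - LC_FREQ_2K * n2| <= budget * freq2k * p * r2
 *
 * i.e. c <= a. So a >= c means the new candidate is within budget, a < c
 * means it is not, and the same holds for b/d and the current best. The
 * budget from hsw_wrpll_get_budget_for_freq() is therefore a tolerance in
 * ppm: 0 demands an exact ratio, 1000 allows 0.1%, 4000 allows 0.4%.
 */
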
 929static void
 930hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 931			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 932{
 933	u64 freq2k;
 934	unsigned p, n2, r2;
 935	struct hsw_wrpll_rnp best = {};
 936	unsigned budget;
 937
 938	freq2k = clock / 100;
 939
 940	budget = hsw_wrpll_get_budget_for_freq(clock);
 941
  942	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
  943	 * entirely and pass the LC PLL straight through. */
 944	if (freq2k == 5400000) {
 945		*n2_out = 2;
 946		*p_out = 1;
 947		*r2_out = 2;
 948		return;
 949	}
 950
 951	/*
 952	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
 953	 * the WR PLL.
 954	 *
 955	 * We want R so that REF_MIN <= Ref <= REF_MAX.
 956	 * Injecting R2 = 2 * R gives:
 957	 *   REF_MAX * r2 > LC_FREQ * 2 and
 958	 *   REF_MIN * r2 < LC_FREQ * 2
 959	 *
 960	 * Which means the desired boundaries for r2 are:
 961	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
 962	 *
 963	 */
 964	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
 965	     r2 <= LC_FREQ * 2 / REF_MIN;
 966	     r2++) {
 967
 968		/*
 969		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
 970		 *
 971		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
 972		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
 973		 *   VCO_MAX * r2 > n2 * LC_FREQ and
 974		 *   VCO_MIN * r2 < n2 * LC_FREQ)
 975		 *
 976		 * Which means the desired boundaries for n2 are:
 977		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
 978		 */
 979		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
 980		     n2 <= VCO_MAX * r2 / LC_FREQ;
 981		     n2++) {
 982
 983			for (p = P_MIN; p <= P_MAX; p += P_INC)
 984				hsw_wrpll_update_rnp(freq2k, budget,
 985						     r2, n2, p, &best);
 986		}
 987	}
 988
 989	*n2_out = best.n2;
 990	*p_out = best.p;
 991	*r2_out = best.r2;
 992}
 993
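/*
 * Worked example of the search bounds above: with LC_FREQ = 2700,
 * REF_MIN/REF_MAX = 48/400 and VCO_MIN/VCO_MAX = 2400/4800, r2 runs from
 * 5400 / 400 + 1 = 14 to 5400 / 48 = 112, i.e. Ref = 5400 / r2 stays within
 * roughly 48..386 MHz. For r2 = 14, n2 then runs from 2400 * 14 / 2700 + 1 =
 * 13 to 4800 * 14 / 2700 = 24, giving a VCO of about 2507..4629 MHz, inside
 * the 2400..4800 MHz window.
 */
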
 994static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
 995				  const struct intel_shared_dpll *pll,
 996				  const struct intel_dpll_hw_state *dpll_hw_state)
 997{
 998	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 999	int refclk;
1000	int n, p, r;
1001	u32 wrpll = hw_state->wrpll;
1002
1003	switch (wrpll & WRPLL_REF_MASK) {
1004	case WRPLL_REF_SPECIAL_HSW:
1005		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1006		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1007			refclk = i915->display.dpll.ref_clks.nssc;
1008			break;
1009		}
1010		fallthrough;
1011	case WRPLL_REF_PCH_SSC:
1012		/*
1013		 * We could calculate spread here, but our checking
1014		 * code only cares about 5% accuracy, and spread is a max of
1015		 * 0.5% downspread.
1016		 */
1017		refclk = i915->display.dpll.ref_clks.ssc;
1018		break;
1019	case WRPLL_REF_LCPLL:
1020		refclk = 2700000;
1021		break;
1022	default:
1023		MISSING_CASE(wrpll);
1024		return 0;
1025	}
1026
1027	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1028	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1029	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1030
1031	/* Convert to KHz, p & r have a fixed point portion */
1032	return (refclk * n / 10) / (p * r) * 2;
1033}
1034
1035static int
1036hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1037			   struct intel_crtc *crtc)
1038{
1039	struct drm_i915_private *i915 = to_i915(state->base.dev);
1040	struct intel_crtc_state *crtc_state =
1041		intel_atomic_get_new_crtc_state(state, crtc);
1042	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1043	unsigned int p, n2, r2;
1044
1045	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1046
1047	hw_state->wrpll =
1048		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1049		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1050		WRPLL_DIVIDER_POST(p);
1051
1052	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1053							&crtc_state->dpll_hw_state);
1054
1055	return 0;
1056}
1057
1058static struct intel_shared_dpll *
1059hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1060		       struct intel_crtc *crtc)
1061{
1062	struct intel_crtc_state *crtc_state =
1063		intel_atomic_get_new_crtc_state(state, crtc);
1064
1065	return intel_find_shared_dpll(state, crtc,
1066				      &crtc_state->dpll_hw_state,
1067				      BIT(DPLL_ID_WRPLL2) |
1068				      BIT(DPLL_ID_WRPLL1));
1069}
1070
1071static int
1072hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1073{
1074	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1075	int clock = crtc_state->port_clock;
1076
1077	switch (clock / 2) {
1078	case 81000:
1079	case 135000:
1080	case 270000:
1081		return 0;
1082	default:
1083		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1084			    clock);
1085		return -EINVAL;
1086	}
1087}
1088
1089static struct intel_shared_dpll *
1090hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1091{
1092	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1093	struct intel_shared_dpll *pll;
1094	enum intel_dpll_id pll_id;
1095	int clock = crtc_state->port_clock;
1096
1097	switch (clock / 2) {
1098	case 81000:
1099		pll_id = DPLL_ID_LCPLL_810;
1100		break;
1101	case 135000:
1102		pll_id = DPLL_ID_LCPLL_1350;
1103		break;
1104	case 270000:
1105		pll_id = DPLL_ID_LCPLL_2700;
1106		break;
1107	default:
1108		MISSING_CASE(clock / 2);
1109		return NULL;
1110	}
1111
1112	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1113
1114	if (!pll)
1115		return NULL;
1116
1117	return pll;
1118}
1119
1120static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1121				  const struct intel_shared_dpll *pll,
1122				  const struct intel_dpll_hw_state *dpll_hw_state)
1123{
1124	int link_clock = 0;
1125
1126	switch (pll->info->id) {
1127	case DPLL_ID_LCPLL_810:
1128		link_clock = 81000;
1129		break;
1130	case DPLL_ID_LCPLL_1350:
1131		link_clock = 135000;
1132		break;
1133	case DPLL_ID_LCPLL_2700:
1134		link_clock = 270000;
1135		break;
1136	default:
1137		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1138		break;
1139	}
1140
1141	return link_clock * 2;
1142}
1143
1144static int
1145hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1146			  struct intel_crtc *crtc)
1147{
1148	struct intel_crtc_state *crtc_state =
1149		intel_atomic_get_new_crtc_state(state, crtc);
1150	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1151
1152	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1153		return -EINVAL;
1154
1155	hw_state->spll =
1156		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1157
1158	return 0;
1159}
1160
1161static struct intel_shared_dpll *
1162hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1163		      struct intel_crtc *crtc)
1164{
1165	struct intel_crtc_state *crtc_state =
1166		intel_atomic_get_new_crtc_state(state, crtc);
1167
1168	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1169				      BIT(DPLL_ID_SPLL));
1170}
1171
1172static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1173				 const struct intel_shared_dpll *pll,
1174				 const struct intel_dpll_hw_state *dpll_hw_state)
1175{
1176	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1177	int link_clock = 0;
1178
1179	switch (hw_state->spll & SPLL_FREQ_MASK) {
1180	case SPLL_FREQ_810MHz:
1181		link_clock = 81000;
1182		break;
1183	case SPLL_FREQ_1350MHz:
1184		link_clock = 135000;
1185		break;
1186	case SPLL_FREQ_2700MHz:
1187		link_clock = 270000;
1188		break;
1189	default:
1190		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1191		break;
1192	}
1193
1194	return link_clock * 2;
1195}
1196
1197static int hsw_compute_dpll(struct intel_atomic_state *state,
1198			    struct intel_crtc *crtc,
1199			    struct intel_encoder *encoder)
1200{
1201	struct intel_crtc_state *crtc_state =
1202		intel_atomic_get_new_crtc_state(state, crtc);
1203
1204	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1205		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1206	else if (intel_crtc_has_dp_encoder(crtc_state))
1207		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1208	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1209		return hsw_ddi_spll_compute_dpll(state, crtc);
1210	else
1211		return -EINVAL;
1212}
1213
1214static int hsw_get_dpll(struct intel_atomic_state *state,
1215			struct intel_crtc *crtc,
1216			struct intel_encoder *encoder)
1217{
1218	struct intel_crtc_state *crtc_state =
1219		intel_atomic_get_new_crtc_state(state, crtc);
1220	struct intel_shared_dpll *pll = NULL;
1221
1222	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1223		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1224	else if (intel_crtc_has_dp_encoder(crtc_state))
1225		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1226	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1227		pll = hsw_ddi_spll_get_dpll(state, crtc);
1228
1229	if (!pll)
1230		return -EINVAL;
1231
1232	intel_reference_shared_dpll(state, crtc,
1233				    pll, &crtc_state->dpll_hw_state);
1234
1235	crtc_state->shared_dpll = pll;
1236
1237	return 0;
1238}
1239
1240static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1241{
1242	i915->display.dpll.ref_clks.ssc = 135000;
1243	/* Non-SSC is only used on non-ULT HSW. */
1244	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1245		i915->display.dpll.ref_clks.nssc = 24000;
1246	else
1247		i915->display.dpll.ref_clks.nssc = 135000;
1248}
1249
1250static void hsw_dump_hw_state(struct drm_printer *p,
1251			      const struct intel_dpll_hw_state *dpll_hw_state)
1252{
1253	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1254
1255	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1256		   hw_state->wrpll, hw_state->spll);
1257}
1258
1259static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1260				 const struct intel_dpll_hw_state *_b)
1261{
1262	const struct hsw_dpll_hw_state *a = &_a->hsw;
1263	const struct hsw_dpll_hw_state *b = &_b->hsw;
1264
1265	return a->wrpll == b->wrpll &&
1266		a->spll == b->spll;
1267}
1268
1269static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1270	.enable = hsw_ddi_wrpll_enable,
1271	.disable = hsw_ddi_wrpll_disable,
1272	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1273	.get_freq = hsw_ddi_wrpll_get_freq,
1274};
1275
1276static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1277	.enable = hsw_ddi_spll_enable,
1278	.disable = hsw_ddi_spll_disable,
1279	.get_hw_state = hsw_ddi_spll_get_hw_state,
1280	.get_freq = hsw_ddi_spll_get_freq,
1281};
1282
1283static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1284				 struct intel_shared_dpll *pll,
1285				 const struct intel_dpll_hw_state *hw_state)
1286{
1287}
1288
1289static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1290				  struct intel_shared_dpll *pll)
1291{
1292}
1293
1294static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1295				       struct intel_shared_dpll *pll,
1296				       struct intel_dpll_hw_state *dpll_hw_state)
1297{
1298	return true;
1299}
1300
1301static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1302	.enable = hsw_ddi_lcpll_enable,
1303	.disable = hsw_ddi_lcpll_disable,
1304	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1305	.get_freq = hsw_ddi_lcpll_get_freq,
1306};
1307
1308static const struct dpll_info hsw_plls[] = {
1309	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1310	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1311	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1312	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1313	  .always_on = true, },
1314	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1315	  .always_on = true, },
1316	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1317	  .always_on = true, },
1318	{}
1319};
1320
1321static const struct intel_dpll_mgr hsw_pll_mgr = {
1322	.dpll_info = hsw_plls,
1323	.compute_dplls = hsw_compute_dpll,
1324	.get_dplls = hsw_get_dpll,
1325	.put_dplls = intel_put_dpll,
1326	.update_ref_clks = hsw_update_dpll_ref_clks,
1327	.dump_hw_state = hsw_dump_hw_state,
1328	.compare_hw_state = hsw_compare_hw_state,
1329};
1330
1331struct skl_dpll_regs {
1332	i915_reg_t ctl, cfgcr1, cfgcr2;
1333};
1334
1335/* this array is indexed by the *shared* pll id */
1336static const struct skl_dpll_regs skl_dpll_regs[4] = {
1337	{
1338		/* DPLL 0 */
1339		.ctl = LCPLL1_CTL,
1340		/* DPLL 0 doesn't support HDMI mode */
1341	},
1342	{
1343		/* DPLL 1 */
1344		.ctl = LCPLL2_CTL,
1345		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1346		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1347	},
1348	{
1349		/* DPLL 2 */
1350		.ctl = WRPLL_CTL(0),
1351		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1352		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1353	},
1354	{
1355		/* DPLL 3 */
1356		.ctl = WRPLL_CTL(1),
1357		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1358		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1359	},
1360};
1361
1362static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1363				    struct intel_shared_dpll *pll,
1364				    const struct skl_dpll_hw_state *hw_state)
1365{
1366	const enum intel_dpll_id id = pll->info->id;
1367
1368	intel_de_rmw(i915, DPLL_CTRL1,
1369		     DPLL_CTRL1_HDMI_MODE(id) |
1370		     DPLL_CTRL1_SSC(id) |
1371		     DPLL_CTRL1_LINK_RATE_MASK(id),
1372		     hw_state->ctrl1 << (id * 6));
1373	intel_de_posting_read(i915, DPLL_CTRL1);
1374}
1375
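/*
 * DPLL_CTRL1 packs one 6-bit field per PLL (note the "& 0x3f" readout in
 * skl_ddi_pll_get_hw_state()); hw_state->ctrl1 is always built as if for
 * DPLL0 and shifted into place by id * 6 here. Assuming the ids run 0..3 as
 * the skl_dpll_regs[] array above suggests, DPLL2's slice, for example, sits
 * in bits 17:12 of the register.
 */
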
1376static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1377			       struct intel_shared_dpll *pll,
1378			       const struct intel_dpll_hw_state *dpll_hw_state)
1379{
1380	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1381	const struct skl_dpll_regs *regs = skl_dpll_regs;
1382	const enum intel_dpll_id id = pll->info->id;
1383
1384	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1385
1386	intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1387	intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1388	intel_de_posting_read(i915, regs[id].cfgcr1);
1389	intel_de_posting_read(i915, regs[id].cfgcr2);
1390
1391	/* the enable bit is always bit 31 */
1392	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1393
1394	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1395		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1396}
1397
1398static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1399				 struct intel_shared_dpll *pll,
1400				 const struct intel_dpll_hw_state *dpll_hw_state)
1401{
1402	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1403
1404	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1405}
1406
1407static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1408				struct intel_shared_dpll *pll)
1409{
1410	const struct skl_dpll_regs *regs = skl_dpll_regs;
1411	const enum intel_dpll_id id = pll->info->id;
1412
1413	/* the enable bit is always bit 31 */
1414	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1415	intel_de_posting_read(i915, regs[id].ctl);
1416}
1417
1418static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1419				  struct intel_shared_dpll *pll)
1420{
1421}
1422
1423static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1424				     struct intel_shared_dpll *pll,
1425				     struct intel_dpll_hw_state *dpll_hw_state)
1426{
1427	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1428	const struct skl_dpll_regs *regs = skl_dpll_regs;
1429	const enum intel_dpll_id id = pll->info->id;
1430	intel_wakeref_t wakeref;
1431	bool ret;
1432	u32 val;
1433
1434	wakeref = intel_display_power_get_if_enabled(i915,
1435						     POWER_DOMAIN_DISPLAY_CORE);
1436	if (!wakeref)
1437		return false;
1438
1439	ret = false;
1440
1441	val = intel_de_read(i915, regs[id].ctl);
1442	if (!(val & LCPLL_PLL_ENABLE))
1443		goto out;
1444
1445	val = intel_de_read(i915, DPLL_CTRL1);
1446	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1447
1448	/* avoid reading back stale values if HDMI mode is not enabled */
1449	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1450		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1451		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1452	}
1453	ret = true;
1454
1455out:
1456	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1457
1458	return ret;
1459}
1460
1461static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1462				       struct intel_shared_dpll *pll,
1463				       struct intel_dpll_hw_state *dpll_hw_state)
1464{
1465	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1466	const struct skl_dpll_regs *regs = skl_dpll_regs;
1467	const enum intel_dpll_id id = pll->info->id;
1468	intel_wakeref_t wakeref;
1469	u32 val;
1470	bool ret;
1471
1472	wakeref = intel_display_power_get_if_enabled(i915,
1473						     POWER_DOMAIN_DISPLAY_CORE);
1474	if (!wakeref)
1475		return false;
1476
1477	ret = false;
1478
1479	/* DPLL0 is always enabled since it drives CDCLK */
1480	val = intel_de_read(i915, regs[id].ctl);
1481	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1482		goto out;
1483
1484	val = intel_de_read(i915, DPLL_CTRL1);
1485	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1486
1487	ret = true;
1488
1489out:
1490	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1491
1492	return ret;
1493}
1494
1495struct skl_wrpll_context {
1496	u64 min_deviation;		/* current minimal deviation */
1497	u64 central_freq;		/* chosen central freq */
1498	u64 dco_freq;			/* chosen dco freq */
1499	unsigned int p;			/* chosen divider */
1500};
1501
 1502/* DCO freq must be within +1%/-6% of the DCO central freq */
1503#define SKL_DCO_MAX_PDEVIATION	100
1504#define SKL_DCO_MAX_NDEVIATION	600
1505
1506static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1507				  u64 central_freq,
1508				  u64 dco_freq,
1509				  unsigned int divider)
1510{
1511	u64 deviation;
1512
1513	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1514			      central_freq);
1515
1516	/* positive deviation */
1517	if (dco_freq >= central_freq) {
1518		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1519		    deviation < ctx->min_deviation) {
1520			ctx->min_deviation = deviation;
1521			ctx->central_freq = central_freq;
1522			ctx->dco_freq = dco_freq;
1523			ctx->p = divider;
1524		}
1525	/* negative deviation */
1526	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1527		   deviation < ctx->min_deviation) {
1528		ctx->min_deviation = deviation;
1529		ctx->central_freq = central_freq;
1530		ctx->dco_freq = dco_freq;
1531		ctx->p = divider;
1532	}
1533}
1534
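/*
 * The deviation above is in units of 0.01%: 10000 * |dco - central| /
 * central. SKL_DCO_MAX_PDEVIATION = 100 and SKL_DCO_MAX_NDEVIATION = 600
 * therefore encode the +1%/-6% window from the comment. For example, a DCO
 * of 8.91 GHz against a 9.0 GHz central frequency gives 10000 * 0.09 / 9.0 =
 * 100, i.e. exactly 1% low, which is accepted on the negative side.
 */
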
1535static void skl_wrpll_get_multipliers(unsigned int p,
1536				      unsigned int *p0 /* out */,
1537				      unsigned int *p1 /* out */,
1538				      unsigned int *p2 /* out */)
1539{
1540	/* even dividers */
1541	if (p % 2 == 0) {
1542		unsigned int half = p / 2;
1543
1544		if (half == 1 || half == 2 || half == 3 || half == 5) {
1545			*p0 = 2;
1546			*p1 = 1;
1547			*p2 = half;
1548		} else if (half % 2 == 0) {
1549			*p0 = 2;
1550			*p1 = half / 2;
1551			*p2 = 2;
1552		} else if (half % 3 == 0) {
1553			*p0 = 3;
1554			*p1 = half / 3;
1555			*p2 = 2;
1556		} else if (half % 7 == 0) {
1557			*p0 = 7;
1558			*p1 = half / 7;
1559			*p2 = 2;
1560		}
1561	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1562		*p0 = 3;
1563		*p1 = 1;
1564		*p2 = p / 3;
1565	} else if (p == 5 || p == 7) {
1566		*p0 = p;
1567		*p1 = 1;
1568		*p2 = 1;
1569	} else if (p == 15) {
1570		*p0 = 3;
1571		*p1 = 1;
1572		*p2 = 5;
1573	} else if (p == 21) {
1574		*p0 = 7;
1575		*p1 = 1;
1576		*p2 = 3;
1577	} else if (p == 35) {
1578		*p0 = 7;
1579		*p1 = 1;
1580		*p2 = 5;
1581	}
1582}
1583
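/*
 * Every branch above factors the overall divider as p = p0 * p1 * p2 using
 * only hardware-supported values. Two examples: p = 18 is even, half = 9 is
 * divisible by 3, so p0/p1/p2 = 3/3/2 (3 * 3 * 2 = 18); the odd divider
 * p = 35 maps to the fixed 7/1/5 combination.
 */
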
1584struct skl_wrpll_params {
1585	u32 dco_fraction;
1586	u32 dco_integer;
1587	u32 qdiv_ratio;
1588	u32 qdiv_mode;
1589	u32 kdiv;
1590	u32 pdiv;
1591	u32 central_freq;
1592};
1593
1594static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1595				      u64 afe_clock,
1596				      int ref_clock,
1597				      u64 central_freq,
1598				      u32 p0, u32 p1, u32 p2)
1599{
1600	u64 dco_freq;
1601
1602	switch (central_freq) {
1603	case 9600000000ULL:
1604		params->central_freq = 0;
1605		break;
1606	case 9000000000ULL:
1607		params->central_freq = 1;
1608		break;
1609	case 8400000000ULL:
1610		params->central_freq = 3;
1611	}
1612
1613	switch (p0) {
1614	case 1:
1615		params->pdiv = 0;
1616		break;
1617	case 2:
1618		params->pdiv = 1;
1619		break;
1620	case 3:
1621		params->pdiv = 2;
1622		break;
1623	case 7:
1624		params->pdiv = 4;
1625		break;
1626	default:
1627		WARN(1, "Incorrect PDiv\n");
1628	}
1629
1630	switch (p2) {
1631	case 5:
1632		params->kdiv = 0;
1633		break;
1634	case 2:
1635		params->kdiv = 1;
1636		break;
1637	case 3:
1638		params->kdiv = 2;
1639		break;
1640	case 1:
1641		params->kdiv = 3;
1642		break;
1643	default:
1644		WARN(1, "Incorrect KDiv\n");
1645	}
1646
1647	params->qdiv_ratio = p1;
1648	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1649
1650	dco_freq = p0 * p1 * p2 * afe_clock;
1651
1652	/*
1653	 * Intermediate values are in Hz.
 1654	 * Divide by MHz to match bspec
1655	 */
1656	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1657	params->dco_fraction =
1658		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1659			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1660}
1661
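/*
 * Worked example of the fixed-point split above, assuming a 24 MHz reference
 * (ref_clock = 24000) and a DCO frequency of 8.91 GHz: the DCO/ref ratio is
 * 371.25, so dco_integer = 371 and the remaining 0.25 is encoded in 1/0x8000
 * steps as dco_fraction = 0.25 * 0x8000 = 0x2000.
 */
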
1662static int
1663skl_ddi_calculate_wrpll(int clock,
1664			int ref_clock,
1665			struct skl_wrpll_params *wrpll_params)
1666{
1667	static const u64 dco_central_freq[3] = { 8400000000ULL,
1668						 9000000000ULL,
1669						 9600000000ULL };
1670	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1671					    24, 28, 30, 32, 36, 40, 42, 44,
1672					    48, 52, 54, 56, 60, 64, 66, 68,
1673					    70, 72, 76, 78, 80, 84, 88, 90,
1674					    92, 96, 98 };
1675	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1676	static const struct {
1677		const u8 *list;
1678		int n_dividers;
1679	} dividers[] = {
1680		{ even_dividers, ARRAY_SIZE(even_dividers) },
1681		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1682	};
1683	struct skl_wrpll_context ctx = {
1684		.min_deviation = U64_MAX,
1685	};
1686	unsigned int dco, d, i;
1687	unsigned int p0, p1, p2;
1688	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
1689
1690	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1691		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1692			for (i = 0; i < dividers[d].n_dividers; i++) {
1693				unsigned int p = dividers[d].list[i];
1694				u64 dco_freq = p * afe_clock;
1695
1696				skl_wrpll_try_divider(&ctx,
1697						      dco_central_freq[dco],
1698						      dco_freq,
1699						      p);
1700				/*
 1701				 * Skip the remaining dividers once we have found
 1702				 * the definitive divider; we can't improve on a
 1703				 * 0 deviation.
1704				 */
1705				if (ctx.min_deviation == 0)
1706					goto skip_remaining_dividers;
1707			}
1708		}
1709
1710skip_remaining_dividers:
1711		/*
1712		 * If a solution is found with an even divider, prefer
1713		 * this one.
1714		 */
1715		if (d == 0 && ctx.p)
1716			break;
1717	}
1718
1719	if (!ctx.p)
1720		return -EINVAL;
1721
1722	/*
1723	 * gcc incorrectly analyses that these can be used without being
1724	 * initialized. To be fair, it's hard to guess.
1725	 */
1726	p0 = p1 = p2 = 0;
1727	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1728	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1729				  ctx.central_freq, p0, p1, p2);
1730
1731	return 0;
1732}
1733
1734static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1735				  const struct intel_shared_dpll *pll,
1736				  const struct intel_dpll_hw_state *dpll_hw_state)
1737{
1738	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1739	int ref_clock = i915->display.dpll.ref_clks.nssc;
1740	u32 p0, p1, p2, dco_freq;
1741
1742	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1743	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1744
 1745	if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1746		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1747	else
1748		p1 = 1;
1749
1750
1751	switch (p0) {
1752	case DPLL_CFGCR2_PDIV_1:
1753		p0 = 1;
1754		break;
1755	case DPLL_CFGCR2_PDIV_2:
1756		p0 = 2;
1757		break;
1758	case DPLL_CFGCR2_PDIV_3:
1759		p0 = 3;
1760		break;
1761	case DPLL_CFGCR2_PDIV_7_INVALID:
1762		/*
 1763		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
 1764		 * so handle it the same way as PDIV_7.
1765		 */
1766		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1767		fallthrough;
1768	case DPLL_CFGCR2_PDIV_7:
1769		p0 = 7;
1770		break;
1771	default:
1772		MISSING_CASE(p0);
1773		return 0;
1774	}
1775
1776	switch (p2) {
1777	case DPLL_CFGCR2_KDIV_5:
1778		p2 = 5;
1779		break;
1780	case DPLL_CFGCR2_KDIV_2:
1781		p2 = 2;
1782		break;
1783	case DPLL_CFGCR2_KDIV_3:
1784		p2 = 3;
1785		break;
1786	case DPLL_CFGCR2_KDIV_1:
1787		p2 = 1;
1788		break;
1789	default:
1790		MISSING_CASE(p2);
1791		return 0;
1792	}
1793
1794	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1795		   ref_clock;
1796
1797	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1798		    ref_clock / 0x8000;
1799
1800	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1801		return 0;
1802
1803	return dco_freq / (p0 * p1 * p2 * 5);
1804}
1805
1806static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1807{
1808	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1809	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1810	struct skl_wrpll_params wrpll_params = {};
1811	int ret;
1812
1813	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1814				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1815	if (ret)
1816		return ret;
1817
1818	/*
1819	 * See comment in intel_dpll_hw_state to understand why we always use 0
1820	 * as the DPLL id in this function.
1821	 */
1822	hw_state->ctrl1 =
1823		DPLL_CTRL1_OVERRIDE(0) |
1824		DPLL_CTRL1_HDMI_MODE(0);
1825
1826	hw_state->cfgcr1 =
1827		DPLL_CFGCR1_FREQ_ENABLE |
1828		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1829		wrpll_params.dco_integer;
1830
1831	hw_state->cfgcr2 =
1832		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1833		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1834		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1835		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1836		wrpll_params.central_freq;
1837
1838	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1839							&crtc_state->dpll_hw_state);
1840
1841	return 0;
1842}
1843
1844static int
1845skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1846{
1847	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1848	u32 ctrl1;
1849
1850	/*
1851	 * See comment in intel_dpll_hw_state to understand why we always use 0
1852	 * as the DPLL id in this function.
1853	 */
1854	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1855	switch (crtc_state->port_clock / 2) {
1856	case 81000:
1857		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1858		break;
1859	case 135000:
1860		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1861		break;
1862	case 270000:
1863		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1864		break;
1865		/* eDP 1.4 rates */
1866	case 162000:
1867		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1868		break;
1869	case 108000:
1870		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1871		break;
1872	case 216000:
1873		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1874		break;
1875	}
1876
1877	hw_state->ctrl1 = ctrl1;
1878
1879	return 0;
1880}
1881
1882static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1883				  const struct intel_shared_dpll *pll,
1884				  const struct intel_dpll_hw_state *dpll_hw_state)
1885{
1886	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1887	int link_clock = 0;
1888
1889	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1890		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1891	case DPLL_CTRL1_LINK_RATE_810:
1892		link_clock = 81000;
1893		break;
1894	case DPLL_CTRL1_LINK_RATE_1080:
1895		link_clock = 108000;
1896		break;
1897	case DPLL_CTRL1_LINK_RATE_1350:
1898		link_clock = 135000;
1899		break;
1900	case DPLL_CTRL1_LINK_RATE_1620:
1901		link_clock = 162000;
1902		break;
1903	case DPLL_CTRL1_LINK_RATE_2160:
1904		link_clock = 216000;
1905		break;
1906	case DPLL_CTRL1_LINK_RATE_2700:
1907		link_clock = 270000;
1908		break;
1909	default:
1910		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1911		break;
1912	}
1913
1914	return link_clock * 2;
1915}
1916
1917static int skl_compute_dpll(struct intel_atomic_state *state,
1918			    struct intel_crtc *crtc,
1919			    struct intel_encoder *encoder)
1920{
1921	struct intel_crtc_state *crtc_state =
1922		intel_atomic_get_new_crtc_state(state, crtc);
1923
1924	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1925		return skl_ddi_hdmi_pll_dividers(crtc_state);
1926	else if (intel_crtc_has_dp_encoder(crtc_state))
1927		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1928	else
1929		return -EINVAL;
1930}
1931
1932static int skl_get_dpll(struct intel_atomic_state *state,
1933			struct intel_crtc *crtc,
1934			struct intel_encoder *encoder)
1935{
1936	struct intel_crtc_state *crtc_state =
1937		intel_atomic_get_new_crtc_state(state, crtc);
1938	struct intel_shared_dpll *pll;
1939
1940	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1941		pll = intel_find_shared_dpll(state, crtc,
1942					     &crtc_state->dpll_hw_state,
1943					     BIT(DPLL_ID_SKL_DPLL0));
1944	else
1945		pll = intel_find_shared_dpll(state, crtc,
1946					     &crtc_state->dpll_hw_state,
1947					     BIT(DPLL_ID_SKL_DPLL3) |
1948					     BIT(DPLL_ID_SKL_DPLL2) |
1949					     BIT(DPLL_ID_SKL_DPLL1));
1950	if (!pll)
1951		return -EINVAL;
1952
1953	intel_reference_shared_dpll(state, crtc,
1954				    pll, &crtc_state->dpll_hw_state);
1955
1956	crtc_state->shared_dpll = pll;
1957
1958	return 0;
1959}
1960
1961static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1962				const struct intel_shared_dpll *pll,
1963				const struct intel_dpll_hw_state *dpll_hw_state)
1964{
1965	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1966
1967	/*
1968	 * ctrl1 register is already shifted for each pll, just use 0 to get
1969	 * the internal shift for each field
1970	 */
1971	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1972		return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1973	else
1974		return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1975}
1976
1977static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1978{
1979	/* No SSC ref */
1980	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1981}
1982
1983static void skl_dump_hw_state(struct drm_printer *p,
1984			      const struct intel_dpll_hw_state *dpll_hw_state)
1985{
1986	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1987
1988	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1989		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1990}
1991
1992static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1993				 const struct intel_dpll_hw_state *_b)
1994{
1995	const struct skl_dpll_hw_state *a = &_a->skl;
1996	const struct skl_dpll_hw_state *b = &_b->skl;
1997
1998	return a->ctrl1 == b->ctrl1 &&
1999		a->cfgcr1 == b->cfgcr1 &&
2000		a->cfgcr2 == b->cfgcr2;
2001}
2002
2003static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2004	.enable = skl_ddi_pll_enable,
2005	.disable = skl_ddi_pll_disable,
2006	.get_hw_state = skl_ddi_pll_get_hw_state,
2007	.get_freq = skl_ddi_pll_get_freq,
2008};
2009
2010static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2011	.enable = skl_ddi_dpll0_enable,
2012	.disable = skl_ddi_dpll0_disable,
2013	.get_hw_state = skl_ddi_dpll0_get_hw_state,
2014	.get_freq = skl_ddi_pll_get_freq,
2015};
2016
2017static const struct dpll_info skl_plls[] = {
2018	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2019	  .always_on = true, },
2020	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2021	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2022	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2023	{}
2024};
2025
2026static const struct intel_dpll_mgr skl_pll_mgr = {
2027	.dpll_info = skl_plls,
2028	.compute_dplls = skl_compute_dpll,
2029	.get_dplls = skl_get_dpll,
2030	.put_dplls = intel_put_dpll,
2031	.update_ref_clks = skl_update_dpll_ref_clks,
2032	.dump_hw_state = skl_dump_hw_state,
2033	.compare_hw_state = skl_compare_hw_state,
2034};
2035
2036static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2037			       struct intel_shared_dpll *pll,
2038			       const struct intel_dpll_hw_state *dpll_hw_state)
2039{
2040	struct intel_display *display = &i915->display;
2041	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2042	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2043	enum dpio_phy phy;
2044	enum dpio_channel ch;
2045	u32 temp;
2046
2047	bxt_port_to_phy_channel(display, port, &phy, &ch);
2048
2049	/* Non-SSC reference */
2050	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2051
2052	if (IS_GEMINILAKE(i915)) {
2053		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2054			     0, PORT_PLL_POWER_ENABLE);
2055
2056		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2057				 PORT_PLL_POWER_STATE), 200))
2058			drm_err(&i915->drm,
2059				"Power state not set for PLL:%d\n", port);
2060	}
2061
2062	/* Disable 10 bit clock */
2063	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2064		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2065
2066	/* Write P1 & P2 */
2067	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2068		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2069
2070	/* Write M2 integer */
2071	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2072		     PORT_PLL_M2_INT_MASK, hw_state->pll0);
2073
2074	/* Write N */
2075	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2076		     PORT_PLL_N_MASK, hw_state->pll1);
2077
2078	/* Write M2 fraction */
2079	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2080		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2081
2082	/* Write M2 fraction enable */
2083	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2084		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2085
2086	/* Write coeff */
2087	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2088	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2089	temp &= ~PORT_PLL_INT_COEFF_MASK;
2090	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2091	temp |= hw_state->pll6;
2092	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2093
2094	/* Write calibration val */
2095	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2096		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2097
2098	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2099		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2100
2101	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2102	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2103	temp &= ~PORT_PLL_DCO_AMP_MASK;
2104	temp |= hw_state->pll10;
2105	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2106
2107	/* Recalibrate with new settings */
2108	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2109	temp |= PORT_PLL_RECALIBRATE;
2110	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2111	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2112	temp |= hw_state->ebb4;
2113	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2114
2115	/* Enable PLL */
2116	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2117	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2118
2119	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2120			200))
2121		drm_err(&i915->drm, "PLL %d not locked\n", port);
2122
2123	if (IS_GEMINILAKE(i915)) {
2124		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2125		temp |= DCC_DELAY_RANGE_2;
2126		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2127	}
2128
2129	/*
2130	 * While we write to the group register to program all lanes at once we
2131	 * can read only lane registers and we pick lanes 0/1 for that.
2132	 */
2133	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2134	temp &= ~LANE_STAGGER_MASK;
2135	temp &= ~LANESTAGGER_STRAP_OVRD;
2136	temp |= hw_state->pcsdw12;
2137	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2138}
2139
2140static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2141				struct intel_shared_dpll *pll)
2142{
2143	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2144
2145	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2146	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2147
2148	if (IS_GEMINILAKE(i915)) {
2149		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2150			     PORT_PLL_POWER_ENABLE, 0);
2151
2152		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2153				  PORT_PLL_POWER_STATE), 200))
2154			drm_err(&i915->drm,
2155				"Power state not reset for PLL:%d\n", port);
2156	}
2157}
2158
2159static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2160				     struct intel_shared_dpll *pll,
2161				     struct intel_dpll_hw_state *dpll_hw_state)
2162{
2163	struct intel_display *display = &i915->display;
2164	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2165	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2166	intel_wakeref_t wakeref;
2167	enum dpio_phy phy;
2168	enum dpio_channel ch;
2169	u32 val;
2170	bool ret;
2171
2172	bxt_port_to_phy_channel(display, port, &phy, &ch);
2173
2174	wakeref = intel_display_power_get_if_enabled(i915,
2175						     POWER_DOMAIN_DISPLAY_CORE);
2176	if (!wakeref)
2177		return false;
2178
2179	ret = false;
2180
2181	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2182	if (!(val & PORT_PLL_ENABLE))
2183		goto out;
2184
2185	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2186	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2187
2188	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2189	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2190
2191	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2192	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2193
2194	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2195	hw_state->pll1 &= PORT_PLL_N_MASK;
2196
2197	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2198	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2199
2200	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2201	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2202
2203	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2204	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2205			  PORT_PLL_INT_COEFF_MASK |
2206			  PORT_PLL_GAIN_CTL_MASK;
2207
2208	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2209	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2210
2211	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2212	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2213
2214	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2215	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2216			   PORT_PLL_DCO_AMP_MASK;
2217
2218	/*
2219	 * While we write to the group register to program all lanes at once we
2220	 * can read only lane registers. We configure all lanes the same way, so
2221	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2222	 */
2223	hw_state->pcsdw12 = intel_de_read(i915,
2224					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2225	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2226		drm_dbg(&i915->drm,
2227			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2228			hw_state->pcsdw12,
2229			intel_de_read(i915,
2230				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2231	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2232
2233	ret = true;
2234
2235out:
2236	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2237
2238	return ret;
2239}
2240
2241/* pre-calculated values for DP linkrates */
2242static const struct dpll bxt_dp_clk_val[] = {
2243	/* m2 is .22 binary fixed point */
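	/* e.g. 0x819999a / 2^22 ~= 32.4 and 0x6c00000 / 2^22 = 27.0 exactly */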
2244	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2245	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2246	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2247	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2248	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2249	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2250	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2251};
2252
2253static int
2254bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2255			  struct dpll *clk_div)
2256{
2257	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2258
2259	/* Calculate HDMI div */
2260	/*
2261	 * FIXME: tie the following calculation into
2262	 * i9xx_crtc_compute_clock
2263	 */
2264	if (!bxt_find_best_dpll(crtc_state, clk_div))
2265		return -EINVAL;
2266
2267	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2268
2269	return 0;
2270}
2271
2272static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2273				    struct dpll *clk_div)
2274{
2275	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2276	int i;
2277
2278	*clk_div = bxt_dp_clk_val[0];
2279	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2280		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2281			*clk_div = bxt_dp_clk_val[i];
2282			break;
2283		}
2284	}
2285
2286	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2287
2288	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2289		    clk_div->dot != crtc_state->port_clock);
2290}
2291
2292static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2293				     const struct dpll *clk_div)
2294{
2295	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2296	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2297	int clock = crtc_state->port_clock;
2298	int vco = clk_div->vco;
2299	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2300	u32 lanestagger;
2301
2302	if (vco >= 6200000 && vco <= 6700000) {
2303		prop_coef = 4;
2304		int_coef = 9;
2305		gain_ctl = 3;
2306		targ_cnt = 8;
2307	} else if ((vco > 5400000 && vco < 6200000) ||
2308			(vco >= 4800000 && vco < 5400000)) {
2309		prop_coef = 5;
2310		int_coef = 11;
2311		gain_ctl = 3;
2312		targ_cnt = 9;
2313	} else if (vco == 5400000) {
2314		prop_coef = 3;
2315		int_coef = 8;
2316		gain_ctl = 1;
2317		targ_cnt = 9;
2318	} else {
2319		drm_err(&i915->drm, "Invalid VCO\n");
2320		return -EINVAL;
2321	}
2322
2323	if (clock > 270000)
2324		lanestagger = 0x18;
2325	else if (clock > 135000)
2326		lanestagger = 0x0d;
2327	else if (clock > 67000)
2328		lanestagger = 0x07;
2329	else if (clock > 33000)
2330		lanestagger = 0x04;
2331	else
2332		lanestagger = 0x02;
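	/* e.g. a 270000 kHz DP link picks 0x0d, a 540000 kHz one picks 0x18 */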
2333
2334	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2335	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2336	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2337	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2338
2339	if (clk_div->m2 & 0x3fffff)
2340		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2341
2342	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2343		PORT_PLL_INT_COEFF(int_coef) |
2344		PORT_PLL_GAIN_CTL(gain_ctl);
2345
2346	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2347
2348	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2349
2350	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2351		PORT_PLL_DCO_AMP_OVR_EN_H;
2352
2353	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2354
2355	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2356
2357	return 0;
2358}
2359
2360static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2361				const struct intel_shared_dpll *pll,
2362				const struct intel_dpll_hw_state *dpll_hw_state)
2363{
2364	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2365	struct dpll clock;
2366
2367	clock.m1 = 2;
2368	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2369	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2370		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2371					  hw_state->pll2);
2372	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2373	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2374	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2375
2376	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2377}
2378
2379static int
2380bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2381{
2382	struct dpll clk_div = {};
2383
2384	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2385
2386	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2387}
2388
2389static int
2390bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2391{
2392	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2393	struct dpll clk_div = {};
2394	int ret;
2395
2396	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2397
2398	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2399	if (ret)
2400		return ret;
2401
2402	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2403						      &crtc_state->dpll_hw_state);
2404
2405	return 0;
2406}
2407
2408static int bxt_compute_dpll(struct intel_atomic_state *state,
2409			    struct intel_crtc *crtc,
2410			    struct intel_encoder *encoder)
2411{
2412	struct intel_crtc_state *crtc_state =
2413		intel_atomic_get_new_crtc_state(state, crtc);
2414
2415	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2416		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2417	else if (intel_crtc_has_dp_encoder(crtc_state))
2418		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2419	else
2420		return -EINVAL;
2421}
2422
2423static int bxt_get_dpll(struct intel_atomic_state *state,
2424			struct intel_crtc *crtc,
2425			struct intel_encoder *encoder)
2426{
2427	struct intel_crtc_state *crtc_state =
2428		intel_atomic_get_new_crtc_state(state, crtc);
2429	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2430	struct intel_shared_dpll *pll;
2431	enum intel_dpll_id id;
2432
2433	/* 1:1 mapping between ports and PLLs */
2434	id = (enum intel_dpll_id) encoder->port;
2435	pll = intel_get_shared_dpll_by_id(i915, id);
2436
2437	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2438		    crtc->base.base.id, crtc->base.name, pll->info->name);
2439
2440	intel_reference_shared_dpll(state, crtc,
2441				    pll, &crtc_state->dpll_hw_state);
2442
2443	crtc_state->shared_dpll = pll;
2444
2445	return 0;
2446}
2447
2448static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2449{
2450	i915->display.dpll.ref_clks.ssc = 100000;
2451	i915->display.dpll.ref_clks.nssc = 100000;
2452	/* DSI non-SSC ref 19.2MHz */
2453}
2454
2455static void bxt_dump_hw_state(struct drm_printer *p,
2456			      const struct intel_dpll_hw_state *dpll_hw_state)
2457{
2458	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2459
2460	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2461		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2462		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2463		   hw_state->ebb0, hw_state->ebb4,
2464		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2465		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2466		   hw_state->pcsdw12);
2467}
2468
2469static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2470				 const struct intel_dpll_hw_state *_b)
2471{
2472	const struct bxt_dpll_hw_state *a = &_a->bxt;
2473	const struct bxt_dpll_hw_state *b = &_b->bxt;
2474
2475	return a->ebb0 == b->ebb0 &&
2476		a->ebb4 == b->ebb4 &&
2477		a->pll0 == b->pll0 &&
2478		a->pll1 == b->pll1 &&
2479		a->pll2 == b->pll2 &&
2480		a->pll3 == b->pll3 &&
2481		a->pll6 == b->pll6 &&
2482		a->pll8 == b->pll8 &&
2483		a->pll10 == b->pll10 &&
2484		a->pcsdw12 == b->pcsdw12;
2485}
2486
2487static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2488	.enable = bxt_ddi_pll_enable,
2489	.disable = bxt_ddi_pll_disable,
2490	.get_hw_state = bxt_ddi_pll_get_hw_state,
2491	.get_freq = bxt_ddi_pll_get_freq,
2492};
2493
2494static const struct dpll_info bxt_plls[] = {
2495	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2496	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2497	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2498	{}
2499};
2500
2501static const struct intel_dpll_mgr bxt_pll_mgr = {
2502	.dpll_info = bxt_plls,
2503	.compute_dplls = bxt_compute_dpll,
2504	.get_dplls = bxt_get_dpll,
2505	.put_dplls = intel_put_dpll,
2506	.update_ref_clks = bxt_update_dpll_ref_clks,
2507	.dump_hw_state = bxt_dump_hw_state,
2508	.compare_hw_state = bxt_compare_hw_state,
2509};
2510
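/*
 * Decompose the overall DCO divider (bestdiv) into the P/Q/K dividers. A few
 * sanity-check examples: 20 = 2 (P) * 5 (Q) * 2 (K), 21 = 7 (P) * 1 (Q) * 3 (K)
 * and 3 = 3 (P) * 1 (Q) * 1 (K).
 */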
2511static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2512				      int *qdiv, int *kdiv)
2513{
2514	/* even dividers */
2515	if (bestdiv % 2 == 0) {
2516		if (bestdiv == 2) {
2517			*pdiv = 2;
2518			*qdiv = 1;
2519			*kdiv = 1;
2520		} else if (bestdiv % 4 == 0) {
2521			*pdiv = 2;
2522			*qdiv = bestdiv / 4;
2523			*kdiv = 2;
2524		} else if (bestdiv % 6 == 0) {
2525			*pdiv = 3;
2526			*qdiv = bestdiv / 6;
2527			*kdiv = 2;
2528		} else if (bestdiv % 5 == 0) {
2529			*pdiv = 5;
2530			*qdiv = bestdiv / 10;
2531			*kdiv = 2;
2532		} else if (bestdiv % 14 == 0) {
2533			*pdiv = 7;
2534			*qdiv = bestdiv / 14;
2535			*kdiv = 2;
2536		}
2537	} else {
2538		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2539			*pdiv = bestdiv;
2540			*qdiv = 1;
2541			*kdiv = 1;
2542		} else { /* 9, 15, 21 */
2543			*pdiv = bestdiv / 3;
2544			*qdiv = 1;
2545			*kdiv = 3;
2546		}
2547	}
2548}
2549
2550static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2551				      u32 dco_freq, u32 ref_freq,
2552				      int pdiv, int qdiv, int kdiv)
2553{
2554	u32 dco;
2555
2556	switch (kdiv) {
2557	case 1:
2558		params->kdiv = 1;
2559		break;
2560	case 2:
2561		params->kdiv = 2;
2562		break;
2563	case 3:
2564		params->kdiv = 4;
2565		break;
2566	default:
2567		WARN(1, "Incorrect KDiv\n");
2568	}
2569
2570	switch (pdiv) {
2571	case 2:
2572		params->pdiv = 1;
2573		break;
2574	case 3:
2575		params->pdiv = 2;
2576		break;
2577	case 5:
2578		params->pdiv = 4;
2579		break;
2580	case 7:
2581		params->pdiv = 8;
2582		break;
2583	default:
2584		WARN(1, "Incorrect PDiv\n");
2585	}
2586
2587	WARN_ON(kdiv != 2 && qdiv != 1);
2588
2589	params->qdiv_ratio = qdiv;
2590	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2591
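	/*
	 * Split the DCO/ref ratio into an integer part and 15 fractional
	 * bits. For example, an 8100000 kHz DCO with a 24000 kHz reference
	 * is a ratio of 337.5, i.e. dco_integer = 0x151 and
	 * dco_fraction = 0x4000, matching the 24 MHz table entries below.
	 */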
2592	dco = div_u64((u64)dco_freq << 15, ref_freq);
2593
2594	params->dco_integer = dco >> 15;
2595	params->dco_fraction = dco & 0x7fff;
2596}
2597
2598/*
2599 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2600 * Program half of the nominal DCO divider fraction value.
2601 */
2602static bool
2603ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2604{
2605	return ((IS_ELKHARTLAKE(i915) &&
2606		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2607		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2608		 i915->display.dpll.ref_clks.nssc == 38400;
2609}
2610
2611struct icl_combo_pll_params {
2612	int clock;
2613	struct skl_wrpll_params wrpll;
2614};
2615
2616/*
2617 * These values are already adjusted: they're the bits we write to the
2618 * registers, not the logical values.
2619 */
2620static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2621	{ 540000,
2622	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2623	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2624	{ 270000,
2625	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2626	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2627	{ 162000,
2628	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2629	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2630	{ 324000,
2631	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2632	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2633	{ 216000,
2634	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2635	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2636	{ 432000,
2637	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2638	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2639	{ 648000,
2640	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2641	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2642	{ 810000,
2643	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2644	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2645};
2646
2647
2648/* Also used for 38.4 MHz values. */
2649static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2650	{ 540000,
2651	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2652	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2653	{ 270000,
2654	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2655	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2656	{ 162000,
2657	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2658	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2659	{ 324000,
2660	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2661	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2662	{ 216000,
2663	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2664	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2665	{ 432000,
2666	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2667	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2668	{ 648000,
2669	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2670	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2671	{ 810000,
2672	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2673	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2674};
2675
2676static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2677	.dco_integer = 0x151, .dco_fraction = 0x4000,
2678	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2679};
2680
2681static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2682	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2683	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2684};
2685
2686static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2687	.dco_integer = 0x54, .dco_fraction = 0x3000,
2688	/* the following params are unused */
2689	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2690};
2691
2692static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2693	.dco_integer = 0x43, .dco_fraction = 0x4000,
2694	/* the following params are unused */
2695};
2696
2697static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2698				 struct skl_wrpll_params *pll_params)
2699{
2700	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2701	const struct icl_combo_pll_params *params =
2702		i915->display.dpll.ref_clks.nssc == 24000 ?
2703		icl_dp_combo_pll_24MHz_values :
2704		icl_dp_combo_pll_19_2MHz_values;
2705	int clock = crtc_state->port_clock;
2706	int i;
2707
2708	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2709		if (clock == params[i].clock) {
2710			*pll_params = params[i].wrpll;
2711			return 0;
2712		}
2713	}
2714
2715	MISSING_CASE(clock);
2716	return -EINVAL;
2717}
2718
2719static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2720			    struct skl_wrpll_params *pll_params)
2721{
2722	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2723
2724	if (DISPLAY_VER(i915) >= 12) {
2725		switch (i915->display.dpll.ref_clks.nssc) {
2726		default:
2727			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2728			fallthrough;
2729		case 19200:
2730		case 38400:
2731			*pll_params = tgl_tbt_pll_19_2MHz_values;
2732			break;
2733		case 24000:
2734			*pll_params = tgl_tbt_pll_24MHz_values;
2735			break;
2736		}
2737	} else {
2738		switch (i915->display.dpll.ref_clks.nssc) {
2739		default:
2740			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2741			fallthrough;
2742		case 19200:
2743		case 38400:
2744			*pll_params = icl_tbt_pll_19_2MHz_values;
2745			break;
2746		case 24000:
2747			*pll_params = icl_tbt_pll_24MHz_values;
2748			break;
2749		}
2750	}
2751
2752	return 0;
2753}
2754
2755static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2756				    const struct intel_shared_dpll *pll,
2757				    const struct intel_dpll_hw_state *dpll_hw_state)
2758{
2759	/*
2760	 * The PLL outputs multiple frequencies at the same time, selection is
2761	 * made at DDI clock mux level.
2762	 */
2763	drm_WARN_ON(&i915->drm, 1);
2764
2765	return 0;
2766}
2767
2768static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2769{
2770	int ref_clock = i915->display.dpll.ref_clks.nssc;
2771
2772	/*
2773	 * For ICL+, the spec states: if reference frequency is 38.4,
2774	 * use 19.2 because the DPLL automatically divides that by 2.
2775	 */
2776	if (ref_clock == 38400)
2777		ref_clock = 19200;
2778
2779	return ref_clock;
2780}
2781
2782static int
2783icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2784	       struct skl_wrpll_params *wrpll_params)
2785{
2786	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2787	int ref_clock = icl_wrpll_ref_clock(i915);
2788	u32 afe_clock = crtc_state->port_clock * 5;
2789	u32 dco_min = 7998000;
2790	u32 dco_max = 10000000;
2791	u32 dco_mid = (dco_min + dco_max) / 2;
2792	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2793					 18, 20, 24, 28, 30, 32,  36,  40,
2794					 42, 44, 48, 50, 52, 54,  56,  60,
2795					 64, 66, 68, 70, 72, 76,  78,  80,
2796					 84, 88, 90, 92, 96, 98, 100, 102,
2797					  3,  5,  7,  9, 15, 21 };
2798	u32 dco, best_dco = 0, dco_centrality = 0;
2799	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2800	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2801
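	/*
	 * For example, a 594000 kHz HDMI port clock gives an AFE clock of
	 * 2970000 kHz; of the dividers above only 3 puts the DCO in range
	 * (8910000 kHz), so best_div = 3, which icl_wrpll_get_multipliers()
	 * splits into pdiv = 3, qdiv = 1, kdiv = 1.
	 */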
2802	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2803		dco = afe_clock * dividers[d];
2804
2805		if (dco <= dco_max && dco >= dco_min) {
2806			dco_centrality = abs(dco - dco_mid);
2807
2808			if (dco_centrality < best_dco_centrality) {
2809				best_dco_centrality = dco_centrality;
2810				best_div = dividers[d];
2811				best_dco = dco;
2812			}
2813		}
2814	}
2815
2816	if (best_div == 0)
2817		return -EINVAL;
2818
2819	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2820	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2821				  pdiv, qdiv, kdiv);
2822
2823	return 0;
2824}
2825
2826static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2827				      const struct intel_shared_dpll *pll,
2828				      const struct intel_dpll_hw_state *dpll_hw_state)
2829{
2830	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2831	int ref_clock = icl_wrpll_ref_clock(i915);
2832	u32 dco_fraction;
2833	u32 p0, p1, p2, dco_freq;
2834
2835	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2836	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2837
2838	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2839		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2840			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2841	else
2842		p1 = 1;
2843
2844	switch (p0) {
2845	case DPLL_CFGCR1_PDIV_2:
2846		p0 = 2;
2847		break;
2848	case DPLL_CFGCR1_PDIV_3:
2849		p0 = 3;
2850		break;
2851	case DPLL_CFGCR1_PDIV_5:
2852		p0 = 5;
2853		break;
2854	case DPLL_CFGCR1_PDIV_7:
2855		p0 = 7;
2856		break;
2857	}
2858
2859	switch (p2) {
2860	case DPLL_CFGCR1_KDIV_1:
2861		p2 = 1;
2862		break;
2863	case DPLL_CFGCR1_KDIV_2:
2864		p2 = 2;
2865		break;
2866	case DPLL_CFGCR1_KDIV_3:
2867		p2 = 3;
2868		break;
2869	}
2870
2871	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2872		   ref_clock;
2873
2874	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2875		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2876
2877	if (ehl_combo_pll_div_frac_wa_needed(i915))
2878		dco_fraction *= 2;
2879
2880	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2881
2882	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2883		return 0;
2884
2885	return dco_freq / (p0 * p1 * p2 * 5);
2886}
2887
2888static void icl_calc_dpll_state(struct drm_i915_private *i915,
2889				const struct skl_wrpll_params *pll_params,
2890				struct intel_dpll_hw_state *dpll_hw_state)
2891{
2892	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2893	u32 dco_fraction = pll_params->dco_fraction;
2894
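	/*
	 * Display WA #22010492432: program half of the nominal fraction,
	 * e.g. a nominal 0x4000 is written as 0x2000; the readout in
	 * icl_ddi_combo_pll_get_freq() doubles it back.
	 */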
2895	if (ehl_combo_pll_div_frac_wa_needed(i915))
2896		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2897
2898	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2899			    pll_params->dco_integer;
2900
2901	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2902			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2903			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2904			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2905
2906	if (DISPLAY_VER(i915) >= 12)
2907		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2908	else
2909		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2910
2911	if (i915->display.vbt.override_afc_startup)
2912		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2913}
2914
2915static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2916				    u32 *target_dco_khz,
2917				    struct icl_dpll_hw_state *hw_state,
2918				    bool is_dkl)
2919{
2920	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2921	u32 dco_min_freq, dco_max_freq;
2922	unsigned int i;
2923	int div2;
2924
2925	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2926	dco_max_freq = is_dp ? 8100000 : 10000000;
2927
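	/*
	 * Example: a 270000 kHz DP link must hit the 8100000 kHz DP DCO
	 * exactly, which the search below satisfies with div1 = 3 and
	 * div2 = 2 (3 * 2 * 270000 * 5 = 8100000).
	 */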
2928	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2929		int div1 = div1_vals[i];
2930
2931		for (div2 = 10; div2 > 0; div2--) {
2932			int dco = div1 * div2 * clock_khz * 5;
2933			int a_divratio, tlinedrv, inputsel;
2934			u32 hsdiv;
2935
2936			if (dco < dco_min_freq || dco > dco_max_freq)
2937				continue;
2938
2939			if (div2 >= 2) {
2940				/*
2941				 * Note: a_divratio not matching TGL BSpec
2942				 * algorithm but matching hardcoded values and
2943				 * working on HW for DP alt-mode at least
2944				 */
2945				a_divratio = is_dp ? 10 : 5;
2946				tlinedrv = is_dkl ? 1 : 2;
2947			} else {
2948				a_divratio = 5;
2949				tlinedrv = 0;
2950			}
2951			inputsel = is_dp ? 0 : 1;
2952
2953			switch (div1) {
2954			default:
2955				MISSING_CASE(div1);
2956				fallthrough;
2957			case 2:
2958				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2959				break;
2960			case 3:
2961				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2962				break;
2963			case 5:
2964				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2965				break;
2966			case 7:
2967				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2968				break;
2969			}
2970
2971			*target_dco_khz = dco;
2972
2973			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2974
2975			hw_state->mg_clktop2_coreclkctl1 =
2976				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2977
2978			hw_state->mg_clktop2_hsclkctl =
2979				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2980				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2981				hsdiv |
2982				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2983
2984			return 0;
2985		}
2986	}
2987
2988	return -EINVAL;
2989}
2990
2991/*
2992 * The specification for this function uses real numbers, so the math had to be
2993 * adapted to integer-only calculation, that's why it looks so different.
2994 */
2995static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2996				 struct intel_dpll_hw_state *dpll_hw_state)
2997{
2998	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2999	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3000	int refclk_khz = i915->display.dpll.ref_clks.nssc;
3001	int clock = crtc_state->port_clock;
3002	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3003	u32 iref_ndiv, iref_trim, iref_pulse_w;
3004	u32 prop_coeff, int_coeff;
3005	u32 tdc_targetcnt, feedfwgain;
3006	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3007	u64 tmp;
3008	bool use_ssc = false;
3009	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3010	bool is_dkl = DISPLAY_VER(i915) >= 12;
3011	int ret;
3012
3013	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3014				       hw_state, is_dkl);
3015	if (ret)
3016		return ret;
3017
3018	m1div = 2;
3019	m2div_int = dco_khz / (refclk_khz * m1div);
3020	if (m2div_int > 255) {
3021		if (!is_dkl) {
3022			m1div = 4;
3023			m2div_int = dco_khz / (refclk_khz * m1div);
3024		}
3025
3026		if (m2div_int > 255)
3027			return -EINVAL;
3028	}
3029	m2div_rem = dco_khz % (refclk_khz * m1div);
3030
3031	tmp = (u64)m2div_rem * (1 << 22);
3032	do_div(tmp, refclk_khz * m1div);
3033	m2div_frac = tmp;
3034
3035	switch (refclk_khz) {
3036	case 19200:
3037		iref_ndiv = 1;
3038		iref_trim = 28;
3039		iref_pulse_w = 1;
3040		break;
3041	case 24000:
3042		iref_ndiv = 1;
3043		iref_trim = 25;
3044		iref_pulse_w = 2;
3045		break;
3046	case 38400:
3047		iref_ndiv = 2;
3048		iref_trim = 28;
3049		iref_pulse_w = 1;
3050		break;
3051	default:
3052		MISSING_CASE(refclk_khz);
3053		return -EINVAL;
3054	}
3055
3056	/*
3057	 * tdc_res = 0.000003
3058	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3059	 *
3060	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3061	 * was supposed to be a division, but we rearranged the operations of
3062	 * the formula to avoid early divisions so we don't multiply the
3063	 * rounding errors.
3064	 *
3065	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3066	 * we also rearrange to work with integers.
3067	 *
3068	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3069	 * last division by 10.
3070	 */
3071	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
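	/*
	 * Sanity check: for a 38400 kHz reference the expression above
	 * yields (2000000000 / 5068800 + 5) / 10 = (394 + 5) / 10 = 39,
	 * matching int(2 / (0.000003 * 8 * 50 * 1.1) / 38.4 + 0.5).
	 */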
3072
3073	/*
3074	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3075	 * 32 bits. That's not a problem since we round the division down
3076	 * anyway.
3077	 */
3078	feedfwgain = (use_ssc || m2div_rem > 0) ?
3079		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3080
3081	if (dco_khz >= 9000000) {
3082		prop_coeff = 5;
3083		int_coeff = 10;
3084	} else {
3085		prop_coeff = 4;
3086		int_coeff = 8;
3087	}
3088
3089	if (use_ssc) {
3090		tmp = mul_u32_u32(dco_khz, 47 * 32);
3091		do_div(tmp, refclk_khz * m1div * 10000);
3092		ssc_stepsize = tmp;
3093
3094		tmp = mul_u32_u32(dco_khz, 1000);
3095		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3096	} else {
3097		ssc_stepsize = 0;
3098		ssc_steplen = 0;
3099	}
3100	ssc_steplog = 4;
3101
3102	/* write pll_state calculations */
3103	if (is_dkl) {
3104		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3105					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3106					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3107					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3108		if (i915->display.vbt.override_afc_startup) {
3109			u8 val = i915->display.vbt.override_afc_startup_val;
3110
3111			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3112		}
3113
3114		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3115					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3116
3117		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3118					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3119					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3120					(use_ssc ? DKL_PLL_SSC_EN : 0);
3121
3122		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3123					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3124
3125		hw_state->mg_pll_tdc_coldst_bias =
3126				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3127				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3128
3129	} else {
3130		hw_state->mg_pll_div0 =
3131			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3132			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3133			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3134
3135		hw_state->mg_pll_div1 =
3136			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3137			MG_PLL_DIV1_DITHER_DIV_2 |
3138			MG_PLL_DIV1_NDIVRATIO(1) |
3139			MG_PLL_DIV1_FBPREDIV(m1div);
3140
3141		hw_state->mg_pll_lf =
3142			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3143			MG_PLL_LF_AFCCNTSEL_512 |
3144			MG_PLL_LF_GAINCTRL(1) |
3145			MG_PLL_LF_INT_COEFF(int_coeff) |
3146			MG_PLL_LF_PROP_COEFF(prop_coeff);
3147
3148		hw_state->mg_pll_frac_lock =
3149			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3150			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3151			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3152			MG_PLL_FRAC_LOCK_DCODITHEREN |
3153			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3154		if (use_ssc || m2div_rem > 0)
3155			hw_state->mg_pll_frac_lock |=
3156				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3157
3158		hw_state->mg_pll_ssc =
3159			(use_ssc ? MG_PLL_SSC_EN : 0) |
3160			MG_PLL_SSC_TYPE(2) |
3161			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3162			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3163			MG_PLL_SSC_FLLEN |
3164			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3165
3166		hw_state->mg_pll_tdc_coldst_bias =
3167			MG_PLL_TDC_COLDST_COLDSTART |
3168			MG_PLL_TDC_COLDST_IREFINT_EN |
3169			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3170			MG_PLL_TDC_TDCOVCCORR_EN |
3171			MG_PLL_TDC_TDCSEL(3);
3172
3173		hw_state->mg_pll_bias =
3174			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3175			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3176			MG_PLL_BIAS_BIAS_BONUS(10) |
3177			MG_PLL_BIAS_BIASCAL_EN |
3178			MG_PLL_BIAS_CTRIM(12) |
3179			MG_PLL_BIAS_VREF_RDAC(4) |
3180			MG_PLL_BIAS_IREFTRIM(iref_trim);
3181
3182		if (refclk_khz == 38400) {
3183			hw_state->mg_pll_tdc_coldst_bias_mask =
3184				MG_PLL_TDC_COLDST_COLDSTART;
3185			hw_state->mg_pll_bias_mask = 0;
3186		} else {
3187			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3188			hw_state->mg_pll_bias_mask = -1U;
3189		}
3190
3191		hw_state->mg_pll_tdc_coldst_bias &=
3192			hw_state->mg_pll_tdc_coldst_bias_mask;
3193		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3194	}
3195
3196	return 0;
3197}
3198
3199static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3200				   const struct intel_shared_dpll *pll,
3201				   const struct intel_dpll_hw_state *dpll_hw_state)
3202{
3203	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3204	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3205	u64 tmp;
3206
3207	ref_clock = i915->display.dpll.ref_clks.nssc;
3208
3209	if (DISPLAY_VER(i915) >= 12) {
3210		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3211		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3212		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3213
3214		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3215			m2_frac = hw_state->mg_pll_bias &
3216				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3217			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3218		} else {
3219			m2_frac = 0;
3220		}
3221	} else {
3222		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3223		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3224
3225		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3226			m2_frac = hw_state->mg_pll_div0 &
3227				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3228			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3229		} else {
3230			m2_frac = 0;
3231		}
3232	}
3233
3234	switch (hw_state->mg_clktop2_hsclkctl &
3235		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3236	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3237		div1 = 2;
3238		break;
3239	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3240		div1 = 3;
3241		break;
3242	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3243		div1 = 5;
3244		break;
3245	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3246		div1 = 7;
3247		break;
3248	default:
3249		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3250		return 0;
3251	}
3252
3253	div2 = (hw_state->mg_clktop2_hsclkctl &
3254		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3255		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3256
3257	/* a div2 value of 0 is the same as 1, i.e. no division */
3258	if (div2 == 0)
3259		div2 = 1;
3260
3261	/*
3262	 * Real-number formula: ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2);
3263	 * delay the division by 2^22 in order to minimize possible rounding errors.
3264	 */
3265	tmp = (u64)m1 * m2_int * ref_clock +
3266	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3267	tmp = div_u64(tmp, 5 * div1 * div2);
3268
3269	return tmp;
3270}
3271
3272/**
3273 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3274 * @crtc_state: state for the CRTC to select the DPLL for
3275 * @port_dpll_id: the active @port_dpll_id to select
3276 *
3277 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3278 * CRTC.
3279 */
3280void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3281			      enum icl_port_dpll_id port_dpll_id)
3282{
3283	struct icl_port_dpll *port_dpll =
3284		&crtc_state->icl_port_dplls[port_dpll_id];
3285
3286	crtc_state->shared_dpll = port_dpll->pll;
3287	crtc_state->dpll_hw_state = port_dpll->hw_state;
3288}
3289
3290static void icl_update_active_dpll(struct intel_atomic_state *state,
3291				   struct intel_crtc *crtc,
3292				   struct intel_encoder *encoder)
3293{
3294	struct intel_crtc_state *crtc_state =
3295		intel_atomic_get_new_crtc_state(state, crtc);
3296	struct intel_digital_port *primary_port;
3297	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3298
3299	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3300		enc_to_mst(encoder)->primary :
3301		enc_to_dig_port(encoder);
3302
3303	if (primary_port &&
3304	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3305	     intel_tc_port_in_legacy_mode(primary_port)))
3306		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3307
3308	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3309}
3310
3311static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3312				      struct intel_crtc *crtc)
3313{
3314	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3315	struct intel_crtc_state *crtc_state =
3316		intel_atomic_get_new_crtc_state(state, crtc);
3317	struct icl_port_dpll *port_dpll =
3318		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3319	struct skl_wrpll_params pll_params = {};
3320	int ret;
3321
3322	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3323	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3324		ret = icl_calc_wrpll(crtc_state, &pll_params);
3325	else
3326		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3327
3328	if (ret)
3329		return ret;
3330
3331	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3332
3333	/* this is mainly for the fastset check */
3334	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3335
3336	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3337							    &port_dpll->hw_state);
3338
3339	return 0;
3340}
3341
3342static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3343				  struct intel_crtc *crtc,
3344				  struct intel_encoder *encoder)
3345{
3346	struct intel_display *display = to_intel_display(crtc);
3347	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3348	struct intel_crtc_state *crtc_state =
3349		intel_atomic_get_new_crtc_state(state, crtc);
3350	struct icl_port_dpll *port_dpll =
3351		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3352	enum port port = encoder->port;
3353	unsigned long dpll_mask;
3354
3355	if (IS_ALDERLAKE_S(i915)) {
3356		dpll_mask =
3357			BIT(DPLL_ID_DG1_DPLL3) |
3358			BIT(DPLL_ID_DG1_DPLL2) |
3359			BIT(DPLL_ID_ICL_DPLL1) |
3360			BIT(DPLL_ID_ICL_DPLL0);
3361	} else if (IS_DG1(i915)) {
3362		if (port == PORT_D || port == PORT_E) {
3363			dpll_mask =
3364				BIT(DPLL_ID_DG1_DPLL2) |
3365				BIT(DPLL_ID_DG1_DPLL3);
3366		} else {
3367			dpll_mask =
3368				BIT(DPLL_ID_DG1_DPLL0) |
3369				BIT(DPLL_ID_DG1_DPLL1);
3370		}
3371	} else if (IS_ROCKETLAKE(i915)) {
3372		dpll_mask =
3373			BIT(DPLL_ID_EHL_DPLL4) |
3374			BIT(DPLL_ID_ICL_DPLL1) |
3375			BIT(DPLL_ID_ICL_DPLL0);
3376	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3377		   port != PORT_A) {
3378		dpll_mask =
3379			BIT(DPLL_ID_EHL_DPLL4) |
3380			BIT(DPLL_ID_ICL_DPLL1) |
3381			BIT(DPLL_ID_ICL_DPLL0);
3382	} else {
3383		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3384	}
3385
3386	/* Eliminate DPLLs from consideration if reserved by HTI */
3387	dpll_mask &= ~intel_hti_dpll_mask(display);
3388
3389	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3390						&port_dpll->hw_state,
3391						dpll_mask);
3392	if (!port_dpll->pll)
3393		return -EINVAL;
3394
3395	intel_reference_shared_dpll(state, crtc,
3396				    port_dpll->pll, &port_dpll->hw_state);
3397
3398	icl_update_active_dpll(state, crtc, encoder);
3399
3400	return 0;
3401}
3402
3403static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3404				    struct intel_crtc *crtc)
3405{
3406	struct drm_i915_private *i915 = to_i915(state->base.dev);
3407	struct intel_crtc_state *crtc_state =
3408		intel_atomic_get_new_crtc_state(state, crtc);
3409	const struct intel_crtc_state *old_crtc_state =
3410		intel_atomic_get_old_crtc_state(state, crtc);
3411	struct icl_port_dpll *port_dpll =
3412		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3413	struct skl_wrpll_params pll_params = {};
3414	int ret;
3415
3416	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3417	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3418	if (ret)
3419		return ret;
3420
3421	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3422
3423	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3424	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3425	if (ret)
3426		return ret;
3427
3428	/* this is mainly for the fastset check */
3429	if (old_crtc_state->shared_dpll &&
3430	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3431		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3432	else
3433		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3434
3435	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3436							 &port_dpll->hw_state);
3437
3438	return 0;
3439}
3440
3441static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3442				struct intel_crtc *crtc,
3443				struct intel_encoder *encoder)
3444{
3445	struct intel_crtc_state *crtc_state =
3446		intel_atomic_get_new_crtc_state(state, crtc);
3447	struct icl_port_dpll *port_dpll =
3448		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3449	enum intel_dpll_id dpll_id;
3450	int ret;
3451
3452	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3453	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3454						&port_dpll->hw_state,
3455						BIT(DPLL_ID_ICL_TBTPLL));
3456	if (!port_dpll->pll)
3457		return -EINVAL;
3458	intel_reference_shared_dpll(state, crtc,
3459				    port_dpll->pll, &port_dpll->hw_state);
3460
3461
3462	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3463	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3464	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3465						&port_dpll->hw_state,
3466						BIT(dpll_id));
3467	if (!port_dpll->pll) {
3468		ret = -EINVAL;
3469		goto err_unreference_tbt_pll;
3470	}
3471	intel_reference_shared_dpll(state, crtc,
3472				    port_dpll->pll, &port_dpll->hw_state);
3473
3474	icl_update_active_dpll(state, crtc, encoder);
3475
3476	return 0;
3477
3478err_unreference_tbt_pll:
3479	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3480	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3481
3482	return ret;
3483}
3484
3485static int icl_compute_dplls(struct intel_atomic_state *state,
3486			     struct intel_crtc *crtc,
3487			     struct intel_encoder *encoder)
3488{
3489	if (intel_encoder_is_combo(encoder))
3490		return icl_compute_combo_phy_dpll(state, crtc);
3491	else if (intel_encoder_is_tc(encoder))
3492		return icl_compute_tc_phy_dplls(state, crtc);
3493
3494	MISSING_CASE(encoder->port);
3495
3496	return 0;
3497}
3498
3499static int icl_get_dplls(struct intel_atomic_state *state,
3500			 struct intel_crtc *crtc,
3501			 struct intel_encoder *encoder)
3502{
3503	if (intel_encoder_is_combo(encoder))
3504		return icl_get_combo_phy_dpll(state, crtc, encoder);
3505	else if (intel_encoder_is_tc(encoder))
3506		return icl_get_tc_phy_dplls(state, crtc, encoder);
3507
3508	MISSING_CASE(encoder->port);
3509
3510	return -EINVAL;
3511}
3512
3513static void icl_put_dplls(struct intel_atomic_state *state,
3514			  struct intel_crtc *crtc)
3515{
3516	const struct intel_crtc_state *old_crtc_state =
3517		intel_atomic_get_old_crtc_state(state, crtc);
3518	struct intel_crtc_state *new_crtc_state =
3519		intel_atomic_get_new_crtc_state(state, crtc);
3520	enum icl_port_dpll_id id;
3521
3522	new_crtc_state->shared_dpll = NULL;
3523
3524	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3525		const struct icl_port_dpll *old_port_dpll =
3526			&old_crtc_state->icl_port_dplls[id];
3527		struct icl_port_dpll *new_port_dpll =
3528			&new_crtc_state->icl_port_dplls[id];
3529
3530		new_port_dpll->pll = NULL;
3531
3532		if (!old_port_dpll->pll)
3533			continue;
3534
3535		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3536	}
3537}
3538
3539static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3540				struct intel_shared_dpll *pll,
3541				struct intel_dpll_hw_state *dpll_hw_state)
3542{
3543	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3544	const enum intel_dpll_id id = pll->info->id;
3545	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3546	intel_wakeref_t wakeref;
3547	bool ret = false;
3548	u32 val;
3549
3550	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3551
3552	wakeref = intel_display_power_get_if_enabled(i915,
3553						     POWER_DOMAIN_DISPLAY_CORE);
3554	if (!wakeref)
3555		return false;
3556
3557	val = intel_de_read(i915, enable_reg);
3558	if (!(val & PLL_ENABLE))
3559		goto out;
3560
3561	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3562						  MG_REFCLKIN_CTL(tc_port));
3563	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3564
3565	hw_state->mg_clktop2_coreclkctl1 =
3566		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3567	hw_state->mg_clktop2_coreclkctl1 &=
3568		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3569
3570	hw_state->mg_clktop2_hsclkctl =
3571		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3572	hw_state->mg_clktop2_hsclkctl &=
3573		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3574		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3575		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3576		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3577
3578	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3579	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3580	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3581	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3582						   MG_PLL_FRAC_LOCK(tc_port));
3583	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3584
3585	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3586	hw_state->mg_pll_tdc_coldst_bias =
3587		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3588
3589	if (i915->display.dpll.ref_clks.nssc == 38400) {
3590		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3591		hw_state->mg_pll_bias_mask = 0;
3592	} else {
3593		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3594		hw_state->mg_pll_bias_mask = -1U;
3595	}
3596
3597	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3598	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3599
3600	ret = true;
3601out:
3602	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3603	return ret;
3604}
3605
3606static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3607				 struct intel_shared_dpll *pll,
3608				 struct intel_dpll_hw_state *dpll_hw_state)
3609{
3610	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3611	const enum intel_dpll_id id = pll->info->id;
3612	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3613	intel_wakeref_t wakeref;
3614	bool ret = false;
3615	u32 val;
3616
3617	wakeref = intel_display_power_get_if_enabled(i915,
3618						     POWER_DOMAIN_DISPLAY_CORE);
3619	if (!wakeref)
3620		return false;
3621
3622	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3623	if (!(val & PLL_ENABLE))
3624		goto out;
3625
3626	/*
3627	 * All registers read here have the same HIP_INDEX_REG even though
3628	 * they are on different building blocks
3629	 */
3630	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3631						       DKL_REFCLKIN_CTL(tc_port));
3632	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3633
3634	hw_state->mg_clktop2_hsclkctl =
3635		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3636	hw_state->mg_clktop2_hsclkctl &=
3637		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3638		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3639		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3640		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3641
3642	hw_state->mg_clktop2_coreclkctl1 =
3643		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3644	hw_state->mg_clktop2_coreclkctl1 &=
3645		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3646
3647	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3648	val = DKL_PLL_DIV0_MASK;
3649	if (i915->display.vbt.override_afc_startup)
3650		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3651	hw_state->mg_pll_div0 &= val;
3652
3653	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3654	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3655				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3656
3657	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3658	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3659				 DKL_PLL_SSC_STEP_LEN_MASK |
3660				 DKL_PLL_SSC_STEP_NUM_MASK |
3661				 DKL_PLL_SSC_EN);
3662
3663	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3664	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3665				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3666
3667	hw_state->mg_pll_tdc_coldst_bias =
3668		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3669	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3670					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3671
3672	ret = true;
3673out:
3674	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3675	return ret;
3676}
3677
3678static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3679				 struct intel_shared_dpll *pll,
3680				 struct intel_dpll_hw_state *dpll_hw_state,
3681				 i915_reg_t enable_reg)
3682{
3683	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3684	const enum intel_dpll_id id = pll->info->id;
3685	intel_wakeref_t wakeref;
3686	bool ret = false;
3687	u32 val;
3688
3689	wakeref = intel_display_power_get_if_enabled(i915,
3690						     POWER_DOMAIN_DISPLAY_CORE);
3691	if (!wakeref)
3692		return false;
3693
3694	val = intel_de_read(i915, enable_reg);
3695	if (!(val & PLL_ENABLE))
3696		goto out;
3697
3698	if (IS_ALDERLAKE_S(i915)) {
3699		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3700		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3701	} else if (IS_DG1(i915)) {
3702		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3703		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3704	} else if (IS_ROCKETLAKE(i915)) {
3705		hw_state->cfgcr0 = intel_de_read(i915,
3706						 RKL_DPLL_CFGCR0(id));
3707		hw_state->cfgcr1 = intel_de_read(i915,
3708						 RKL_DPLL_CFGCR1(id));
3709	} else if (DISPLAY_VER(i915) >= 12) {
3710		hw_state->cfgcr0 = intel_de_read(i915,
3711						 TGL_DPLL_CFGCR0(id));
3712		hw_state->cfgcr1 = intel_de_read(i915,
3713						 TGL_DPLL_CFGCR1(id));
3714		if (i915->display.vbt.override_afc_startup) {
3715			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3716			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3717		}
3718	} else {
3719		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3720		    id == DPLL_ID_EHL_DPLL4) {
3721			hw_state->cfgcr0 = intel_de_read(i915,
3722							 ICL_DPLL_CFGCR0(4));
3723			hw_state->cfgcr1 = intel_de_read(i915,
3724							 ICL_DPLL_CFGCR1(4));
3725		} else {
3726			hw_state->cfgcr0 = intel_de_read(i915,
3727							 ICL_DPLL_CFGCR0(id));
3728			hw_state->cfgcr1 = intel_de_read(i915,
3729							 ICL_DPLL_CFGCR1(id));
3730		}
3731	}
3732
3733	ret = true;
3734out:
3735	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3736	return ret;
3737}
3738
3739static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3740				   struct intel_shared_dpll *pll,
3741				   struct intel_dpll_hw_state *dpll_hw_state)
3742{
3743	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3744
3745	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3746}
3747
3748static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3749				 struct intel_shared_dpll *pll,
3750				 struct intel_dpll_hw_state *dpll_hw_state)
3751{
3752	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3753}
3754
3755static void icl_dpll_write(struct drm_i915_private *i915,
3756			   struct intel_shared_dpll *pll,
3757			   const struct icl_dpll_hw_state *hw_state)
3758{
3759	const enum intel_dpll_id id = pll->info->id;
3760	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3761
3762	if (IS_ALDERLAKE_S(i915)) {
3763		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3764		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3765	} else if (IS_DG1(i915)) {
3766		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3767		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3768	} else if (IS_ROCKETLAKE(i915)) {
3769		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3770		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3771	} else if (DISPLAY_VER(i915) >= 12) {
3772		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3773		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3774		div0_reg = TGL_DPLL0_DIV0(id);
3775	} else {
3776		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3777		    id == DPLL_ID_EHL_DPLL4) {
3778			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3779			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3780		} else {
3781			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3782			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3783		}
3784	}
3785
3786	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3787	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3788	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3789			 !i915_mmio_reg_valid(div0_reg));
3790	if (i915->display.vbt.override_afc_startup &&
3791	    i915_mmio_reg_valid(div0_reg))
3792		intel_de_rmw(i915, div0_reg,
3793			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3794	intel_de_posting_read(i915, cfgcr1_reg);
3795}
3796
3797static void icl_mg_pll_write(struct drm_i915_private *i915,
3798			     struct intel_shared_dpll *pll,
3799			     const struct icl_dpll_hw_state *hw_state)
3800{
3801	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3802
3803	/*
3804	 * Some of the following registers have reserved fields, so program
3805	 * these with RMW based on a mask. The mask can be fixed or generated
3806	 * during the calc/readout phase if the mask depends on some other HW
3807	 * state like refclk, see icl_calc_mg_pll_state().
3808	 */
3809	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3810		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3811
3812	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3813		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3814		     hw_state->mg_clktop2_coreclkctl1);
3815
3816	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3817		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3818		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3819		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3820		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3821		     hw_state->mg_clktop2_hsclkctl);
3822
3823	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3824	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3825	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3826	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3827		       hw_state->mg_pll_frac_lock);
3828	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3829
3830	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3831		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3832
3833	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3834		     hw_state->mg_pll_tdc_coldst_bias_mask,
3835		     hw_state->mg_pll_tdc_coldst_bias);
3836
3837	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3838}
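
/*
 * Purely illustrative sketch of the read-modify-write pattern used above,
 * spelled out for a single register: read the current value, clear the bits
 * covered by the mask while leaving reserved bits alone, then set the tracked
 * bits. The mask is either a constant or one derived during readout (e.g.
 * hw_state->mg_pll_bias_mask depends on the reference clock, see
 * mg_pll_get_hw_state()):
 *
 *	u32 tmp;
 *
 *	tmp = intel_de_read(i915, MG_PLL_BIAS(tc_port));
 *	tmp &= ~hw_state->mg_pll_bias_mask;
 *	tmp |= hw_state->mg_pll_bias;
 *	intel_de_write(i915, MG_PLL_BIAS(tc_port), tmp);
 *
 * which is what each intel_de_rmw() call above collapses into one helper.
 */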
3839
3840static void dkl_pll_write(struct drm_i915_private *i915,
3841			  struct intel_shared_dpll *pll,
3842			  const struct icl_dpll_hw_state *hw_state)
3843{
3844	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3845	u32 val;
3846
3847	/*
3848	 * All registers programmed here have the same HIP_INDEX_REG even
3849	 * though they are on different building blocks
3850	 */
3851	/* All the registers are RMW */
3852	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3853	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3854	val |= hw_state->mg_refclkin_ctl;
3855	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3856
3857	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3858	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3859	val |= hw_state->mg_clktop2_coreclkctl1;
3860	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3861
3862	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3863	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3864		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3865		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3866		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3867	val |= hw_state->mg_clktop2_hsclkctl;
3868	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3869
3870	val = DKL_PLL_DIV0_MASK;
3871	if (i915->display.vbt.override_afc_startup)
3872		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3873	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3874			  hw_state->mg_pll_div0);
3875
3876	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3877	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3878		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3879	val |= hw_state->mg_pll_div1;
3880	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3881
3882	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3883	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3884		 DKL_PLL_SSC_STEP_LEN_MASK |
3885		 DKL_PLL_SSC_STEP_NUM_MASK |
3886		 DKL_PLL_SSC_EN);
3887	val |= hw_state->mg_pll_ssc;
3888	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3889
3890	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3891	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3892		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3893	val |= hw_state->mg_pll_bias;
3894	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3895
3896	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3897	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3898		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3899	val |= hw_state->mg_pll_tdc_coldst_bias;
3900	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3901
3902	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3903}
3904
3905static void icl_pll_power_enable(struct drm_i915_private *i915,
3906				 struct intel_shared_dpll *pll,
3907				 i915_reg_t enable_reg)
3908{
3909	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3910
3911	/*
3912	 * The spec says we need to "wait" but it also says it should be
3913	 * immediate.
3914	 */
3915	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3916		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3917			pll->info->id);
3918}
3919
3920static void icl_pll_enable(struct drm_i915_private *i915,
3921			   struct intel_shared_dpll *pll,
3922			   i915_reg_t enable_reg)
3923{
3924	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3925
3926	/* Timeout is actually 600us. */
3927	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3928		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3929}
3930
3931static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3932{
3933	u32 val;
3934
3935	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3936	    pll->info->id != DPLL_ID_ICL_DPLL0)
3937		return;
3938	/*
3939	 * Wa_16011069516:adl-p[a0]
3940	 *
3941	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3942	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3943	 * sanity check this assumption with a double read, which presumably
3944	 * returns the correct value even with clock gating on.
3945	 *
3946	 * Instead of the usual place for workarounds we apply this one here,
3947	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3948	 */
3949	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3950	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3951	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3952		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3953}
3954
3955static void combo_pll_enable(struct drm_i915_private *i915,
3956			     struct intel_shared_dpll *pll,
3957			     const struct intel_dpll_hw_state *dpll_hw_state)
3958{
3959	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3960	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3961
3962	icl_pll_power_enable(i915, pll, enable_reg);
3963
3964	icl_dpll_write(i915, pll, hw_state);
3965
3966	/*
3967	 * DVFS pre sequence would be here, but in our driver the cdclk code
3968	 * paths should already be setting the appropriate voltage, hence we do
3969	 * nothing here.
3970	 */
3971
3972	icl_pll_enable(i915, pll, enable_reg);
3973
3974	adlp_cmtg_clock_gating_wa(i915, pll);
3975
3976	/* DVFS post sequence would be here. See the comment above. */
3977}
3978
3979static void tbt_pll_enable(struct drm_i915_private *i915,
3980			   struct intel_shared_dpll *pll,
3981			   const struct intel_dpll_hw_state *dpll_hw_state)
3982{
3983	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3984
3985	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3986
3987	icl_dpll_write(i915, pll, hw_state);
3988
3989	/*
3990	 * DVFS pre sequence would be here, but in our driver the cdclk code
3991	 * paths should already be setting the appropriate voltage, hence we do
3992	 * nothing here.
3993	 */
3994
3995	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3996
3997	/* DVFS post sequence would be here. See the comment above. */
3998}
3999
4000static void mg_pll_enable(struct drm_i915_private *i915,
4001			  struct intel_shared_dpll *pll,
4002			  const struct intel_dpll_hw_state *dpll_hw_state)
4003{
4004	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4005	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4006
4007	icl_pll_power_enable(i915, pll, enable_reg);
4008
4009	if (DISPLAY_VER(i915) >= 12)
4010		dkl_pll_write(i915, pll, hw_state);
4011	else
4012		icl_mg_pll_write(i915, pll, hw_state);
4013
4014	/*
4015	 * DVFS pre sequence would be here, but in our driver the cdclk code
4016	 * paths should already be setting the appropriate voltage, hence we do
4017	 * nothing here.
4018	 */
4019
4020	icl_pll_enable(i915, pll, enable_reg);
4021
4022	/* DVFS post sequence would be here. See the comment above. */
4023}
4024
4025static void icl_pll_disable(struct drm_i915_private *i915,
4026			    struct intel_shared_dpll *pll,
4027			    i915_reg_t enable_reg)
4028{
4029	/* The first steps are done by intel_ddi_post_disable(). */
4030
4031	/*
4032	 * DVFS pre sequence would be here, but in our driver the cdclk code
4033	 * paths should already be setting the appropriate voltage, hence we do
4034	 * nothing here.
4035	 */
4036
4037	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4038
4039	/* Timeout is actually 1us. */
4040	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4041		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4042
4043	/* DVFS post sequence would be here. See the comment above. */
4044
4045	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4046
4047	/*
4048	 * The spec says we need to "wait" but it also says it should be
4049	 * immediate.
4050	 */
4051	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4052		drm_err(&i915->drm, "PLL %d Power not disabled\n",
4053			pll->info->id);
4054}
4055
4056static void combo_pll_disable(struct drm_i915_private *i915,
4057			      struct intel_shared_dpll *pll)
4058{
4059	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4060
4061	icl_pll_disable(i915, pll, enable_reg);
4062}
4063
4064static void tbt_pll_disable(struct drm_i915_private *i915,
4065			    struct intel_shared_dpll *pll)
4066{
4067	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4068}
4069
4070static void mg_pll_disable(struct drm_i915_private *i915,
4071			   struct intel_shared_dpll *pll)
4072{
4073	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4074
4075	icl_pll_disable(i915, pll, enable_reg);
4076}
4077
4078static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4079{
4080	/* No SSC ref */
4081	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4082}
4083
4084static void icl_dump_hw_state(struct drm_printer *p,
4085			      const struct intel_dpll_hw_state *dpll_hw_state)
4086{
4087	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4088
4089	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4090		   "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4091		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4092		   "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4093		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4094		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4095		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4096		   hw_state->mg_refclkin_ctl,
4097		   hw_state->mg_clktop2_coreclkctl1,
4098		   hw_state->mg_clktop2_hsclkctl,
4099		   hw_state->mg_pll_div0,
4100		   hw_state->mg_pll_div1,
4101		   hw_state->mg_pll_lf,
4102		   hw_state->mg_pll_frac_lock,
4103		   hw_state->mg_pll_ssc,
4104		   hw_state->mg_pll_bias,
4105		   hw_state->mg_pll_tdc_coldst_bias);
4106}
4107
4108static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4109				 const struct intel_dpll_hw_state *_b)
4110{
4111	const struct icl_dpll_hw_state *a = &_a->icl;
4112	const struct icl_dpll_hw_state *b = &_b->icl;
4113
4114	/* FIXME split combo vs. mg more thoroughly */
4115	return a->cfgcr0 == b->cfgcr0 &&
4116		a->cfgcr1 == b->cfgcr1 &&
4117		a->div0 == b->div0 &&
4118		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4119		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4120		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4121		a->mg_pll_div0 == b->mg_pll_div0 &&
4122		a->mg_pll_div1 == b->mg_pll_div1 &&
4123		a->mg_pll_lf == b->mg_pll_lf &&
4124		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4125		a->mg_pll_ssc == b->mg_pll_ssc &&
4126		a->mg_pll_bias == b->mg_pll_bias &&
4127		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4128}
4129
4130static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4131	.enable = combo_pll_enable,
4132	.disable = combo_pll_disable,
4133	.get_hw_state = combo_pll_get_hw_state,
4134	.get_freq = icl_ddi_combo_pll_get_freq,
4135};
4136
4137static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4138	.enable = tbt_pll_enable,
4139	.disable = tbt_pll_disable,
4140	.get_hw_state = tbt_pll_get_hw_state,
4141	.get_freq = icl_ddi_tbt_pll_get_freq,
4142};
4143
4144static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4145	.enable = mg_pll_enable,
4146	.disable = mg_pll_disable,
4147	.get_hw_state = mg_pll_get_hw_state,
4148	.get_freq = icl_ddi_mg_pll_get_freq,
4149};
4150
4151static const struct dpll_info icl_plls[] = {
4152	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4153	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4154	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4155	  .is_alt_port_dpll = true, },
4156	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4157	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4158	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4159	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4160	{}
4161};
4162
4163static const struct intel_dpll_mgr icl_pll_mgr = {
4164	.dpll_info = icl_plls,
4165	.compute_dplls = icl_compute_dplls,
4166	.get_dplls = icl_get_dplls,
4167	.put_dplls = icl_put_dplls,
4168	.update_active_dpll = icl_update_active_dpll,
4169	.update_ref_clks = icl_update_dpll_ref_clks,
4170	.dump_hw_state = icl_dump_hw_state,
4171	.compare_hw_state = icl_compare_hw_state,
4172};
4173
4174static const struct dpll_info ehl_plls[] = {
4175	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4176	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4177	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4178	  .power_domain = POWER_DOMAIN_DC_OFF, },
4179	{}
4180};
4181
4182static const struct intel_dpll_mgr ehl_pll_mgr = {
4183	.dpll_info = ehl_plls,
4184	.compute_dplls = icl_compute_dplls,
4185	.get_dplls = icl_get_dplls,
4186	.put_dplls = icl_put_dplls,
4187	.update_ref_clks = icl_update_dpll_ref_clks,
4188	.dump_hw_state = icl_dump_hw_state,
4189	.compare_hw_state = icl_compare_hw_state,
4190};
4191
4192static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4193	.enable = mg_pll_enable,
4194	.disable = mg_pll_disable,
4195	.get_hw_state = dkl_pll_get_hw_state,
4196	.get_freq = icl_ddi_mg_pll_get_freq,
4197};
4198
4199static const struct dpll_info tgl_plls[] = {
4200	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4201	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4202	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4203	  .is_alt_port_dpll = true, },
4204	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4205	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4206	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4207	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4208	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4209	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4210	{}
4211};
4212
4213static const struct intel_dpll_mgr tgl_pll_mgr = {
4214	.dpll_info = tgl_plls,
4215	.compute_dplls = icl_compute_dplls,
4216	.get_dplls = icl_get_dplls,
4217	.put_dplls = icl_put_dplls,
4218	.update_active_dpll = icl_update_active_dpll,
4219	.update_ref_clks = icl_update_dpll_ref_clks,
4220	.dump_hw_state = icl_dump_hw_state,
4221	.compare_hw_state = icl_compare_hw_state,
4222};
4223
4224static const struct dpll_info rkl_plls[] = {
4225	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4226	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4227	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4228	{}
4229};
4230
4231static const struct intel_dpll_mgr rkl_pll_mgr = {
4232	.dpll_info = rkl_plls,
4233	.compute_dplls = icl_compute_dplls,
4234	.get_dplls = icl_get_dplls,
4235	.put_dplls = icl_put_dplls,
4236	.update_ref_clks = icl_update_dpll_ref_clks,
4237	.dump_hw_state = icl_dump_hw_state,
4238	.compare_hw_state = icl_compare_hw_state,
4239};
4240
4241static const struct dpll_info dg1_plls[] = {
4242	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4243	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4244	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4245	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4246	{}
4247};
4248
4249static const struct intel_dpll_mgr dg1_pll_mgr = {
4250	.dpll_info = dg1_plls,
4251	.compute_dplls = icl_compute_dplls,
4252	.get_dplls = icl_get_dplls,
4253	.put_dplls = icl_put_dplls,
4254	.update_ref_clks = icl_update_dpll_ref_clks,
4255	.dump_hw_state = icl_dump_hw_state,
4256	.compare_hw_state = icl_compare_hw_state,
4257};
4258
4259static const struct dpll_info adls_plls[] = {
4260	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4261	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4262	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4263	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4264	{}
4265};
4266
4267static const struct intel_dpll_mgr adls_pll_mgr = {
4268	.dpll_info = adls_plls,
4269	.compute_dplls = icl_compute_dplls,
4270	.get_dplls = icl_get_dplls,
4271	.put_dplls = icl_put_dplls,
4272	.update_ref_clks = icl_update_dpll_ref_clks,
4273	.dump_hw_state = icl_dump_hw_state,
4274	.compare_hw_state = icl_compare_hw_state,
4275};
4276
4277static const struct dpll_info adlp_plls[] = {
4278	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4279	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4280	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4281	  .is_alt_port_dpll = true, },
4282	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4283	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4284	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4285	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4286	{}
4287};
4288
4289static const struct intel_dpll_mgr adlp_pll_mgr = {
4290	.dpll_info = adlp_plls,
4291	.compute_dplls = icl_compute_dplls,
4292	.get_dplls = icl_get_dplls,
4293	.put_dplls = icl_put_dplls,
4294	.update_active_dpll = icl_update_active_dpll,
4295	.update_ref_clks = icl_update_dpll_ref_clks,
4296	.dump_hw_state = icl_dump_hw_state,
4297	.compare_hw_state = icl_compare_hw_state,
4298};
4299
4300/**
4301 * intel_shared_dpll_init - Initialize shared DPLLs
4302 * @i915: i915 device
4303 *
4304 * Initialize shared DPLLs for @i915.
4305 */
4306void intel_shared_dpll_init(struct drm_i915_private *i915)
4307{
4308	const struct intel_dpll_mgr *dpll_mgr = NULL;
4309	const struct dpll_info *dpll_info;
4310	int i;
4311
4312	mutex_init(&i915->display.dpll.lock);
4313
4314	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4315		/* No shared DPLLs on DG2 or display version 14+; port PLLs are part of the PHY */
4316		dpll_mgr = NULL;
4317	else if (IS_ALDERLAKE_P(i915))
4318		dpll_mgr = &adlp_pll_mgr;
4319	else if (IS_ALDERLAKE_S(i915))
4320		dpll_mgr = &adls_pll_mgr;
4321	else if (IS_DG1(i915))
4322		dpll_mgr = &dg1_pll_mgr;
4323	else if (IS_ROCKETLAKE(i915))
4324		dpll_mgr = &rkl_pll_mgr;
4325	else if (DISPLAY_VER(i915) >= 12)
4326		dpll_mgr = &tgl_pll_mgr;
4327	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4328		dpll_mgr = &ehl_pll_mgr;
4329	else if (DISPLAY_VER(i915) >= 11)
4330		dpll_mgr = &icl_pll_mgr;
4331	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4332		dpll_mgr = &bxt_pll_mgr;
4333	else if (DISPLAY_VER(i915) == 9)
4334		dpll_mgr = &skl_pll_mgr;
4335	else if (HAS_DDI(i915))
4336		dpll_mgr = &hsw_pll_mgr;
4337	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4338		dpll_mgr = &pch_pll_mgr;
4339
4340	if (!dpll_mgr)
4341		return;
4342
4343	dpll_info = dpll_mgr->dpll_info;
4344
4345	for (i = 0; dpll_info[i].name; i++) {
4346		if (drm_WARN_ON(&i915->drm,
4347				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4348			break;
4349
4350		/* must fit into unsigned long bitmask on 32bit */
4351		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4352			break;
4353
4354		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4355		i915->display.dpll.shared_dplls[i].index = i;
4356	}
4357
4358	i915->display.dpll.mgr = dpll_mgr;
4359	i915->display.dpll.num_shared_dpll = i;
4360}
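
/*
 * Illustrative sketch only: once intel_shared_dpll_init() has run, the PLLs
 * registered for the platform can be walked with for_each_shared_dpll(),
 * e.g. for debug output:
 *
 *	struct intel_shared_dpll *pll;
 *	int i;
 *
 *	for_each_shared_dpll(i915, pll, i)
 *		drm_dbg_kms(&i915->drm, "registered %s (id %d)\n",
 *			    pll->info->name, pll->info->id);
 */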
4361
4362/**
4363 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4364 * @state: atomic state
4365 * @crtc: CRTC to compute DPLLs for
4366 * @encoder: encoder
4367 *
4368 * This function computes the DPLL state for the given CRTC and encoder.
4369 *
4370 * The new configuration in the atomic commit @state is made effective by
4371 * calling intel_shared_dpll_swap_state().
4372 *
4373 * Returns:
4374 * 0 on success, negative error code on failure.
4375 */
4376int intel_compute_shared_dplls(struct intel_atomic_state *state,
4377			       struct intel_crtc *crtc,
4378			       struct intel_encoder *encoder)
4379{
4380	struct drm_i915_private *i915 = to_i915(state->base.dev);
4381	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4382
4383	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4384		return -EINVAL;
4385
4386	return dpll_mgr->compute_dplls(state, crtc, encoder);
4387}
4388
4389/**
4390 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4391 * @state: atomic state
4392 * @crtc: CRTC to reserve DPLLs for
4393 * @encoder: encoder
4394 *
4395 * This function reserves all required DPLLs for the given CRTC and encoder
4396 * combination in the current atomic commit @state and the new @crtc atomic
4397 * state.
4398 *
4399 * The new configuration in the atomic commit @state is made effective by
4400 * calling intel_shared_dpll_swap_state().
4401 *
4402 * The reserved DPLLs should be released by calling
4403 * intel_release_shared_dplls().
4404 *
4405 * Returns:
4406 * 0 if all required DPLLs were successfully reserved,
4407 * negative error code otherwise.
4408 */
4409int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4410			       struct intel_crtc *crtc,
4411			       struct intel_encoder *encoder)
4412{
4413	struct drm_i915_private *i915 = to_i915(state->base.dev);
4414	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4415
4416	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4417		return -EINVAL;
4418
4419	return dpll_mgr->get_dplls(state, crtc, encoder);
4420}
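
/*
 * Illustrative sketch only, with a made-up hook name
 * (example_crtc_compute_clock): the compute and reserve steps are typically
 * chained during atomic check, and the result only takes effect once
 * intel_shared_dpll_swap_state() runs in the commit phase:
 *
 *	static int example_crtc_compute_clock(struct intel_atomic_state *state,
 *					      struct intel_crtc *crtc,
 *					      struct intel_encoder *encoder)
 *	{
 *		int ret;
 *
 *		ret = intel_compute_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;
 *
 *		return intel_reserve_shared_dplls(state, crtc, encoder);
 *	}
 */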
4421
4422/**
4423 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4424 * @state: atomic state
4425 * @crtc: crtc from which the DPLLs are to be released
4426 *
4427 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4428 * from the current atomic commit @state and the old @crtc atomic state.
4429 *
4430 * The new configuration in the atomic commit @state is made effective by
4431 * calling intel_shared_dpll_swap_state().
4432 */
4433void intel_release_shared_dplls(struct intel_atomic_state *state,
4434				struct intel_crtc *crtc)
4435{
4436	struct drm_i915_private *i915 = to_i915(state->base.dev);
4437	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4438
4439	/*
4440	 * FIXME: this function is called for every platform having a
4441	 * compute_clock hook, even though the platform doesn't yet support
4442	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4443	 * called on those.
4444	 */
4445	if (!dpll_mgr)
4446		return;
4447
4448	dpll_mgr->put_dplls(state, crtc);
4449}
4450
4451/**
4452 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4453 * @state: atomic state
4454 * @crtc: the CRTC for which to update the active DPLL
4455 * @encoder: encoder determining the type of port DPLL
4456 *
4457 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4458 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4459 * DPLL selected will be based on the current mode of the encoder's port.
4460 */
4461void intel_update_active_dpll(struct intel_atomic_state *state,
4462			      struct intel_crtc *crtc,
4463			      struct intel_encoder *encoder)
4464{
4465	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4466	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4467
4468	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4469		return;
4470
4471	dpll_mgr->update_active_dpll(state, crtc, encoder);
4472}
4473
4474/**
4475 * intel_dpll_get_freq - calculate the DPLL's output frequency
4476 * @i915: i915 device
4477 * @pll: DPLL for which to calculate the output frequency
4478 * @dpll_hw_state: DPLL state from which to calculate the output frequency
4479 *
4480 * Return the output frequency corresponding to @pll's state passed in via @dpll_hw_state.
4481 */
4482int intel_dpll_get_freq(struct drm_i915_private *i915,
4483			const struct intel_shared_dpll *pll,
4484			const struct intel_dpll_hw_state *dpll_hw_state)
4485{
4486	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4487		return 0;
4488
4489	return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
4490}
4491
4492/**
4493 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4494 * @i915: i915 device
4495 * @pll: DPLL for which to read out the hardware state
4496 * @dpll_hw_state: DPLL's hardware state
4497 *
4498 * Read out @pll's hardware state into @dpll_hw_state.
4499 */
4500bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4501			     struct intel_shared_dpll *pll,
4502			     struct intel_dpll_hw_state *dpll_hw_state)
4503{
4504	return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
4505}
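
/*
 * Illustrative sketch only: the two helpers above can be paired to read back
 * a PLL's current configuration and derive its output frequency from it,
 * e.g. for debug output:
 *
 *	struct intel_dpll_hw_state hw_state = {};
 *
 *	if (intel_dpll_get_hw_state(i915, pll, &hw_state))
 *		drm_dbg_kms(&i915->drm, "%s: freq %d\n", pll->info->name,
 *			    intel_dpll_get_freq(i915, pll, &hw_state));
 */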
4506
4507static void readout_dpll_hw_state(struct drm_i915_private *i915,
4508				  struct intel_shared_dpll *pll)
4509{
4510	struct intel_crtc *crtc;
4511
4512	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4513
4514	if (pll->on && pll->info->power_domain)
4515		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4516
4517	pll->state.pipe_mask = 0;
4518	for_each_intel_crtc(&i915->drm, crtc) {
4519		struct intel_crtc_state *crtc_state =
4520			to_intel_crtc_state(crtc->base.state);
4521
4522		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4523			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4524	}
4525	pll->active_mask = pll->state.pipe_mask;
4526
4527	drm_dbg_kms(&i915->drm,
4528		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4529		    pll->info->name, pll->state.pipe_mask, pll->on);
4530}
4531
4532void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4533{
4534	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4535		i915->display.dpll.mgr->update_ref_clks(i915);
4536}
4537
4538void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4539{
4540	struct intel_shared_dpll *pll;
4541	int i;
4542
4543	for_each_shared_dpll(i915, pll, i)
4544		readout_dpll_hw_state(i915, pll);
4545}
4546
4547static void sanitize_dpll_state(struct drm_i915_private *i915,
4548				struct intel_shared_dpll *pll)
4549{
4550	if (!pll->on)
4551		return;
4552
4553	adlp_cmtg_clock_gating_wa(i915, pll);
4554
4555	if (pll->active_mask)
4556		return;
4557
4558	drm_dbg_kms(&i915->drm,
4559		    "%s enabled but not in use, disabling\n",
4560		    pll->info->name);
4561
4562	_intel_disable_shared_dpll(i915, pll);
4563}
4564
4565void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4566{
4567	struct intel_shared_dpll *pll;
4568	int i;
4569
4570	for_each_shared_dpll(i915, pll, i)
4571		sanitize_dpll_state(i915, pll);
4572}
4573
4574/**
4575 * intel_dpll_dump_hw_state - dump hw_state
4576 * @i915: i915 drm device
4577 * @p: where to print the state to
4578 * @dpll_hw_state: hw state to be dumped
4579 *
4580 * Dump out the relevant values in @dpll_hw_state.
4581 */
4582void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4583			      struct drm_printer *p,
4584			      const struct intel_dpll_hw_state *dpll_hw_state)
4585{
4586	if (i915->display.dpll.mgr) {
4587		i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
4588	} else {
4589		/* fallback for platforms that don't use the shared dpll
4590		 * infrastructure
4591		 */
4592		ibx_dump_hw_state(p, dpll_hw_state);
4593	}
4594}
4595
4596/**
4597 * intel_dpll_compare_hw_state - compare the two states
4598 * @i915: i915 drm device
4599 * @a: first DPLL hw state
4600 * @b: second DPLL hw state
4601 *
4602 * Compare DPLL hw states @a and @b.
4603 *
4604 * Returns: true if the states are equal, false if they differ
4605 */
4606bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4607				 const struct intel_dpll_hw_state *a,
4608				 const struct intel_dpll_hw_state *b)
4609{
4610	if (i915->display.dpll.mgr) {
4611		return i915->display.dpll.mgr->compare_hw_state(a, b);
4612	} else {
4613		/* fallback for platforms that don't use the shared dpll
4614		 * infrastructure
4615		 */
4616		return ibx_compare_hw_state(a, b);
4617	}
4618}
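
/*
 * Illustrative sketch only, assuming the caller already has a
 * struct drm_printer p and two states sw_state/hw_state to check (all three
 * names are made up): the compare and dump helpers above can be combined to
 * flag and log a mismatch:
 *
 *	if (!intel_dpll_compare_hw_state(i915, &sw_state, &hw_state)) {
 *		drm_printf(&p, "DPLL sw/hw state mismatch:\n");
 *		intel_dpll_dump_hw_state(i915, &p, &sw_state);
 *		intel_dpll_dump_hw_state(i915, &p, &hw_state);
 *	}
 */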
4619
4620static void
4621verify_single_dpll_state(struct drm_i915_private *i915,
4622			 struct intel_shared_dpll *pll,
4623			 struct intel_crtc *crtc,
4624			 const struct intel_crtc_state *new_crtc_state)
4625{
4626	struct intel_display *display = &i915->display;
4627	struct intel_dpll_hw_state dpll_hw_state = {};
4628	u8 pipe_mask;
4629	bool active;
4630
4631	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4632
4633	if (!pll->info->always_on) {
4634		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
4635					 "%s: pll in active use but not on in sw tracking\n",
4636					 pll->info->name);
4637		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
4638					 "%s: pll is on but not used by any active pipe\n",
4639					 pll->info->name);
4640		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
4641					 "%s: pll on state mismatch (expected %i, found %i)\n",
4642					 pll->info->name, pll->on, active);
4643	}
4644
4645	if (!crtc) {
4646		INTEL_DISPLAY_STATE_WARN(display,
4647					 pll->active_mask & ~pll->state.pipe_mask,
4648					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
4649					 pll->info->name, pll->active_mask, pll->state.pipe_mask);
4650
4651		return;
4652	}
4653
4654	pipe_mask = BIT(crtc->pipe);
4655
4656	if (new_crtc_state->hw.active)
4657		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
4658					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4659					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4660	else
4661		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4662					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4663					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4664
4665	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
4666				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4667				 pll->info->name, pipe_mask, pll->state.pipe_mask);
4668
4669	INTEL_DISPLAY_STATE_WARN(display,
4670				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4671						   sizeof(dpll_hw_state)),
4672				 "%s: pll hw state mismatch\n",
4673				 pll->info->name);
4674}
4675
4676static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4677			      const struct intel_shared_dpll *new_pll)
4678{
4679	return old_pll && new_pll && old_pll != new_pll &&
4680		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4681}
4682
4683void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4684				    struct intel_crtc *crtc)
4685{
4686	struct intel_display *display = to_intel_display(state);
4687	struct drm_i915_private *i915 = to_i915(state->base.dev);
4688	const struct intel_crtc_state *old_crtc_state =
4689		intel_atomic_get_old_crtc_state(state, crtc);
4690	const struct intel_crtc_state *new_crtc_state =
4691		intel_atomic_get_new_crtc_state(state, crtc);
4692
4693	if (new_crtc_state->shared_dpll)
4694		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4695					 crtc, new_crtc_state);
4696
4697	if (old_crtc_state->shared_dpll &&
4698	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4699		u8 pipe_mask = BIT(crtc->pipe);
4700		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4701
4702		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4703					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4704					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4705
4706		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4707		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4708								     new_crtc_state->shared_dpll) &&
4709					 pll->state.pipe_mask & pipe_mask,
4710					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4711					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4712	}
4713}
4714
4715void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4716{
4717	struct drm_i915_private *i915 = to_i915(state->base.dev);
4718	struct intel_shared_dpll *pll;
4719	int i;
4720
4721	for_each_shared_dpll(i915, pll, i)
4722		verify_single_dpll_state(i915, pll, NULL, NULL);
4723}
v6.8
   1/*
   2 * Copyright © 2006-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/math.h>
  25#include <linux/string_helpers.h>
  26
 
  27#include "i915_reg.h"
  28#include "intel_de.h"
  29#include "intel_display_types.h"
  30#include "intel_dkl_phy.h"
  31#include "intel_dkl_phy_regs.h"
  32#include "intel_dpio_phy.h"
  33#include "intel_dpll.h"
  34#include "intel_dpll_mgr.h"
  35#include "intel_hti.h"
  36#include "intel_mg_phy_regs.h"
  37#include "intel_pch_refclk.h"
  38#include "intel_tc.h"
  39
  40/**
  41 * DOC: Display PLLs
  42 *
  43 * Display PLLs used for driving outputs vary by platform. While some have
  44 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  45 * from a pool. In the latter scenario, it is possible that multiple pipes
  46 * share a PLL if their configurations match.
  47 *
  48 * This file provides an abstraction over display PLLs. The function
  49 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  50 * users of a PLL are tracked and that tracking is integrated with the atomic
  51 * modset interface. During an atomic operation, required PLLs can be reserved
  52 * for a given CRTC and encoder configuration by calling
  53 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
  54 * with intel_release_shared_dplls().
  55 * Changes to the users are first staged in the atomic state, and then made
  56 * effective by calling intel_shared_dpll_swap_state() during the atomic
  57 * commit phase.
  58 */
  59
  60/* platform specific hooks for managing DPLLs */
  61struct intel_shared_dpll_funcs {
  62	/*
  63	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
  64	 * the pll is not already enabled.
  65	 */
  66	void (*enable)(struct drm_i915_private *i915,
  67		       struct intel_shared_dpll *pll);
 
  68
  69	/*
  70	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
  71	 * only when it is safe to disable the pll, i.e., there are no more
  72	 * tracked users for it.
  73	 */
  74	void (*disable)(struct drm_i915_private *i915,
  75			struct intel_shared_dpll *pll);
  76
  77	/*
  78	 * Hook for reading the values currently programmed to the DPLL
  79	 * registers. This is used for initial hw state readout and state
  80	 * verification after a mode set.
  81	 */
  82	bool (*get_hw_state)(struct drm_i915_private *i915,
  83			     struct intel_shared_dpll *pll,
  84			     struct intel_dpll_hw_state *hw_state);
  85
  86	/*
  87	 * Hook for calculating the pll's output frequency based on its passed
  88	 * in state.
  89	 */
  90	int (*get_freq)(struct drm_i915_private *i915,
  91			const struct intel_shared_dpll *pll,
  92			const struct intel_dpll_hw_state *pll_state);
  93};
  94
  95struct intel_dpll_mgr {
  96	const struct dpll_info *dpll_info;
  97
  98	int (*compute_dplls)(struct intel_atomic_state *state,
  99			     struct intel_crtc *crtc,
 100			     struct intel_encoder *encoder);
 101	int (*get_dplls)(struct intel_atomic_state *state,
 102			 struct intel_crtc *crtc,
 103			 struct intel_encoder *encoder);
 104	void (*put_dplls)(struct intel_atomic_state *state,
 105			  struct intel_crtc *crtc);
 106	void (*update_active_dpll)(struct intel_atomic_state *state,
 107				   struct intel_crtc *crtc,
 108				   struct intel_encoder *encoder);
 109	void (*update_ref_clks)(struct drm_i915_private *i915);
 110	void (*dump_hw_state)(struct drm_i915_private *i915,
 111			      const struct intel_dpll_hw_state *hw_state);
 
 
 112};
 113
 114static void
 115intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
 116				  struct intel_shared_dpll_state *shared_dpll)
 117{
 118	struct intel_shared_dpll *pll;
 119	int i;
 120
 121	/* Copy shared dpll state */
 122	for_each_shared_dpll(i915, pll, i)
 123		shared_dpll[pll->index] = pll->state;
 124}
 125
 126static struct intel_shared_dpll_state *
 127intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
 128{
 129	struct intel_atomic_state *state = to_intel_atomic_state(s);
 130
 131	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
 132
 133	if (!state->dpll_set) {
 134		state->dpll_set = true;
 135
 136		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
 137						  state->shared_dpll);
 138	}
 139
 140	return state->shared_dpll;
 141}
 142
 143/**
 144 * intel_get_shared_dpll_by_id - get a DPLL given its id
 145 * @i915: i915 device instance
 146 * @id: pll id
 147 *
 148 * Returns:
 149 * A pointer to the DPLL with @id
 150 */
 151struct intel_shared_dpll *
 152intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
 153			    enum intel_dpll_id id)
 154{
 155	struct intel_shared_dpll *pll;
 156	int i;
 157
 158	for_each_shared_dpll(i915, pll, i) {
 159		if (pll->info->id == id)
 160			return pll;
 161	}
 162
 163	MISSING_CASE(id);
 164	return NULL;
 165}
 166
 167/* For ILK+ */
 168void assert_shared_dpll(struct drm_i915_private *i915,
 169			struct intel_shared_dpll *pll,
 170			bool state)
 171{
 
 172	bool cur_state;
 173	struct intel_dpll_hw_state hw_state;
 174
 175	if (drm_WARN(&i915->drm, !pll,
 176		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
 177		return;
 178
 179	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
 180	I915_STATE_WARN(i915, cur_state != state,
 181			"%s assertion failure (expected %s, current %s)\n",
 182			pll->info->name, str_on_off(state),
 183			str_on_off(cur_state));
 184}
 185
 186static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 187{
 188	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
 189}
 190
 191enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 192{
 193	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
 194}
 195
 196static i915_reg_t
 197intel_combo_pll_enable_reg(struct drm_i915_private *i915,
 198			   struct intel_shared_dpll *pll)
 199{
 200	if (IS_DG1(i915))
 201		return DG1_DPLL_ENABLE(pll->info->id);
 202	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
 203		 (pll->info->id == DPLL_ID_EHL_DPLL4))
 204		return MG_PLL_ENABLE(0);
 205
 206	return ICL_DPLL_ENABLE(pll->info->id);
 207}
 208
 209static i915_reg_t
 210intel_tc_pll_enable_reg(struct drm_i915_private *i915,
 211			struct intel_shared_dpll *pll)
 212{
 213	const enum intel_dpll_id id = pll->info->id;
 214	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
 215
 216	if (IS_ALDERLAKE_P(i915))
 217		return ADLP_PORTTC_PLL_ENABLE(tc_port);
 218
 219	return MG_PLL_ENABLE(tc_port);
 220}
 221
 222static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
 223				      struct intel_shared_dpll *pll)
 224{
 225	if (pll->info->power_domain)
 226		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
 227
 228	pll->info->funcs->enable(i915, pll);
 229	pll->on = true;
 230}
 231
 232static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
 233				       struct intel_shared_dpll *pll)
 234{
 235	pll->info->funcs->disable(i915, pll);
 236	pll->on = false;
 237
 238	if (pll->info->power_domain)
 239		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
 240}
 241
 242/**
 243 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 244 * @crtc_state: CRTC, and its state, which has a shared DPLL
 245 *
 246 * Enable the shared DPLL used by @crtc.
 247 */
 248void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 249{
 250	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 251	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 252	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 253	unsigned int pipe_mask = BIT(crtc->pipe);
 254	unsigned int old_mask;
 255
 256	if (drm_WARN_ON(&i915->drm, pll == NULL))
 257		return;
 258
 259	mutex_lock(&i915->display.dpll.lock);
 260	old_mask = pll->active_mask;
 261
 262	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
 263	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
 264		goto out;
 265
 266	pll->active_mask |= pipe_mask;
 267
 268	drm_dbg_kms(&i915->drm,
 269		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 270		    pll->info->name, pll->active_mask, pll->on,
 271		    crtc->base.base.id, crtc->base.name);
 272
 273	if (old_mask) {
 274		drm_WARN_ON(&i915->drm, !pll->on);
 275		assert_shared_dpll_enabled(i915, pll);
 276		goto out;
 277	}
 278	drm_WARN_ON(&i915->drm, pll->on);
 279
 280	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
 281
 282	_intel_enable_shared_dpll(i915, pll);
 283
 284out:
 285	mutex_unlock(&i915->display.dpll.lock);
 286}
 287
 288/**
 289 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 290 * @crtc_state: CRTC, and its state, which has a shared DPLL
 291 *
 292 * Disable the shared DPLL used by @crtc.
 293 */
 294void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 295{
 296	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 297	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 298	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 299	unsigned int pipe_mask = BIT(crtc->pipe);
 300
 301	/* PCH only available on ILK+ */
 302	if (DISPLAY_VER(i915) < 5)
 303		return;
 304
 305	if (pll == NULL)
 306		return;
 307
 308	mutex_lock(&i915->display.dpll.lock);
 309	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
 310		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
 311		     crtc->base.base.id, crtc->base.name))
 312		goto out;
 313
 314	drm_dbg_kms(&i915->drm,
 315		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 316		    pll->info->name, pll->active_mask, pll->on,
 317		    crtc->base.base.id, crtc->base.name);
 318
 319	assert_shared_dpll_enabled(i915, pll);
 320	drm_WARN_ON(&i915->drm, !pll->on);
 321
 322	pll->active_mask &= ~pipe_mask;
 323	if (pll->active_mask)
 324		goto out;
 325
 326	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
 327
 328	_intel_disable_shared_dpll(i915, pll);
 329
 330out:
 331	mutex_unlock(&i915->display.dpll.lock);
 332}
 333
 334static unsigned long
 335intel_dpll_mask_all(struct drm_i915_private *i915)
 336{
 337	struct intel_shared_dpll *pll;
 338	unsigned long dpll_mask = 0;
 339	int i;
 340
 341	for_each_shared_dpll(i915, pll, i) {
 342		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
 343
 344		dpll_mask |= BIT(pll->info->id);
 345	}
 346
 347	return dpll_mask;
 348}
 349
 350static struct intel_shared_dpll *
 351intel_find_shared_dpll(struct intel_atomic_state *state,
 352		       const struct intel_crtc *crtc,
 353		       const struct intel_dpll_hw_state *pll_state,
 354		       unsigned long dpll_mask)
 355{
 356	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 357	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
 358	struct intel_shared_dpll_state *shared_dpll;
 359	struct intel_shared_dpll *unused_pll = NULL;
 360	enum intel_dpll_id id;
 361
 362	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 363
 364	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
 365
 366	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
 367		struct intel_shared_dpll *pll;
 368
 369		pll = intel_get_shared_dpll_by_id(i915, id);
 370		if (!pll)
 371			continue;
 372
 373		/* Only want to check enabled timings first */
 374		if (shared_dpll[pll->index].pipe_mask == 0) {
 375			if (!unused_pll)
 376				unused_pll = pll;
 377			continue;
 378		}
 379
 380		if (memcmp(pll_state,
 381			   &shared_dpll[pll->index].hw_state,
 382			   sizeof(*pll_state)) == 0) {
 383			drm_dbg_kms(&i915->drm,
 384				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
 385				    crtc->base.base.id, crtc->base.name,
 386				    pll->info->name,
 387				    shared_dpll[pll->index].pipe_mask,
 388				    pll->active_mask);
 389			return pll;
 390		}
 391	}
 392
 393	/* Ok no matching timings, maybe there's a free one? */
 394	if (unused_pll) {
 395		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
 396			    crtc->base.base.id, crtc->base.name,
 397			    unused_pll->info->name);
 398		return unused_pll;
 399	}
 400
 401	return NULL;
 402}
 403
 404/**
 405 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 406 * @crtc: CRTC on which behalf the reference is taken
 407 * @pll: DPLL for which the reference is taken
 408 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 409 *
 410 * Take a reference for @pll tracking the use of it by @crtc.
 411 */
 412static void
 413intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
 414				 const struct intel_shared_dpll *pll,
 415				 struct intel_shared_dpll_state *shared_dpll_state)
 416{
 417	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 418
 419	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
 420
 421	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
 422
 423	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
 424		    crtc->base.base.id, crtc->base.name, pll->info->name);
 425}
 426
 427static void
 428intel_reference_shared_dpll(struct intel_atomic_state *state,
 429			    const struct intel_crtc *crtc,
 430			    const struct intel_shared_dpll *pll,
 431			    const struct intel_dpll_hw_state *pll_state)
 432{
 433	struct intel_shared_dpll_state *shared_dpll;
 434
 435	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 436
 437	if (shared_dpll[pll->index].pipe_mask == 0)
 438		shared_dpll[pll->index].hw_state = *pll_state;
 439
 440	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 441}
 442
 443/**
 444 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 445 * @crtc: CRTC on which behalf the reference is dropped
 446 * @pll: DPLL for which the reference is dropped
 447 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 448 *
 449 * Drop a reference for @pll tracking the end of use of it by @crtc.
 450 */
 451void
 452intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
 453				   const struct intel_shared_dpll *pll,
 454				   struct intel_shared_dpll_state *shared_dpll_state)
 455{
 456	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 457
 458	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
 459
 460	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
 461
 462	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
 463		    crtc->base.base.id, crtc->base.name, pll->info->name);
 464}
 465
 466static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
 467					  const struct intel_crtc *crtc,
 468					  const struct intel_shared_dpll *pll)
 469{
 470	struct intel_shared_dpll_state *shared_dpll;
 471
 472	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 473
 474	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 475}
 476
 477static void intel_put_dpll(struct intel_atomic_state *state,
 478			   struct intel_crtc *crtc)
 479{
 480	const struct intel_crtc_state *old_crtc_state =
 481		intel_atomic_get_old_crtc_state(state, crtc);
 482	struct intel_crtc_state *new_crtc_state =
 483		intel_atomic_get_new_crtc_state(state, crtc);
 484
 485	new_crtc_state->shared_dpll = NULL;
 486
 487	if (!old_crtc_state->shared_dpll)
 488		return;
 489
 490	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
 491}
 492
 493/**
 494 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 495 * @state: atomic state
 496 *
 497 * This is the dpll version of drm_atomic_helper_swap_state() since the
 498 * helper does not handle driver-specific global state.
 499 *
 500 * For consistency with atomic helpers this function does a complete swap,
 501 * i.e. it also puts the current state into @state, even though there is no
 502 * need for that at this moment.
 503 */
 504void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 505{
 506	struct drm_i915_private *i915 = to_i915(state->base.dev);
 507	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
 508	struct intel_shared_dpll *pll;
 509	int i;
 510
 511	if (!state->dpll_set)
 512		return;
 513
 514	for_each_shared_dpll(i915, pll, i)
 515		swap(pll->state, shared_dpll[pll->index]);
 516}
 517
 518static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
 519				      struct intel_shared_dpll *pll,
 520				      struct intel_dpll_hw_state *hw_state)
 521{
 522	const enum intel_dpll_id id = pll->info->id;
 523	intel_wakeref_t wakeref;
 524	u32 val;
 525
 526	wakeref = intel_display_power_get_if_enabled(i915,
 527						     POWER_DOMAIN_DISPLAY_CORE);
 528	if (!wakeref)
 529		return false;
 530
 531	val = intel_de_read(i915, PCH_DPLL(id));
 532	hw_state->dpll = val;
 533	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
 534	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
 535
 536	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 537
 538	return val & DPLL_VCO_ENABLE;
 539}
 540
 541static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
 542{
 543	u32 val;
 544	bool enabled;
 545
 546	val = intel_de_read(i915, PCH_DREF_CONTROL);
 547	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 548			    DREF_SUPERSPREAD_SOURCE_MASK));
 549	I915_STATE_WARN(i915, !enabled,
 550			"PCH refclk assertion failure, should be active but is disabled\n");
 551}
 552
 553static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
 554				struct intel_shared_dpll *pll)
 555{
 556	const enum intel_dpll_id id = pll->info->id;
 557
 558	/* PCH refclock must be enabled first */
 559	ibx_assert_pch_refclk_enabled(i915);
 560
 561	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
 562	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
 563
 564	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
 565
 566	/* Wait for the clocks to stabilize. */
 567	intel_de_posting_read(i915, PCH_DPLL(id));
 568	udelay(150);
 569
 570	/* The pixel multiplier can only be updated once the
 571	 * DPLL is enabled and the clocks are stable.
 572	 *
 573	 * So write it again.
 574	 */
 575	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
 576	intel_de_posting_read(i915, PCH_DPLL(id));
 577	udelay(200);
 578}
 579
 580static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
 581				 struct intel_shared_dpll *pll)
 582{
 583	const enum intel_dpll_id id = pll->info->id;
 584
 585	intel_de_write(i915, PCH_DPLL(id), 0);
 586	intel_de_posting_read(i915, PCH_DPLL(id));
 587	udelay(200);
 588}
 589
 590static int ibx_compute_dpll(struct intel_atomic_state *state,
 591			    struct intel_crtc *crtc,
 592			    struct intel_encoder *encoder)
 593{
 594	return 0;
 595}
 596
 597static int ibx_get_dpll(struct intel_atomic_state *state,
 598			struct intel_crtc *crtc,
 599			struct intel_encoder *encoder)
 600{
 601	struct intel_crtc_state *crtc_state =
 602		intel_atomic_get_new_crtc_state(state, crtc);
 603	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 604	struct intel_shared_dpll *pll;
 605	enum intel_dpll_id id;
 606
 607	if (HAS_PCH_IBX(i915)) {
 608		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 609		id = (enum intel_dpll_id) crtc->pipe;
 610		pll = intel_get_shared_dpll_by_id(i915, id);
 611
 612		drm_dbg_kms(&i915->drm,
 613			    "[CRTC:%d:%s] using pre-allocated %s\n",
 614			    crtc->base.base.id, crtc->base.name,
 615			    pll->info->name);
 616	} else {
 617		pll = intel_find_shared_dpll(state, crtc,
 618					     &crtc_state->dpll_hw_state,
 619					     BIT(DPLL_ID_PCH_PLL_B) |
 620					     BIT(DPLL_ID_PCH_PLL_A));
 621	}
 622
 623	if (!pll)
 624		return -EINVAL;
 625
 626	/* reference the pll */
 627	intel_reference_shared_dpll(state, crtc,
 628				    pll, &crtc_state->dpll_hw_state);
 629
 630	crtc_state->shared_dpll = pll;
 631
 632	return 0;
 633}
 634
 635static void ibx_dump_hw_state(struct drm_i915_private *i915,
 636			      const struct intel_dpll_hw_state *hw_state)
 637{
 638	drm_dbg_kms(&i915->drm,
 639		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 640		    "fp0: 0x%x, fp1: 0x%x\n",
 641		    hw_state->dpll,
 642		    hw_state->dpll_md,
 643		    hw_state->fp0,
 644		    hw_state->fp1);
 645}
 646
 647static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 648	.enable = ibx_pch_dpll_enable,
 649	.disable = ibx_pch_dpll_disable,
 650	.get_hw_state = ibx_pch_dpll_get_hw_state,
 651};
 652
 653static const struct dpll_info pch_plls[] = {
 654	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
 655	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
 656	{}
 657};
 658
 659static const struct intel_dpll_mgr pch_pll_mgr = {
 660	.dpll_info = pch_plls,
 661	.compute_dplls = ibx_compute_dpll,
 662	.get_dplls = ibx_get_dpll,
 663	.put_dplls = intel_put_dpll,
 664	.dump_hw_state = ibx_dump_hw_state,
 665};
 666
 667static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
 668				 struct intel_shared_dpll *pll)
 669{
 670	const enum intel_dpll_id id = pll->info->id;
 671
 672	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
 673	intel_de_posting_read(i915, WRPLL_CTL(id));
 674	udelay(20);
 675}
 676
 677static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
 678				struct intel_shared_dpll *pll)
 679{
 680	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
 681	intel_de_posting_read(i915, SPLL_CTL);
 682	udelay(20);
 683}
 684
 685static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
 686				  struct intel_shared_dpll *pll)
 687{
 688	const enum intel_dpll_id id = pll->info->id;
 689
 690	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
 691	intel_de_posting_read(i915, WRPLL_CTL(id));
 692
 693	/*
 694	 * Try to set up the PCH reference clock once all DPLLs
 695	 * that depend on it have been shut down.
 696	 */
 697	if (i915->display.dpll.pch_ssc_use & BIT(id))
 698		intel_init_pch_refclk(i915);
 699}
 700
 701static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
 702				 struct intel_shared_dpll *pll)
 703{
 704	enum intel_dpll_id id = pll->info->id;
 705
 706	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
 707	intel_de_posting_read(i915, SPLL_CTL);
 708
 709	/*
 710	 * Try to set up the PCH reference clock once all DPLLs
 711	 * that depend on it have been shut down.
 712	 */
 713	if (i915->display.dpll.pch_ssc_use & BIT(id))
 714		intel_init_pch_refclk(i915);
 715}
 716
 717static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
 718				       struct intel_shared_dpll *pll,
 719				       struct intel_dpll_hw_state *hw_state)
 720{
 721	const enum intel_dpll_id id = pll->info->id;
 722	intel_wakeref_t wakeref;
 723	u32 val;
 724
 725	wakeref = intel_display_power_get_if_enabled(i915,
 726						     POWER_DOMAIN_DISPLAY_CORE);
 727	if (!wakeref)
 728		return false;
 729
 730	val = intel_de_read(i915, WRPLL_CTL(id));
 731	hw_state->wrpll = val;
 732
 733	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 734
 735	return val & WRPLL_PLL_ENABLE;
 736}
 737
 738static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
 739				      struct intel_shared_dpll *pll,
 740				      struct intel_dpll_hw_state *hw_state)
 741{
 742	intel_wakeref_t wakeref;
 743	u32 val;
 744
 745	wakeref = intel_display_power_get_if_enabled(i915,
 746						     POWER_DOMAIN_DISPLAY_CORE);
 747	if (!wakeref)
 748		return false;
 749
 750	val = intel_de_read(i915, SPLL_CTL);
 751	hw_state->spll = val;
 752
 753	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 754
 755	return val & SPLL_PLL_ENABLE;
 756}
 757
 758#define LC_FREQ 2700
 759#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 760
 761#define P_MIN 2
 762#define P_MAX 64
 763#define P_INC 2
 764
 765/* Constraints for PLL good behavior */
 766#define REF_MIN 48
 767#define REF_MAX 400
 768#define VCO_MIN 2400
 769#define VCO_MAX 4800
 770
 771struct hsw_wrpll_rnp {
 772	unsigned p, n2, r2;
 773};
 774
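/*
 * Allowed frequency error budget, in ppm, when picking WRPLL dividers for the
 * given pixel clock; clocks listed with a 0 budget effectively just minimize
 * the error.
 */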
 775static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 776{
 777	switch (clock) {
 778	case 25175000:
 779	case 25200000:
 780	case 27000000:
 781	case 27027000:
 782	case 37762500:
 783	case 37800000:
 784	case 40500000:
 785	case 40541000:
 786	case 54000000:
 787	case 54054000:
 788	case 59341000:
 789	case 59400000:
 790	case 72000000:
 791	case 74176000:
 792	case 74250000:
 793	case 81000000:
 794	case 81081000:
 795	case 89012000:
 796	case 89100000:
 797	case 108000000:
 798	case 108108000:
 799	case 111264000:
 800	case 111375000:
 801	case 148352000:
 802	case 148500000:
 803	case 162000000:
 804	case 162162000:
 805	case 222525000:
 806	case 222750000:
 807	case 296703000:
 808	case 297000000:
 809		return 0;
 810	case 233500000:
 811	case 245250000:
 812	case 247750000:
 813	case 253250000:
 814	case 298000000:
 815		return 1500;
 816	case 169128000:
 817	case 169500000:
 818	case 179500000:
 819	case 202000000:
 820		return 2000;
 821	case 256250000:
 822	case 262500000:
 823	case 270000000:
 824	case 272500000:
 825	case 273750000:
 826	case 280750000:
 827	case 281250000:
 828	case 286000000:
 829	case 291750000:
 830		return 4000;
 831	case 267250000:
 832	case 268500000:
 833		return 5000;
 834	default:
 835		return 1000;
 836	}
 837}
 838
 839static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
 840				 unsigned int r2, unsigned int n2,
 841				 unsigned int p,
 842				 struct hsw_wrpll_rnp *best)
 843{
 844	u64 a, b, c, d, diff, diff_best;
 845
 846	/* No best (r,n,p) yet */
 847	if (best->p == 0) {
 848		best->p = p;
 849		best->n2 = n2;
 850		best->r2 = r2;
 851		return;
 852	}
 853
 854	/*
 855	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
 856	 * freq2k.
 857	 *
 858	 * delta = 1e6 *
 859	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
 860	 *	   freq2k;
 861	 *
 862	 * and we would like delta <= budget.
 863	 *
 864	 * If the discrepancy is above the PPM-based budget, always prefer to
 865	 * improve upon the previous solution.  However, if you're within the
 866	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
 867	 */
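	/*
	 * Multiplying delta <= budget through by freq2k * p * r2 turns the
	 * budget check into a >= c for the candidate (and likewise b >= d for
	 * the current best); the comparisons below are that check with the
	 * divisions factored out.
	 */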
 868	a = freq2k * budget * p * r2;
 869	b = freq2k * budget * best->p * best->r2;
 870	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
 871	diff_best = abs_diff(freq2k * best->p * best->r2,
 872			     LC_FREQ_2K * best->n2);
 873	c = 1000000 * diff;
 874	d = 1000000 * diff_best;
 875
 876	if (a < c && b < d) {
 877		/* If both are above the budget, pick the closer */
 878		if (best->p * best->r2 * diff < p * r2 * diff_best) {
 879			best->p = p;
 880			best->n2 = n2;
 881			best->r2 = r2;
 882		}
 883	} else if (a >= c && b < d) {
 884		/* Candidate is within the budget but the current best is not: update. */
 885		best->p = p;
 886		best->n2 = n2;
 887		best->r2 = r2;
 888	} else if (a >= c && b >= d) {
 889		/* Both are below the limit, so pick the higher n2/(r2*r2) */
 890		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
 891			best->p = p;
 892			best->n2 = n2;
 893			best->r2 = r2;
 894		}
 895	}
 896	/* Otherwise a < c && b >= d, do nothing */
 897}
 898
 899static void
 900hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 901			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 902{
 903	u64 freq2k;
 904	unsigned p, n2, r2;
 905	struct hsw_wrpll_rnp best = {};
 906	unsigned budget;
 907
 908	freq2k = clock / 100;
 909
 910	budget = hsw_wrpll_get_budget_for_freq(clock);
 911
 912	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
 913	 * entirely and drive the output directly from the LC PLL. */
 914	if (freq2k == 5400000) {
 915		*n2_out = 2;
 916		*p_out = 1;
 917		*r2_out = 2;
 918		return;
 919	}
 920
 921	/*
 922	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
 923	 * the WR PLL.
 924	 *
 925	 * We want R so that REF_MIN <= Ref <= REF_MAX.
 926	 * Injecting R2 = 2 * R gives:
 927	 *   REF_MAX * r2 > LC_FREQ * 2 and
 928	 *   REF_MIN * r2 < LC_FREQ * 2
 929	 *
 930	 * Which means the desired boundaries for r2 are:
 931	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
 932	 *
 933	 */
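	/*
	 * With LC_FREQ = 2700 and REF_MIN/REF_MAX = 48/400 this scans
	 * r2 = 14..112, i.e. R = 7..56.
	 */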
 934	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
 935	     r2 <= LC_FREQ * 2 / REF_MIN;
 936	     r2++) {
 937
 938		/*
 939		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
 940		 *
 941		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
 942		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
 943		 *   VCO_MAX * r2 > n2 * LC_FREQ and
 944		 *   VCO_MIN * r2 < n2 * LC_FREQ)
 945		 *
 946		 * Which means the desired boundaries for n2 are:
 947		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
 948		 */
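		/* e.g. for r2 = 14 this scans n2 = 13..24 (N = 6.5..12). */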
 949		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
 950		     n2 <= VCO_MAX * r2 / LC_FREQ;
 951		     n2++) {
 952
 953			for (p = P_MIN; p <= P_MAX; p += P_INC)
 954				hsw_wrpll_update_rnp(freq2k, budget,
 955						     r2, n2, p, &best);
 956		}
 957	}
 958
 959	*n2_out = best.n2;
 960	*p_out = best.p;
 961	*r2_out = best.r2;
 962}
 963
 964static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
 965				  const struct intel_shared_dpll *pll,
 966				  const struct intel_dpll_hw_state *pll_state)
 967{
 968	int refclk;
 969	int n, p, r;
 970	u32 wrpll = pll_state->wrpll;
 971
 972	switch (wrpll & WRPLL_REF_MASK) {
 973	case WRPLL_REF_SPECIAL_HSW:
 974		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
 975		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
 976			refclk = i915->display.dpll.ref_clks.nssc;
 977			break;
 978		}
 979		fallthrough;
 980	case WRPLL_REF_PCH_SSC:
 981		/*
 982		 * We could calculate spread here, but our checking
 983		 * code only cares about 5% accuracy, and spread is a max of
 984		 * 0.5% downspread.
 985		 */
 986		refclk = i915->display.dpll.ref_clks.ssc;
 987		break;
 988	case WRPLL_REF_LCPLL:
 989		refclk = 2700000;
 990		break;
 991	default:
 992		MISSING_CASE(wrpll);
 993		return 0;
 994	}
 995
 996	r = wrpll & WRPLL_DIVIDER_REF_MASK;
 997	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
 998	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
 999
1000	/* Convert to KHz, p & r have a fixed point portion */
1001	return (refclk * n / 10) / (p * r) * 2;
1002}
1003
1004static int
1005hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1006			   struct intel_crtc *crtc)
1007{
1008	struct drm_i915_private *i915 = to_i915(state->base.dev);
1009	struct intel_crtc_state *crtc_state =
1010		intel_atomic_get_new_crtc_state(state, crtc);
1011	unsigned int p, n2, r2;
1012
1013	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1014
1015	crtc_state->dpll_hw_state.wrpll =
1016		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1017		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1018		WRPLL_DIVIDER_POST(p);
1019
1020	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1021							&crtc_state->dpll_hw_state);
1022
1023	return 0;
1024}
1025
1026static struct intel_shared_dpll *
1027hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1028		       struct intel_crtc *crtc)
1029{
1030	struct intel_crtc_state *crtc_state =
1031		intel_atomic_get_new_crtc_state(state, crtc);
1032
1033	return intel_find_shared_dpll(state, crtc,
1034				      &crtc_state->dpll_hw_state,
1035				      BIT(DPLL_ID_WRPLL2) |
1036				      BIT(DPLL_ID_WRPLL1));
1037}
1038
1039static int
1040hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1041{
1042	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1043	int clock = crtc_state->port_clock;
1044
1045	switch (clock / 2) {
1046	case 81000:
1047	case 135000:
1048	case 270000:
1049		return 0;
1050	default:
1051		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1052			    clock);
1053		return -EINVAL;
1054	}
1055}
1056
1057static struct intel_shared_dpll *
1058hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1059{
1060	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1061	struct intel_shared_dpll *pll;
1062	enum intel_dpll_id pll_id;
1063	int clock = crtc_state->port_clock;
1064
1065	switch (clock / 2) {
1066	case 81000:
1067		pll_id = DPLL_ID_LCPLL_810;
1068		break;
1069	case 135000:
1070		pll_id = DPLL_ID_LCPLL_1350;
1071		break;
1072	case 270000:
1073		pll_id = DPLL_ID_LCPLL_2700;
1074		break;
1075	default:
1076		MISSING_CASE(clock / 2);
1077		return NULL;
1078	}
1079
1080	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1081
1082	if (!pll)
1083		return NULL;
1084
1085	return pll;
1086}
1087
1088static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1089				  const struct intel_shared_dpll *pll,
1090				  const struct intel_dpll_hw_state *pll_state)
1091{
1092	int link_clock = 0;
1093
1094	switch (pll->info->id) {
1095	case DPLL_ID_LCPLL_810:
1096		link_clock = 81000;
1097		break;
1098	case DPLL_ID_LCPLL_1350:
1099		link_clock = 135000;
1100		break;
1101	case DPLL_ID_LCPLL_2700:
1102		link_clock = 270000;
1103		break;
1104	default:
1105		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1106		break;
1107	}
1108
1109	return link_clock * 2;
1110}
1111
1112static int
1113hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1114			  struct intel_crtc *crtc)
1115{
1116	struct intel_crtc_state *crtc_state =
1117		intel_atomic_get_new_crtc_state(state, crtc);
1118
1119	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1120		return -EINVAL;
1121
1122	crtc_state->dpll_hw_state.spll =
1123		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1124
1125	return 0;
1126}
1127
1128static struct intel_shared_dpll *
1129hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1130		      struct intel_crtc *crtc)
1131{
1132	struct intel_crtc_state *crtc_state =
1133		intel_atomic_get_new_crtc_state(state, crtc);
1134
1135	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1136				      BIT(DPLL_ID_SPLL));
1137}
1138
1139static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1140				 const struct intel_shared_dpll *pll,
1141				 const struct intel_dpll_hw_state *pll_state)
1142{
1143	int link_clock = 0;
1144
1145	switch (pll_state->spll & SPLL_FREQ_MASK) {
1146	case SPLL_FREQ_810MHz:
1147		link_clock = 81000;
1148		break;
1149	case SPLL_FREQ_1350MHz:
1150		link_clock = 135000;
1151		break;
1152	case SPLL_FREQ_2700MHz:
1153		link_clock = 270000;
1154		break;
1155	default:
1156		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1157		break;
1158	}
1159
1160	return link_clock * 2;
1161}
1162
1163static int hsw_compute_dpll(struct intel_atomic_state *state,
1164			    struct intel_crtc *crtc,
1165			    struct intel_encoder *encoder)
1166{
1167	struct intel_crtc_state *crtc_state =
1168		intel_atomic_get_new_crtc_state(state, crtc);
1169
1170	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1171		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1172	else if (intel_crtc_has_dp_encoder(crtc_state))
1173		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1174	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1175		return hsw_ddi_spll_compute_dpll(state, crtc);
1176	else
1177		return -EINVAL;
1178}
1179
1180static int hsw_get_dpll(struct intel_atomic_state *state,
1181			struct intel_crtc *crtc,
1182			struct intel_encoder *encoder)
1183{
1184	struct intel_crtc_state *crtc_state =
1185		intel_atomic_get_new_crtc_state(state, crtc);
1186	struct intel_shared_dpll *pll = NULL;
1187
1188	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1189		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1190	else if (intel_crtc_has_dp_encoder(crtc_state))
1191		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1192	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1193		pll = hsw_ddi_spll_get_dpll(state, crtc);
1194
1195	if (!pll)
1196		return -EINVAL;
1197
1198	intel_reference_shared_dpll(state, crtc,
1199				    pll, &crtc_state->dpll_hw_state);
1200
1201	crtc_state->shared_dpll = pll;
1202
1203	return 0;
1204}
1205
1206static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1207{
1208	i915->display.dpll.ref_clks.ssc = 135000;
1209	/* Non-SSC is only used on non-ULT HSW. */
1210	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1211		i915->display.dpll.ref_clks.nssc = 24000;
1212	else
1213		i915->display.dpll.ref_clks.nssc = 135000;
1214}
1215
1216static void hsw_dump_hw_state(struct drm_i915_private *i915,
1217			      const struct intel_dpll_hw_state *hw_state)
1218{
1219	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1220		    hw_state->wrpll, hw_state->spll);
1221}
1222
1223static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1224	.enable = hsw_ddi_wrpll_enable,
1225	.disable = hsw_ddi_wrpll_disable,
1226	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1227	.get_freq = hsw_ddi_wrpll_get_freq,
1228};
1229
1230static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1231	.enable = hsw_ddi_spll_enable,
1232	.disable = hsw_ddi_spll_disable,
1233	.get_hw_state = hsw_ddi_spll_get_hw_state,
1234	.get_freq = hsw_ddi_spll_get_freq,
1235};
1236
1237static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1238				 struct intel_shared_dpll *pll)
1239{
1240}
1241
1242static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1243				  struct intel_shared_dpll *pll)
1244{
1245}
1246
1247static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1248				       struct intel_shared_dpll *pll,
1249				       struct intel_dpll_hw_state *hw_state)
1250{
1251	return true;
1252}
1253
1254static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1255	.enable = hsw_ddi_lcpll_enable,
1256	.disable = hsw_ddi_lcpll_disable,
1257	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1258	.get_freq = hsw_ddi_lcpll_get_freq,
1259};
1260
1261static const struct dpll_info hsw_plls[] = {
1262	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1263	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1264	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1265	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1266	  .flags = INTEL_DPLL_ALWAYS_ON, },
1267	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1268	  .flags = INTEL_DPLL_ALWAYS_ON, },
1269	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1270	  .flags = INTEL_DPLL_ALWAYS_ON, },
1271	{}
1272};
1273
1274static const struct intel_dpll_mgr hsw_pll_mgr = {
1275	.dpll_info = hsw_plls,
1276	.compute_dplls = hsw_compute_dpll,
1277	.get_dplls = hsw_get_dpll,
1278	.put_dplls = intel_put_dpll,
1279	.update_ref_clks = hsw_update_dpll_ref_clks,
1280	.dump_hw_state = hsw_dump_hw_state,
1281};
1282
1283struct skl_dpll_regs {
1284	i915_reg_t ctl, cfgcr1, cfgcr2;
1285};
1286
1287/* this array is indexed by the *shared* pll id */
1288static const struct skl_dpll_regs skl_dpll_regs[4] = {
1289	{
1290		/* DPLL 0 */
1291		.ctl = LCPLL1_CTL,
1292		/* DPLL 0 doesn't support HDMI mode */
1293	},
1294	{
1295		/* DPLL 1 */
1296		.ctl = LCPLL2_CTL,
1297		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1298		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1299	},
1300	{
1301		/* DPLL 2 */
1302		.ctl = WRPLL_CTL(0),
1303		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1304		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1305	},
1306	{
1307		/* DPLL 3 */
1308		.ctl = WRPLL_CTL(1),
1309		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1310		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1311	},
1312};
1313
1314static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1315				    struct intel_shared_dpll *pll)
1316{
1317	const enum intel_dpll_id id = pll->info->id;
1318
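	/*
	 * DPLL_CTRL1 packs a 6-bit field (HDMI mode, SSC, link rate) per DPLL,
	 * hence the id * 6 shift of the cached ctrl1 value below.
	 */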
1319	intel_de_rmw(i915, DPLL_CTRL1,
1320		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1321		     pll->state.hw_state.ctrl1 << (id * 6));
1322	intel_de_posting_read(i915, DPLL_CTRL1);
1323}
1324
1325static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1326			       struct intel_shared_dpll *pll)
1327{
1328	const struct skl_dpll_regs *regs = skl_dpll_regs;
1329	const enum intel_dpll_id id = pll->info->id;
1330
1331	skl_ddi_pll_write_ctrl1(i915, pll);
1332
1333	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1334	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1335	intel_de_posting_read(i915, regs[id].cfgcr1);
1336	intel_de_posting_read(i915, regs[id].cfgcr2);
1337
1338	/* the enable bit is always bit 31 */
1339	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1340
1341	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1342		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1343}
1344
1345static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1346				 struct intel_shared_dpll *pll)
1347{
1348	skl_ddi_pll_write_ctrl1(i915, pll);
1349}
1350
1351static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1352				struct intel_shared_dpll *pll)
1353{
1354	const struct skl_dpll_regs *regs = skl_dpll_regs;
1355	const enum intel_dpll_id id = pll->info->id;
1356
1357	/* the enable bit is always bit 31 */
1358	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1359	intel_de_posting_read(i915, regs[id].ctl);
1360}
1361
1362static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1363				  struct intel_shared_dpll *pll)
1364{
1365}
1366
1367static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1368				     struct intel_shared_dpll *pll,
1369				     struct intel_dpll_hw_state *hw_state)
1370{
1371	u32 val;
1372	const struct skl_dpll_regs *regs = skl_dpll_regs;
1373	const enum intel_dpll_id id = pll->info->id;
1374	intel_wakeref_t wakeref;
1375	bool ret;
1376
1377	wakeref = intel_display_power_get_if_enabled(i915,
1378						     POWER_DOMAIN_DISPLAY_CORE);
1379	if (!wakeref)
1380		return false;
1381
1382	ret = false;
1383
1384	val = intel_de_read(i915, regs[id].ctl);
1385	if (!(val & LCPLL_PLL_ENABLE))
1386		goto out;
1387
1388	val = intel_de_read(i915, DPLL_CTRL1);
1389	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1390
1391	/* avoid reading back stale values if HDMI mode is not enabled */
1392	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1393		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1394		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1395	}
1396	ret = true;
1397
1398out:
1399	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1400
1401	return ret;
1402}
1403
1404static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1405				       struct intel_shared_dpll *pll,
1406				       struct intel_dpll_hw_state *hw_state)
1407{
1408	const struct skl_dpll_regs *regs = skl_dpll_regs;
1409	const enum intel_dpll_id id = pll->info->id;
1410	intel_wakeref_t wakeref;
1411	u32 val;
1412	bool ret;
1413
1414	wakeref = intel_display_power_get_if_enabled(i915,
1415						     POWER_DOMAIN_DISPLAY_CORE);
1416	if (!wakeref)
1417		return false;
1418
1419	ret = false;
1420
1421	/* DPLL0 is always enabled since it drives CDCLK */
1422	val = intel_de_read(i915, regs[id].ctl);
1423	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1424		goto out;
1425
1426	val = intel_de_read(i915, DPLL_CTRL1);
1427	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1428
1429	ret = true;
1430
1431out:
1432	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1433
1434	return ret;
1435}
1436
1437struct skl_wrpll_context {
1438	u64 min_deviation;		/* current minimal deviation */
1439	u64 central_freq;		/* chosen central freq */
1440	u64 dco_freq;			/* chosen dco freq */
1441	unsigned int p;			/* chosen divider */
1442};
1443
1444/* DCO freq must be within +1%/-6%  of the DCO central freq */
1445#define SKL_DCO_MAX_PDEVIATION	100
1446#define SKL_DCO_MAX_NDEVIATION	600
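/*
 * skl_wrpll_try_divider() computes the deviation in units of 0.01%, so these
 * limits correspond to +1% and -6% respectively.
 */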
1447
1448static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1449				  u64 central_freq,
1450				  u64 dco_freq,
1451				  unsigned int divider)
1452{
1453	u64 deviation;
1454
1455	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1456			      central_freq);
1457
1458	/* positive deviation */
1459	if (dco_freq >= central_freq) {
1460		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1461		    deviation < ctx->min_deviation) {
1462			ctx->min_deviation = deviation;
1463			ctx->central_freq = central_freq;
1464			ctx->dco_freq = dco_freq;
1465			ctx->p = divider;
1466		}
1467	/* negative deviation */
1468	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1469		   deviation < ctx->min_deviation) {
1470		ctx->min_deviation = deviation;
1471		ctx->central_freq = central_freq;
1472		ctx->dco_freq = dco_freq;
1473		ctx->p = divider;
1474	}
1475}
1476
1477static void skl_wrpll_get_multipliers(unsigned int p,
1478				      unsigned int *p0 /* out */,
1479				      unsigned int *p1 /* out */,
1480				      unsigned int *p2 /* out */)
1481{
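	/*
	 * Worked example: p = 20 gives half = 10, which is even, so
	 * p0 = 2, p1 = 5, p2 = 2 and p0 * p1 * p2 == p.
	 */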
1482	/* even dividers */
1483	if (p % 2 == 0) {
1484		unsigned int half = p / 2;
1485
1486		if (half == 1 || half == 2 || half == 3 || half == 5) {
1487			*p0 = 2;
1488			*p1 = 1;
1489			*p2 = half;
1490		} else if (half % 2 == 0) {
1491			*p0 = 2;
1492			*p1 = half / 2;
1493			*p2 = 2;
1494		} else if (half % 3 == 0) {
1495			*p0 = 3;
1496			*p1 = half / 3;
1497			*p2 = 2;
1498		} else if (half % 7 == 0) {
1499			*p0 = 7;
1500			*p1 = half / 7;
1501			*p2 = 2;
1502		}
1503	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1504		*p0 = 3;
1505		*p1 = 1;
1506		*p2 = p / 3;
1507	} else if (p == 5 || p == 7) {
1508		*p0 = p;
1509		*p1 = 1;
1510		*p2 = 1;
1511	} else if (p == 15) {
1512		*p0 = 3;
1513		*p1 = 1;
1514		*p2 = 5;
1515	} else if (p == 21) {
1516		*p0 = 7;
1517		*p1 = 1;
1518		*p2 = 3;
1519	} else if (p == 35) {
1520		*p0 = 7;
1521		*p1 = 1;
1522		*p2 = 5;
1523	}
1524}
1525
1526struct skl_wrpll_params {
1527	u32 dco_fraction;
1528	u32 dco_integer;
1529	u32 qdiv_ratio;
1530	u32 qdiv_mode;
1531	u32 kdiv;
1532	u32 pdiv;
1533	u32 central_freq;
1534};
1535
1536static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1537				      u64 afe_clock,
1538				      int ref_clock,
1539				      u64 central_freq,
1540				      u32 p0, u32 p1, u32 p2)
1541{
1542	u64 dco_freq;
1543
1544	switch (central_freq) {
1545	case 9600000000ULL:
1546		params->central_freq = 0;
1547		break;
1548	case 9000000000ULL:
1549		params->central_freq = 1;
1550		break;
1551	case 8400000000ULL:
1552		params->central_freq = 3;
1553	}
1554
1555	switch (p0) {
1556	case 1:
1557		params->pdiv = 0;
1558		break;
1559	case 2:
1560		params->pdiv = 1;
1561		break;
1562	case 3:
1563		params->pdiv = 2;
1564		break;
1565	case 7:
1566		params->pdiv = 4;
1567		break;
1568	default:
1569		WARN(1, "Incorrect PDiv\n");
1570	}
1571
1572	switch (p2) {
1573	case 5:
1574		params->kdiv = 0;
1575		break;
1576	case 2:
1577		params->kdiv = 1;
1578		break;
1579	case 3:
1580		params->kdiv = 2;
1581		break;
1582	case 1:
1583		params->kdiv = 3;
1584		break;
1585	default:
1586		WARN(1, "Incorrect KDiv\n");
1587	}
1588
1589	params->qdiv_ratio = p1;
1590	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1591
1592	dco_freq = p0 * p1 * p2 * afe_clock;
1593
1594	/*
1595	 * Intermediate values are in Hz.
1596	 * Divide by MHz to match bspec
1597	 */
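	/*
	 * Worked example (assuming a 24 MHz reference): a DCO of 8.1 GHz gives
	 * dco_integer = 8100000000 / 24000000 = 337, and the remaining 0.5 is
	 * encoded as dco_fraction = 0.5 * 0x8000 = 0x4000 (15-bit fixed point).
	 */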
1598	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1599	params->dco_fraction =
1600		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1601			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1602}
1603
1604static int
1605skl_ddi_calculate_wrpll(int clock /* in Hz */,
1606			int ref_clock,
1607			struct skl_wrpll_params *wrpll_params)
1608{
1609	static const u64 dco_central_freq[3] = { 8400000000ULL,
1610						 9000000000ULL,
1611						 9600000000ULL };
1612	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1613					    24, 28, 30, 32, 36, 40, 42, 44,
1614					    48, 52, 54, 56, 60, 64, 66, 68,
1615					    70, 72, 76, 78, 80, 84, 88, 90,
1616					    92, 96, 98 };
1617	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1618	static const struct {
1619		const u8 *list;
1620		int n_dividers;
1621	} dividers[] = {
1622		{ even_dividers, ARRAY_SIZE(even_dividers) },
1623		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1624	};
1625	struct skl_wrpll_context ctx = {
1626		.min_deviation = U64_MAX,
1627	};
1628	unsigned int dco, d, i;
1629	unsigned int p0, p1, p2;
1630	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1631
1632	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1633		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1634			for (i = 0; i < dividers[d].n_dividers; i++) {
1635				unsigned int p = dividers[d].list[i];
1636				u64 dco_freq = p * afe_clock;
1637
1638				skl_wrpll_try_divider(&ctx,
1639						      dco_central_freq[dco],
1640						      dco_freq,
1641						      p);
1642				/*
1643				 * Skip the remaining dividers once we've found
1644				 * the definitive divider: a deviation of 0
1645				 * cannot be improved upon.
1646				 */
1647				if (ctx.min_deviation == 0)
1648					goto skip_remaining_dividers;
1649			}
1650		}
1651
1652skip_remaining_dividers:
1653		/*
1654		 * If a solution is found with an even divider, prefer
1655		 * this one.
1656		 */
1657		if (d == 0 && ctx.p)
1658			break;
1659	}
1660
1661	if (!ctx.p)
1662		return -EINVAL;
1663
1664	/*
1665	 * gcc incorrectly analyses that these can be used without being
1666	 * initialized. To be fair, it's hard to guess.
1667	 */
1668	p0 = p1 = p2 = 0;
1669	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1670	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1671				  ctx.central_freq, p0, p1, p2);
1672
1673	return 0;
1674}
1675
1676static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1677				  const struct intel_shared_dpll *pll,
1678				  const struct intel_dpll_hw_state *pll_state)
1679{
1680	int ref_clock = i915->display.dpll.ref_clks.nssc;
1681	u32 p0, p1, p2, dco_freq;
1682
1683	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1684	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1685
1686	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1687		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1688	else
1689		p1 = 1;
1690
1691
1692	switch (p0) {
1693	case DPLL_CFGCR2_PDIV_1:
1694		p0 = 1;
1695		break;
1696	case DPLL_CFGCR2_PDIV_2:
1697		p0 = 2;
1698		break;
1699	case DPLL_CFGCR2_PDIV_3:
1700		p0 = 3;
1701		break;
1702	case DPLL_CFGCR2_PDIV_7_INVALID:
1703		/*
1704		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1705		 * so handle it the same way as PDIV_7.
1706		 */
1707		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1708		fallthrough;
1709	case DPLL_CFGCR2_PDIV_7:
1710		p0 = 7;
1711		break;
1712	default:
1713		MISSING_CASE(p0);
1714		return 0;
1715	}
1716
1717	switch (p2) {
1718	case DPLL_CFGCR2_KDIV_5:
1719		p2 = 5;
1720		break;
1721	case DPLL_CFGCR2_KDIV_2:
1722		p2 = 2;
1723		break;
1724	case DPLL_CFGCR2_KDIV_3:
1725		p2 = 3;
1726		break;
1727	case DPLL_CFGCR2_KDIV_1:
1728		p2 = 1;
1729		break;
1730	default:
1731		MISSING_CASE(p2);
1732		return 0;
1733	}
1734
1735	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1736		   ref_clock;
1737
1738	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1739		    ref_clock / 0x8000;
1740
1741	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1742		return 0;
1743
1744	return dco_freq / (p0 * p1 * p2 * 5);
1745}
1746
1747static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1748{
1749	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1750	struct skl_wrpll_params wrpll_params = {};
1751	u32 ctrl1, cfgcr1, cfgcr2;
1752	int ret;
1753
1754	/*
1755	 * See comment in intel_dpll_hw_state to understand why we always use 0
1756	 * as the DPLL id in this function.
1757	 */
1758	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1759
1760	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1761
1762	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1763				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1764	if (ret)
1765		return ret;
1766
1767	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1768		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1769		wrpll_params.dco_integer;
1770
1771	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1772		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1773		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1774		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1775		wrpll_params.central_freq;
1776
1777	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1778	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1779	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1780
1781	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1782							&crtc_state->dpll_hw_state);
1783
1784	return 0;
1785}
1786
1787static int
1788skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1789{
1790	u32 ctrl1;
1791
1792	/*
1793	 * See comment in intel_dpll_hw_state to understand why we always use 0
1794	 * as the DPLL id in this function.
1795	 */
1796	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1797	switch (crtc_state->port_clock / 2) {
1798	case 81000:
1799		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1800		break;
1801	case 135000:
1802		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1803		break;
1804	case 270000:
1805		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1806		break;
1807		/* eDP 1.4 rates */
1808	case 162000:
1809		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1810		break;
1811	case 108000:
1812		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1813		break;
1814	case 216000:
1815		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1816		break;
1817	}
1818
1819	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1820
1821	return 0;
1822}
1823
1824static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1825				  const struct intel_shared_dpll *pll,
1826				  const struct intel_dpll_hw_state *pll_state)
1827{
1828	int link_clock = 0;
1829
1830	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1831		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1832	case DPLL_CTRL1_LINK_RATE_810:
1833		link_clock = 81000;
1834		break;
1835	case DPLL_CTRL1_LINK_RATE_1080:
1836		link_clock = 108000;
1837		break;
1838	case DPLL_CTRL1_LINK_RATE_1350:
1839		link_clock = 135000;
1840		break;
1841	case DPLL_CTRL1_LINK_RATE_1620:
1842		link_clock = 162000;
1843		break;
1844	case DPLL_CTRL1_LINK_RATE_2160:
1845		link_clock = 216000;
1846		break;
1847	case DPLL_CTRL1_LINK_RATE_2700:
1848		link_clock = 270000;
1849		break;
1850	default:
1851		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1852		break;
1853	}
1854
1855	return link_clock * 2;
1856}
1857
1858static int skl_compute_dpll(struct intel_atomic_state *state,
1859			    struct intel_crtc *crtc,
1860			    struct intel_encoder *encoder)
1861{
1862	struct intel_crtc_state *crtc_state =
1863		intel_atomic_get_new_crtc_state(state, crtc);
1864
1865	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1866		return skl_ddi_hdmi_pll_dividers(crtc_state);
1867	else if (intel_crtc_has_dp_encoder(crtc_state))
1868		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1869	else
1870		return -EINVAL;
1871}
1872
1873static int skl_get_dpll(struct intel_atomic_state *state,
1874			struct intel_crtc *crtc,
1875			struct intel_encoder *encoder)
1876{
1877	struct intel_crtc_state *crtc_state =
1878		intel_atomic_get_new_crtc_state(state, crtc);
1879	struct intel_shared_dpll *pll;
1880
1881	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1882		pll = intel_find_shared_dpll(state, crtc,
1883					     &crtc_state->dpll_hw_state,
1884					     BIT(DPLL_ID_SKL_DPLL0));
1885	else
1886		pll = intel_find_shared_dpll(state, crtc,
1887					     &crtc_state->dpll_hw_state,
1888					     BIT(DPLL_ID_SKL_DPLL3) |
1889					     BIT(DPLL_ID_SKL_DPLL2) |
1890					     BIT(DPLL_ID_SKL_DPLL1));
1891	if (!pll)
1892		return -EINVAL;
1893
1894	intel_reference_shared_dpll(state, crtc,
1895				    pll, &crtc_state->dpll_hw_state);
1896
1897	crtc_state->shared_dpll = pll;
1898
1899	return 0;
1900}
1901
1902static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1903				const struct intel_shared_dpll *pll,
1904				const struct intel_dpll_hw_state *pll_state)
1905{
1906	/*
1907	 * ctrl1 register is already shifted for each pll, just use 0 to get
1908	 * the internal shift for each field
1909	 */
1910	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1911		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1912	else
1913		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1914}
1915
1916static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1917{
1918	/* No SSC ref */
1919	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1920}
1921
1922static void skl_dump_hw_state(struct drm_i915_private *i915,
1923			      const struct intel_dpll_hw_state *hw_state)
1924{
1925	drm_dbg_kms(&i915->drm, "dpll_hw_state: "
1926		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1927		      hw_state->ctrl1,
1928		      hw_state->cfgcr1,
1929		      hw_state->cfgcr2);
1930}
1931
1932static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1933	.enable = skl_ddi_pll_enable,
1934	.disable = skl_ddi_pll_disable,
1935	.get_hw_state = skl_ddi_pll_get_hw_state,
1936	.get_freq = skl_ddi_pll_get_freq,
1937};
1938
1939static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1940	.enable = skl_ddi_dpll0_enable,
1941	.disable = skl_ddi_dpll0_disable,
1942	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1943	.get_freq = skl_ddi_pll_get_freq,
1944};
1945
1946static const struct dpll_info skl_plls[] = {
1947	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
1948	  .flags = INTEL_DPLL_ALWAYS_ON, },
1949	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
1950	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
1951	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
1952	{}
1953};
1954
1955static const struct intel_dpll_mgr skl_pll_mgr = {
1956	.dpll_info = skl_plls,
1957	.compute_dplls = skl_compute_dpll,
1958	.get_dplls = skl_get_dpll,
1959	.put_dplls = intel_put_dpll,
1960	.update_ref_clks = skl_update_dpll_ref_clks,
1961	.dump_hw_state = skl_dump_hw_state,
1962};
1963
1964static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1965			       struct intel_shared_dpll *pll)
1966{
1967	u32 temp;
1968	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1969	enum dpio_phy phy;
1970	enum dpio_channel ch;
1971
1972	bxt_port_to_phy_channel(i915, port, &phy, &ch);
1973
1974	/* Non-SSC reference */
1975	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
1976
1977	if (IS_GEMINILAKE(i915)) {
1978		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
1979			     0, PORT_PLL_POWER_ENABLE);
1980
1981		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
1982				 PORT_PLL_POWER_STATE), 200))
1983			drm_err(&i915->drm,
1984				"Power state not set for PLL:%d\n", port);
1985	}
1986
1987	/* Disable 10 bit clock */
1988	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
1989		     PORT_PLL_10BIT_CLK_ENABLE, 0);
1990
1991	/* Write P1 & P2 */
1992	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
1993		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
1994
1995	/* Write M2 integer */
1996	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
1997		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
1998
1999	/* Write N */
2000	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2001		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);
2002
2003	/* Write M2 fraction */
2004	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2005		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
2006
2007	/* Write M2 fraction enable */
2008	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2009		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
2010
2011	/* Write coeff */
2012	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2013	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2014	temp &= ~PORT_PLL_INT_COEFF_MASK;
2015	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2016	temp |= pll->state.hw_state.pll6;
2017	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2018
2019	/* Write calibration val */
2020	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2021		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
2022
2023	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2024		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
2025
2026	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2027	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2028	temp &= ~PORT_PLL_DCO_AMP_MASK;
2029	temp |= pll->state.hw_state.pll10;
2030	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2031
2032	/* Recalibrate with new settings */
2033	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2034	temp |= PORT_PLL_RECALIBRATE;
2035	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2036	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2037	temp |= pll->state.hw_state.ebb4;
2038	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2039
2040	/* Enable PLL */
2041	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2042	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2043
2044	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2045			200))
2046		drm_err(&i915->drm, "PLL %d not locked\n", port);
2047
2048	if (IS_GEMINILAKE(i915)) {
2049		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2050		temp |= DCC_DELAY_RANGE_2;
2051		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2052	}
2053
2054	/*
2055	 * While writing the group register programs all lanes at once, we can
2056	 * only read back individual lane registers, so we pick lanes 0/1 for that.
2057	 */
2058	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2059	temp &= ~LANE_STAGGER_MASK;
2060	temp &= ~LANESTAGGER_STRAP_OVRD;
2061	temp |= pll->state.hw_state.pcsdw12;
2062	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2063}
2064
2065static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2066				struct intel_shared_dpll *pll)
2067{
2068	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2069
2070	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2071	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2072
2073	if (IS_GEMINILAKE(i915)) {
2074		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2075			     PORT_PLL_POWER_ENABLE, 0);
2076
2077		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2078				  PORT_PLL_POWER_STATE), 200))
2079			drm_err(&i915->drm,
2080				"Power state not reset for PLL:%d\n", port);
2081	}
2082}
2083
2084static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2085				     struct intel_shared_dpll *pll,
2086				     struct intel_dpll_hw_state *hw_state)
2087{
2088	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2089	intel_wakeref_t wakeref;
2090	enum dpio_phy phy;
2091	enum dpio_channel ch;
2092	u32 val;
2093	bool ret;
2094
2095	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2096
2097	wakeref = intel_display_power_get_if_enabled(i915,
2098						     POWER_DOMAIN_DISPLAY_CORE);
2099	if (!wakeref)
2100		return false;
2101
2102	ret = false;
2103
2104	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2105	if (!(val & PORT_PLL_ENABLE))
2106		goto out;
2107
2108	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2109	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2110
2111	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2112	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2113
2114	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2115	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2116
2117	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2118	hw_state->pll1 &= PORT_PLL_N_MASK;
2119
2120	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2121	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2122
2123	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2124	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2125
2126	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2127	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2128			  PORT_PLL_INT_COEFF_MASK |
2129			  PORT_PLL_GAIN_CTL_MASK;
2130
2131	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2132	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2133
2134	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2135	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2136
2137	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2138	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2139			   PORT_PLL_DCO_AMP_MASK;
2140
2141	/*
2142	 * While writing the group register programs all lanes at once, we can
2143	 * only read individual lane registers. All lanes are configured the same
2144	 * way, so just read out lanes 0/1 here and note if lanes 2/3 differ.
2145	 */
2146	hw_state->pcsdw12 = intel_de_read(i915,
2147					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2148	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2149		drm_dbg(&i915->drm,
2150			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2151			hw_state->pcsdw12,
2152			intel_de_read(i915,
2153				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2154	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2155
2156	ret = true;
2157
2158out:
2159	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2160
2161	return ret;
2162}
2163
2164/* pre-calculated values for DP linkrates */
2165static const struct dpll bxt_dp_clk_val[] = {
2166	/* m2 is .22 binary fixed point */
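	/* The decimal in each entry's comment is m2 / 2^22, e.g. 0x819999a -> 32.4. */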
2167	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2168	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2169	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2170	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2171	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2172	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2173	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2174};
2175
2176static int
2177bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2178			  struct dpll *clk_div)
2179{
2180	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2181
2182	/*
2183	 * Calculate the HDMI dividers.
2184	 *
2185	 * FIXME: tie the following calculation into i9xx_crtc_compute_clock.
2186	 */
2187	if (!bxt_find_best_dpll(crtc_state, clk_div))
2188		return -EINVAL;
2189
2190	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2191
2192	return 0;
2193}
2194
2195static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2196				    struct dpll *clk_div)
2197{
2198	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2199	int i;
2200
2201	*clk_div = bxt_dp_clk_val[0];
2202	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2203		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2204			*clk_div = bxt_dp_clk_val[i];
2205			break;
2206		}
2207	}
2208
2209	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2210
2211	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2212		    clk_div->dot != crtc_state->port_clock);
2213}
2214
2215static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2216				     const struct dpll *clk_div)
2217{
2218	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2219	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2220	int clock = crtc_state->port_clock;
2221	int vco = clk_div->vco;
2222	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2223	u32 lanestagger;
2224
2225	if (vco >= 6200000 && vco <= 6700000) {
2226		prop_coef = 4;
2227		int_coef = 9;
2228		gain_ctl = 3;
2229		targ_cnt = 8;
2230	} else if ((vco > 5400000 && vco < 6200000) ||
2231			(vco >= 4800000 && vco < 5400000)) {
2232		prop_coef = 5;
2233		int_coef = 11;
2234		gain_ctl = 3;
2235		targ_cnt = 9;
2236	} else if (vco == 5400000) {
2237		prop_coef = 3;
2238		int_coef = 8;
2239		gain_ctl = 1;
2240		targ_cnt = 9;
2241	} else {
2242		drm_err(&i915->drm, "Invalid VCO\n");
2243		return -EINVAL;
2244	}
2245
2246	if (clock > 270000)
2247		lanestagger = 0x18;
2248	else if (clock > 135000)
2249		lanestagger = 0x0d;
2250	else if (clock > 67000)
2251		lanestagger = 0x07;
2252	else if (clock > 33000)
2253		lanestagger = 0x04;
2254	else
2255		lanestagger = 0x02;
2256
2257	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2258	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2259	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2260	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2261
2262	if (clk_div->m2 & 0x3fffff)
2263		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2264
2265	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2266		PORT_PLL_INT_COEFF(int_coef) |
2267		PORT_PLL_GAIN_CTL(gain_ctl);
2268
2269	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2270
2271	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2272
2273	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2274		PORT_PLL_DCO_AMP_OVR_EN_H;
2275
2276	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2277
2278	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2279
2280	return 0;
2281}
2282
2283static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2284				const struct intel_shared_dpll *pll,
2285				const struct intel_dpll_hw_state *pll_state)
2286{
2287	struct dpll clock;
2288
2289	clock.m1 = 2;
2290	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2291	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2292		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2293	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2294	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2295	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2296
2297	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2298}
2299
2300static int
2301bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2302{
2303	struct dpll clk_div = {};
2304
2305	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2306
2307	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2308}
2309
2310static int
2311bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2312{
2313	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2314	struct dpll clk_div = {};
2315	int ret;
2316
2317	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2318
2319	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2320	if (ret)
2321		return ret;
2322
2323	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2324						      &crtc_state->dpll_hw_state);
2325
2326	return 0;
2327}
2328
2329static int bxt_compute_dpll(struct intel_atomic_state *state,
2330			    struct intel_crtc *crtc,
2331			    struct intel_encoder *encoder)
2332{
2333	struct intel_crtc_state *crtc_state =
2334		intel_atomic_get_new_crtc_state(state, crtc);
2335
2336	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2337		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2338	else if (intel_crtc_has_dp_encoder(crtc_state))
2339		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2340	else
2341		return -EINVAL;
2342}
2343
2344static int bxt_get_dpll(struct intel_atomic_state *state,
2345			struct intel_crtc *crtc,
2346			struct intel_encoder *encoder)
2347{
2348	struct intel_crtc_state *crtc_state =
2349		intel_atomic_get_new_crtc_state(state, crtc);
2350	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2351	struct intel_shared_dpll *pll;
2352	enum intel_dpll_id id;
2353
2354	/* 1:1 mapping between ports and PLLs */
2355	id = (enum intel_dpll_id) encoder->port;
2356	pll = intel_get_shared_dpll_by_id(i915, id);
2357
2358	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2359		    crtc->base.base.id, crtc->base.name, pll->info->name);
2360
2361	intel_reference_shared_dpll(state, crtc,
2362				    pll, &crtc_state->dpll_hw_state);
2363
2364	crtc_state->shared_dpll = pll;
2365
2366	return 0;
2367}
2368
2369static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2370{
2371	i915->display.dpll.ref_clks.ssc = 100000;
2372	i915->display.dpll.ref_clks.nssc = 100000;
2373	/* DSI non-SSC ref 19.2MHz */
2374}
2375
2376static void bxt_dump_hw_state(struct drm_i915_private *i915,
2377			      const struct intel_dpll_hw_state *hw_state)
2378{
2379	drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2380		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2381		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2382		    hw_state->ebb0,
2383		    hw_state->ebb4,
2384		    hw_state->pll0,
2385		    hw_state->pll1,
2386		    hw_state->pll2,
2387		    hw_state->pll3,
2388		    hw_state->pll6,
2389		    hw_state->pll8,
2390		    hw_state->pll9,
2391		    hw_state->pll10,
2392		    hw_state->pcsdw12);
2393}
2394
2395static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2396	.enable = bxt_ddi_pll_enable,
2397	.disable = bxt_ddi_pll_disable,
2398	.get_hw_state = bxt_ddi_pll_get_hw_state,
2399	.get_freq = bxt_ddi_pll_get_freq,
2400};
2401
2402static const struct dpll_info bxt_plls[] = {
2403	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2404	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2405	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2406	{}
2407};
2408
2409static const struct intel_dpll_mgr bxt_pll_mgr = {
2410	.dpll_info = bxt_plls,
2411	.compute_dplls = bxt_compute_dpll,
2412	.get_dplls = bxt_get_dpll,
2413	.put_dplls = intel_put_dpll,
2414	.update_ref_clks = bxt_update_dpll_ref_clks,
2415	.dump_hw_state = bxt_dump_hw_state,
2416};
2417
2418static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2419				      int *qdiv, int *kdiv)
2420{
2421	/* even dividers */
2422	if (bestdiv % 2 == 0) {
2423		if (bestdiv == 2) {
2424			*pdiv = 2;
2425			*qdiv = 1;
2426			*kdiv = 1;
2427		} else if (bestdiv % 4 == 0) {
2428			*pdiv = 2;
2429			*qdiv = bestdiv / 4;
2430			*kdiv = 2;
2431		} else if (bestdiv % 6 == 0) {
2432			*pdiv = 3;
2433			*qdiv = bestdiv / 6;
2434			*kdiv = 2;
2435		} else if (bestdiv % 5 == 0) {
2436			*pdiv = 5;
2437			*qdiv = bestdiv / 10;
2438			*kdiv = 2;
2439		} else if (bestdiv % 14 == 0) {
2440			*pdiv = 7;
2441			*qdiv = bestdiv / 14;
2442			*kdiv = 2;
2443		}
2444	} else {
2445		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2446			*pdiv = bestdiv;
2447			*qdiv = 1;
2448			*kdiv = 1;
2449		} else { /* 9, 15, 21 */
2450			*pdiv = bestdiv / 3;
2451			*qdiv = 1;
2452			*kdiv = 3;
2453		}
2454	}
2455}
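/*
 * Worked example (illustrative): bestdiv = 20 is even and divisible by 4,
 * so pdiv = 2, qdiv = 20 / 4 = 5, kdiv = 2 (2 * 5 * 2 = 20).  bestdiv = 15
 * is odd and not 3, 5 or 7, so pdiv = 15 / 3 = 5, qdiv = 1, kdiv = 3.
 */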
2456
2457static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2458				      u32 dco_freq, u32 ref_freq,
2459				      int pdiv, int qdiv, int kdiv)
2460{
2461	u32 dco;
2462
2463	switch (kdiv) {
2464	case 1:
2465		params->kdiv = 1;
2466		break;
2467	case 2:
2468		params->kdiv = 2;
2469		break;
2470	case 3:
2471		params->kdiv = 4;
2472		break;
2473	default:
2474		WARN(1, "Incorrect KDiv\n");
2475	}
2476
2477	switch (pdiv) {
2478	case 2:
2479		params->pdiv = 1;
2480		break;
2481	case 3:
2482		params->pdiv = 2;
2483		break;
2484	case 5:
2485		params->pdiv = 4;
2486		break;
2487	case 7:
2488		params->pdiv = 8;
2489		break;
2490	default:
2491		WARN(1, "Incorrect PDiv\n");
2492	}
2493
2494	WARN_ON(kdiv != 2 && qdiv != 1);
2495
2496	params->qdiv_ratio = qdiv;
2497	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2498
2499	dco = div_u64((u64)dco_freq << 15, ref_freq);
2500
2501	params->dco_integer = dco >> 15;
2502	params->dco_fraction = dco & 0x7fff;
2503}
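/*
 * Worked example (illustrative): with dco_freq = 8100000 kHz and
 * ref_freq = 19200 kHz, dco = 8100000 * 2^15 / 19200 = 13824000, so
 * dco_integer = 421 (0x1A5) and dco_fraction = 0x7000 (0.875 * 2^15),
 * matching the 8.1 GHz entry of icl_dp_combo_pll_19_2MHz_values below.
 */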
2504
2505/*
2506 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2507 * Program half of the nominal DCO divider fraction value.
2508 */
2509static bool
2510ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2511{
2512	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2513		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2514		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2515		 i915->display.dpll.ref_clks.nssc == 38400;
2516}
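/*
 * Illustrative effect of the workaround: a nominal dco_fraction of 0x7000
 * is programmed as DIV_ROUND_CLOSEST(0x7000, 2) = 0x3800 by
 * icl_calc_dpll_state() and doubled back again by
 * icl_ddi_combo_pll_get_freq().
 */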
2517
2518struct icl_combo_pll_params {
2519	int clock;
2520	struct skl_wrpll_params wrpll;
2521};
2522
2523/*
2524 * These values are already adjusted: they're the bits we write to the
2525 * registers, not the logical values.
2526 */
2527static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2528	{ 540000,
2529	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2530	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2531	{ 270000,
2532	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2533	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2534	{ 162000,
2535	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2536	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2537	{ 324000,
2538	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2539	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2540	{ 216000,
2541	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2542	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2543	{ 432000,
2544	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2545	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2546	{ 648000,
2547	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2548	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2549	{ 810000,
2550	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2551	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2552};
2553
2554
2555/* Also used for 38.4 MHz values. */
2556static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2557	{ 540000,
2558	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2559	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2560	{ 270000,
2561	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2562	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2563	{ 162000,
2564	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2565	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2566	{ 324000,
2567	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2568	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2569	{ 216000,
2570	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2571	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2572	{ 432000,
2573	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2574	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2575	{ 648000,
2576	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2577	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2578	{ 810000,
2579	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2580	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2581};
2582
2583static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2584	.dco_integer = 0x151, .dco_fraction = 0x4000,
2585	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2586};
2587
2588static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2589	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2590	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2591};
2592
2593static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2594	.dco_integer = 0x54, .dco_fraction = 0x3000,
2595	/* the following params are unused */
2596	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2597};
2598
2599static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2600	.dco_integer = 0x43, .dco_fraction = 0x4000,
2601	/* the following params are unused */
2602};
2603
2604static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2605				 struct skl_wrpll_params *pll_params)
2606{
2607	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2608	const struct icl_combo_pll_params *params =
2609		i915->display.dpll.ref_clks.nssc == 24000 ?
2610		icl_dp_combo_pll_24MHz_values :
2611		icl_dp_combo_pll_19_2MHz_values;
2612	int clock = crtc_state->port_clock;
2613	int i;
2614
2615	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2616		if (clock == params[i].clock) {
2617			*pll_params = params[i].wrpll;
2618			return 0;
2619		}
2620	}
2621
2622	MISSING_CASE(clock);
2623	return -EINVAL;
2624}
2625
2626static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2627			    struct skl_wrpll_params *pll_params)
2628{
2629	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2630
2631	if (DISPLAY_VER(i915) >= 12) {
2632		switch (i915->display.dpll.ref_clks.nssc) {
2633		default:
2634			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2635			fallthrough;
2636		case 19200:
2637		case 38400:
2638			*pll_params = tgl_tbt_pll_19_2MHz_values;
2639			break;
2640		case 24000:
2641			*pll_params = tgl_tbt_pll_24MHz_values;
2642			break;
2643		}
2644	} else {
2645		switch (i915->display.dpll.ref_clks.nssc) {
2646		default:
2647			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2648			fallthrough;
2649		case 19200:
2650		case 38400:
2651			*pll_params = icl_tbt_pll_19_2MHz_values;
2652			break;
2653		case 24000:
2654			*pll_params = icl_tbt_pll_24MHz_values;
2655			break;
2656		}
2657	}
2658
2659	return 0;
2660}
2661
2662static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2663				    const struct intel_shared_dpll *pll,
2664				    const struct intel_dpll_hw_state *pll_state)
2665{
2666	/*
2667	 * The PLL outputs multiple frequencies at the same time, selection is
2668	 * made at DDI clock mux level.
2669	 */
2670	drm_WARN_ON(&i915->drm, 1);
2671
2672	return 0;
2673}
2674
2675static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2676{
2677	int ref_clock = i915->display.dpll.ref_clks.nssc;
2678
2679	/*
2680	 * For ICL+, the spec states: if reference frequency is 38.4,
2681	 * use 19.2 because the DPLL automatically divides that by 2.
2682	 */
2683	if (ref_clock == 38400)
2684		ref_clock = 19200;
2685
2686	return ref_clock;
2687}
2688
2689static int
2690icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2691	       struct skl_wrpll_params *wrpll_params)
2692{
2693	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2694	int ref_clock = icl_wrpll_ref_clock(i915);
2695	u32 afe_clock = crtc_state->port_clock * 5;
2696	u32 dco_min = 7998000;
2697	u32 dco_max = 10000000;
2698	u32 dco_mid = (dco_min + dco_max) / 2;
2699	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2700					 18, 20, 24, 28, 30, 32,  36,  40,
2701					 42, 44, 48, 50, 52, 54,  56,  60,
2702					 64, 66, 68, 70, 72, 76,  78,  80,
2703					 84, 88, 90, 92, 96, 98, 100, 102,
2704					  3,  5,  7,  9, 15, 21 };
2705	u32 dco, best_dco = 0, dco_centrality = 0;
2706	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2707	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2708
2709	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2710		dco = afe_clock * dividers[d];
2711
2712		if (dco <= dco_max && dco >= dco_min) {
2713			dco_centrality = abs(dco - dco_mid);
2714
2715			if (dco_centrality < best_dco_centrality) {
2716				best_dco_centrality = dco_centrality;
2717				best_div = dividers[d];
2718				best_dco = dco;
2719			}
2720		}
2721	}
2722
2723	if (best_div == 0)
2724		return -EINVAL;
2725
2726	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2727	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2728				  pdiv, qdiv, kdiv);
2729
2730	return 0;
2731}
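/*
 * Worked example (illustrative, assuming a 594 MHz HDMI port clock):
 * afe_clock = 594000 * 5 = 2970000 kHz, and only divider 3 puts the DCO
 * in range (2970000 * 3 = 8910000 kHz), so best_div = 3 and
 * icl_wrpll_get_multipliers() yields pdiv = 3, qdiv = 1, kdiv = 1.
 */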
2732
2733static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2734				      const struct intel_shared_dpll *pll,
2735				      const struct intel_dpll_hw_state *pll_state)
2736{
2737	int ref_clock = icl_wrpll_ref_clock(i915);
2738	u32 dco_fraction;
2739	u32 p0, p1, p2, dco_freq;
2740
2741	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2742	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2743
2744	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2745		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2746			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2747	else
2748		p1 = 1;
2749
2750	switch (p0) {
2751	case DPLL_CFGCR1_PDIV_2:
2752		p0 = 2;
2753		break;
2754	case DPLL_CFGCR1_PDIV_3:
2755		p0 = 3;
2756		break;
2757	case DPLL_CFGCR1_PDIV_5:
2758		p0 = 5;
2759		break;
2760	case DPLL_CFGCR1_PDIV_7:
2761		p0 = 7;
2762		break;
2763	}
2764
2765	switch (p2) {
2766	case DPLL_CFGCR1_KDIV_1:
2767		p2 = 1;
2768		break;
2769	case DPLL_CFGCR1_KDIV_2:
2770		p2 = 2;
2771		break;
2772	case DPLL_CFGCR1_KDIV_3:
2773		p2 = 3;
2774		break;
2775	}
2776
2777	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2778		   ref_clock;
2779
2780	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2781		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2782
2783	if (ehl_combo_pll_div_frac_wa_needed(i915))
2784		dco_fraction *= 2;
2785
2786	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2787
2788	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2789		return 0;
2790
2791	return dco_freq / (p0 * p1 * p2 * 5);
2792}
2793
2794static void icl_calc_dpll_state(struct drm_i915_private *i915,
2795				const struct skl_wrpll_params *pll_params,
2796				struct intel_dpll_hw_state *pll_state)
2797{
2798	u32 dco_fraction = pll_params->dco_fraction;
2799
2800	if (ehl_combo_pll_div_frac_wa_needed(i915))
2801		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2802
2803	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2804			    pll_params->dco_integer;
2805
2806	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2807			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2808			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2809			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2810
2811	if (DISPLAY_VER(i915) >= 12)
2812		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2813	else
2814		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2815
2816	if (i915->display.vbt.override_afc_startup)
2817		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2818}
2819
2820static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2821				    u32 *target_dco_khz,
2822				    struct intel_dpll_hw_state *state,
2823				    bool is_dkl)
2824{
2825	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2826	u32 dco_min_freq, dco_max_freq;
2827	unsigned int i;
2828	int div2;
2829
2830	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2831	dco_max_freq = is_dp ? 8100000 : 10000000;
2832
2833	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2834		int div1 = div1_vals[i];
2835
2836		for (div2 = 10; div2 > 0; div2--) {
2837			int dco = div1 * div2 * clock_khz * 5;
2838			int a_divratio, tlinedrv, inputsel;
2839			u32 hsdiv;
2840
2841			if (dco < dco_min_freq || dco > dco_max_freq)
2842				continue;
2843
2844			if (div2 >= 2) {
2845				/*
2846				 * Note: a_divratio not matching TGL BSpec
2847				 * algorithm but matching hardcoded values and
2848				 * working on HW for DP alt-mode at least
2849				 */
2850				a_divratio = is_dp ? 10 : 5;
2851				tlinedrv = is_dkl ? 1 : 2;
2852			} else {
2853				a_divratio = 5;
2854				tlinedrv = 0;
2855			}
2856			inputsel = is_dp ? 0 : 1;
2857
2858			switch (div1) {
2859			default:
2860				MISSING_CASE(div1);
2861				fallthrough;
2862			case 2:
2863				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2864				break;
2865			case 3:
2866				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2867				break;
2868			case 5:
2869				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2870				break;
2871			case 7:
2872				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2873				break;
2874			}
2875
2876			*target_dco_khz = dco;
2877
2878			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2879
2880			state->mg_clktop2_coreclkctl1 =
2881				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2882
2883			state->mg_clktop2_hsclkctl =
2884				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2885				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2886				hsdiv |
2887				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2888
2889			return 0;
2890		}
2891	}
2892
2893	return -EINVAL;
2894}
2895
2896/*
2897 * The specification for this function uses real numbers, so the math had to be
2898 * adapted to integer-only calculation, that's why it looks so different.
2899 */
2900static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2901				 struct intel_dpll_hw_state *pll_state)
2902{
2903	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2904	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2905	int clock = crtc_state->port_clock;
2906	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2907	u32 iref_ndiv, iref_trim, iref_pulse_w;
2908	u32 prop_coeff, int_coeff;
2909	u32 tdc_targetcnt, feedfwgain;
2910	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2911	u64 tmp;
2912	bool use_ssc = false;
2913	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2914	bool is_dkl = DISPLAY_VER(i915) >= 12;
2915	int ret;
2916
2917	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2918				       pll_state, is_dkl);
2919	if (ret)
2920		return ret;
2921
2922	m1div = 2;
2923	m2div_int = dco_khz / (refclk_khz * m1div);
2924	if (m2div_int > 255) {
2925		if (!is_dkl) {
2926			m1div = 4;
2927			m2div_int = dco_khz / (refclk_khz * m1div);
2928		}
2929
2930		if (m2div_int > 255)
2931			return -EINVAL;
2932	}
2933	m2div_rem = dco_khz % (refclk_khz * m1div);
2934
2935	tmp = (u64)m2div_rem * (1 << 22);
2936	do_div(tmp, refclk_khz * m1div);
2937	m2div_frac = tmp;
2938
2939	switch (refclk_khz) {
2940	case 19200:
2941		iref_ndiv = 1;
2942		iref_trim = 28;
2943		iref_pulse_w = 1;
2944		break;
2945	case 24000:
2946		iref_ndiv = 1;
2947		iref_trim = 25;
2948		iref_pulse_w = 2;
2949		break;
2950	case 38400:
2951		iref_ndiv = 2;
2952		iref_trim = 28;
2953		iref_pulse_w = 1;
2954		break;
2955	default:
2956		MISSING_CASE(refclk_khz);
2957		return -EINVAL;
2958	}
2959
2960	/*
2961	 * tdc_res = 0.000003
2962	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2963	 *
2964	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2965	 * was supposed to be a division, but we rearranged the operations of
2966	 * the formula to avoid early divisions so we don't multiply the
2967	 * rounding errors.
2968	 *
2969	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2970	 * we also rearrange to work with integers.
2971	 *
2972	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2973	 * last division by 10.
2974	 */
2975	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
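	/*
	 * Worked example (illustrative): refclk_khz = 24000 gives
	 * tdc_targetcnt = (2000000000 / (132 * 24000) + 5) / 10 = 63,
	 * and refclk_khz = 19200 gives 79.
	 */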
2976
2977	/*
2978	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2979	 * 32 bits. That's not a problem since we round the division down
2980	 * anyway.
2981	 */
2982	feedfwgain = (use_ssc || m2div_rem > 0) ?
2983		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2984
2985	if (dco_khz >= 9000000) {
2986		prop_coeff = 5;
2987		int_coeff = 10;
2988	} else {
2989		prop_coeff = 4;
2990		int_coeff = 8;
2991	}
2992
2993	if (use_ssc) {
2994		tmp = mul_u32_u32(dco_khz, 47 * 32);
2995		do_div(tmp, refclk_khz * m1div * 10000);
2996		ssc_stepsize = tmp;
2997
2998		tmp = mul_u32_u32(dco_khz, 1000);
2999		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3000	} else {
3001		ssc_stepsize = 0;
3002		ssc_steplen = 0;
3003	}
3004	ssc_steplog = 4;
3005
3006	/* write pll_state calculations */
3007	if (is_dkl) {
3008		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3009					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3010					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3011					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3012		if (i915->display.vbt.override_afc_startup) {
3013			u8 val = i915->display.vbt.override_afc_startup_val;
3014
3015			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3016		}
3017
3018		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3019					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3020
3021		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3022					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3023					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3024					(use_ssc ? DKL_PLL_SSC_EN : 0);
3025
3026		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3027					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3028
3029		pll_state->mg_pll_tdc_coldst_bias =
3030				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3031				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3032
3033	} else {
3034		pll_state->mg_pll_div0 =
3035			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3036			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3037			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3038
3039		pll_state->mg_pll_div1 =
3040			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3041			MG_PLL_DIV1_DITHER_DIV_2 |
3042			MG_PLL_DIV1_NDIVRATIO(1) |
3043			MG_PLL_DIV1_FBPREDIV(m1div);
3044
3045		pll_state->mg_pll_lf =
3046			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3047			MG_PLL_LF_AFCCNTSEL_512 |
3048			MG_PLL_LF_GAINCTRL(1) |
3049			MG_PLL_LF_INT_COEFF(int_coeff) |
3050			MG_PLL_LF_PROP_COEFF(prop_coeff);
3051
3052		pll_state->mg_pll_frac_lock =
3053			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3054			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3055			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3056			MG_PLL_FRAC_LOCK_DCODITHEREN |
3057			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3058		if (use_ssc || m2div_rem > 0)
3059			pll_state->mg_pll_frac_lock |=
3060				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3061
3062		pll_state->mg_pll_ssc =
3063			(use_ssc ? MG_PLL_SSC_EN : 0) |
3064			MG_PLL_SSC_TYPE(2) |
3065			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3066			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3067			MG_PLL_SSC_FLLEN |
3068			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3069
3070		pll_state->mg_pll_tdc_coldst_bias =
3071			MG_PLL_TDC_COLDST_COLDSTART |
3072			MG_PLL_TDC_COLDST_IREFINT_EN |
3073			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3074			MG_PLL_TDC_TDCOVCCORR_EN |
3075			MG_PLL_TDC_TDCSEL(3);
3076
3077		pll_state->mg_pll_bias =
3078			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3079			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3080			MG_PLL_BIAS_BIAS_BONUS(10) |
3081			MG_PLL_BIAS_BIASCAL_EN |
3082			MG_PLL_BIAS_CTRIM(12) |
3083			MG_PLL_BIAS_VREF_RDAC(4) |
3084			MG_PLL_BIAS_IREFTRIM(iref_trim);
3085
3086		if (refclk_khz == 38400) {
3087			pll_state->mg_pll_tdc_coldst_bias_mask =
3088				MG_PLL_TDC_COLDST_COLDSTART;
3089			pll_state->mg_pll_bias_mask = 0;
3090		} else {
3091			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3092			pll_state->mg_pll_bias_mask = -1U;
3093		}
3094
3095		pll_state->mg_pll_tdc_coldst_bias &=
3096			pll_state->mg_pll_tdc_coldst_bias_mask;
3097		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3098	}
3099
3100	return 0;
3101}
3102
3103static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3104				   const struct intel_shared_dpll *pll,
3105				   const struct intel_dpll_hw_state *pll_state)
3106{
3107	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3108	u64 tmp;
3109
3110	ref_clock = i915->display.dpll.ref_clks.nssc;
3111
3112	if (DISPLAY_VER(i915) >= 12) {
3113		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3114		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3115		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3116
3117		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3118			m2_frac = pll_state->mg_pll_bias &
3119				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3120			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3121		} else {
3122			m2_frac = 0;
3123		}
3124	} else {
3125		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3126		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3127
3128		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3129			m2_frac = pll_state->mg_pll_div0 &
3130				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3131			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3132		} else {
3133			m2_frac = 0;
3134		}
3135	}
3136
3137	switch (pll_state->mg_clktop2_hsclkctl &
3138		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3139	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3140		div1 = 2;
3141		break;
3142	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3143		div1 = 3;
3144		break;
3145	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3146		div1 = 5;
3147		break;
3148	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3149		div1 = 7;
3150		break;
3151	default:
3152		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3153		return 0;
3154	}
3155
3156	div2 = (pll_state->mg_clktop2_hsclkctl &
3157		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3158		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3159
3160	/* a div2 value of 0 is the same as 1, i.e. no divider */
3161	if (div2 == 0)
3162		div2 = 1;
3163
3164	/*
3165	 * Adjust the original formula to delay the division by 2^22 in order to
3166	 * minimize possible rounding errors.
3167	 */
3168	tmp = (u64)m1 * m2_int * ref_clock +
3169	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3170	tmp = div_u64(tmp, 5 * div1 * div2);
3171
3172	return tmp;
3173}
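/*
 * Worked example (illustrative, assuming a 2.7 GHz DP link with a 19.2 MHz
 * reference): icl_calc_mg_pll_state() picks div1 = 3, div2 = 2 for an
 * 8100000 kHz DCO, with m1 = 2, m2_int = 210, m2_frac = 0x3c0000 (0.9375
 * in Q22).  Reversing that here: 2 * 210 * 19200 +
 * ((2 * 0x3c0000 * 19200) >> 22) = 8064000 + 36000 = 8100000, and
 * 8100000 / (5 * 3 * 2) = 270000 kHz, the original port clock.
 */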
3174
3175/**
3176 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3177 * @crtc_state: state for the CRTC to select the DPLL for
3178 * @port_dpll_id: the active @port_dpll_id to select
3179 *
3180 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3181 * CRTC.
3182 */
3183void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3184			      enum icl_port_dpll_id port_dpll_id)
3185{
3186	struct icl_port_dpll *port_dpll =
3187		&crtc_state->icl_port_dplls[port_dpll_id];
3188
3189	crtc_state->shared_dpll = port_dpll->pll;
3190	crtc_state->dpll_hw_state = port_dpll->hw_state;
3191}
3192
3193static void icl_update_active_dpll(struct intel_atomic_state *state,
3194				   struct intel_crtc *crtc,
3195				   struct intel_encoder *encoder)
3196{
3197	struct intel_crtc_state *crtc_state =
3198		intel_atomic_get_new_crtc_state(state, crtc);
3199	struct intel_digital_port *primary_port;
3200	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3201
3202	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3203		enc_to_mst(encoder)->primary :
3204		enc_to_dig_port(encoder);
3205
3206	if (primary_port &&
3207	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3208	     intel_tc_port_in_legacy_mode(primary_port)))
3209		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3210
3211	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3212}
3213
3214static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3215				      struct intel_crtc *crtc)
3216{
3217	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3218	struct intel_crtc_state *crtc_state =
3219		intel_atomic_get_new_crtc_state(state, crtc);
3220	struct icl_port_dpll *port_dpll =
3221		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3222	struct skl_wrpll_params pll_params = {};
3223	int ret;
3224
3225	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3226	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3227		ret = icl_calc_wrpll(crtc_state, &pll_params);
3228	else
3229		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3230
3231	if (ret)
3232		return ret;
3233
3234	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3235
3236	/* this is mainly for the fastset check */
3237	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3238
3239	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3240							    &port_dpll->hw_state);
3241
3242	return 0;
3243}
3244
3245static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3246				  struct intel_crtc *crtc,
3247				  struct intel_encoder *encoder)
3248{
3249	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3250	struct intel_crtc_state *crtc_state =
3251		intel_atomic_get_new_crtc_state(state, crtc);
3252	struct icl_port_dpll *port_dpll =
3253		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3254	enum port port = encoder->port;
3255	unsigned long dpll_mask;
3256
3257	if (IS_ALDERLAKE_S(i915)) {
3258		dpll_mask =
3259			BIT(DPLL_ID_DG1_DPLL3) |
3260			BIT(DPLL_ID_DG1_DPLL2) |
3261			BIT(DPLL_ID_ICL_DPLL1) |
3262			BIT(DPLL_ID_ICL_DPLL0);
3263	} else if (IS_DG1(i915)) {
3264		if (port == PORT_D || port == PORT_E) {
3265			dpll_mask =
3266				BIT(DPLL_ID_DG1_DPLL2) |
3267				BIT(DPLL_ID_DG1_DPLL3);
3268		} else {
3269			dpll_mask =
3270				BIT(DPLL_ID_DG1_DPLL0) |
3271				BIT(DPLL_ID_DG1_DPLL1);
3272		}
3273	} else if (IS_ROCKETLAKE(i915)) {
3274		dpll_mask =
3275			BIT(DPLL_ID_EHL_DPLL4) |
3276			BIT(DPLL_ID_ICL_DPLL1) |
3277			BIT(DPLL_ID_ICL_DPLL0);
3278	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3279		   port != PORT_A) {
3280		dpll_mask =
3281			BIT(DPLL_ID_EHL_DPLL4) |
3282			BIT(DPLL_ID_ICL_DPLL1) |
3283			BIT(DPLL_ID_ICL_DPLL0);
3284	} else {
3285		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3286	}
3287
3288	/* Eliminate DPLLs from consideration if reserved by HTI */
3289	dpll_mask &= ~intel_hti_dpll_mask(i915);
3290
3291	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3292						&port_dpll->hw_state,
3293						dpll_mask);
3294	if (!port_dpll->pll)
3295		return -EINVAL;
3296
3297	intel_reference_shared_dpll(state, crtc,
3298				    port_dpll->pll, &port_dpll->hw_state);
3299
3300	icl_update_active_dpll(state, crtc, encoder);
3301
3302	return 0;
3303}
3304
3305static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3306				    struct intel_crtc *crtc)
3307{
3308	struct drm_i915_private *i915 = to_i915(state->base.dev);
3309	struct intel_crtc_state *crtc_state =
3310		intel_atomic_get_new_crtc_state(state, crtc);
3311	struct icl_port_dpll *port_dpll =
3312		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3313	struct skl_wrpll_params pll_params = {};
3314	int ret;
3315
3316	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3317	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3318	if (ret)
3319		return ret;
3320
3321	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3322
3323	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3324	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3325	if (ret)
3326		return ret;
3327
3328	/* this is mainly for the fastset check */
3329	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3330
3331	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3332							 &port_dpll->hw_state);
3333
3334	return 0;
3335}
3336
3337static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3338				struct intel_crtc *crtc,
3339				struct intel_encoder *encoder)
3340{
3341	struct drm_i915_private *i915 = to_i915(state->base.dev);
3342	struct intel_crtc_state *crtc_state =
3343		intel_atomic_get_new_crtc_state(state, crtc);
3344	struct icl_port_dpll *port_dpll =
3345		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3346	enum intel_dpll_id dpll_id;
3347	int ret;
3348
3349	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3350	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3351						&port_dpll->hw_state,
3352						BIT(DPLL_ID_ICL_TBTPLL));
3353	if (!port_dpll->pll)
3354		return -EINVAL;
3355	intel_reference_shared_dpll(state, crtc,
3356				    port_dpll->pll, &port_dpll->hw_state);
3357
3358
3359	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3360	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3361							 encoder->port));
3362	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3363						&port_dpll->hw_state,
3364						BIT(dpll_id));
3365	if (!port_dpll->pll) {
3366		ret = -EINVAL;
3367		goto err_unreference_tbt_pll;
3368	}
3369	intel_reference_shared_dpll(state, crtc,
3370				    port_dpll->pll, &port_dpll->hw_state);
3371
3372	icl_update_active_dpll(state, crtc, encoder);
3373
3374	return 0;
3375
3376err_unreference_tbt_pll:
3377	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3378	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3379
3380	return ret;
3381}
3382
3383static int icl_compute_dplls(struct intel_atomic_state *state,
3384			     struct intel_crtc *crtc,
3385			     struct intel_encoder *encoder)
3386{
3387	struct drm_i915_private *i915 = to_i915(state->base.dev);
3388	enum phy phy = intel_port_to_phy(i915, encoder->port);
3389
3390	if (intel_phy_is_combo(i915, phy))
3391		return icl_compute_combo_phy_dpll(state, crtc);
3392	else if (intel_phy_is_tc(i915, phy))
3393		return icl_compute_tc_phy_dplls(state, crtc);
3394
3395	MISSING_CASE(phy);
3396
3397	return 0;
3398}
3399
3400static int icl_get_dplls(struct intel_atomic_state *state,
3401			 struct intel_crtc *crtc,
3402			 struct intel_encoder *encoder)
3403{
3404	struct drm_i915_private *i915 = to_i915(state->base.dev);
3405	enum phy phy = intel_port_to_phy(i915, encoder->port);
3406
3407	if (intel_phy_is_combo(i915, phy))
3408		return icl_get_combo_phy_dpll(state, crtc, encoder);
3409	else if (intel_phy_is_tc(i915, phy))
3410		return icl_get_tc_phy_dplls(state, crtc, encoder);
3411
3412	MISSING_CASE(phy);
3413
3414	return -EINVAL;
3415}
3416
3417static void icl_put_dplls(struct intel_atomic_state *state,
3418			  struct intel_crtc *crtc)
3419{
3420	const struct intel_crtc_state *old_crtc_state =
3421		intel_atomic_get_old_crtc_state(state, crtc);
3422	struct intel_crtc_state *new_crtc_state =
3423		intel_atomic_get_new_crtc_state(state, crtc);
3424	enum icl_port_dpll_id id;
3425
3426	new_crtc_state->shared_dpll = NULL;
3427
3428	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3429		const struct icl_port_dpll *old_port_dpll =
3430			&old_crtc_state->icl_port_dplls[id];
3431		struct icl_port_dpll *new_port_dpll =
3432			&new_crtc_state->icl_port_dplls[id];
3433
3434		new_port_dpll->pll = NULL;
3435
3436		if (!old_port_dpll->pll)
3437			continue;
3438
3439		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3440	}
3441}
3442
3443static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3444				struct intel_shared_dpll *pll,
3445				struct intel_dpll_hw_state *hw_state)
3446{
3447	const enum intel_dpll_id id = pll->info->id;
3448	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3449	intel_wakeref_t wakeref;
3450	bool ret = false;
3451	u32 val;
3452
3453	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3454
3455	wakeref = intel_display_power_get_if_enabled(i915,
3456						     POWER_DOMAIN_DISPLAY_CORE);
3457	if (!wakeref)
3458		return false;
3459
3460	val = intel_de_read(i915, enable_reg);
3461	if (!(val & PLL_ENABLE))
3462		goto out;
3463
3464	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3465						  MG_REFCLKIN_CTL(tc_port));
3466	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3467
3468	hw_state->mg_clktop2_coreclkctl1 =
3469		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3470	hw_state->mg_clktop2_coreclkctl1 &=
3471		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3472
3473	hw_state->mg_clktop2_hsclkctl =
3474		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3475	hw_state->mg_clktop2_hsclkctl &=
3476		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3477		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3478		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3479		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3480
3481	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3482	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3483	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3484	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3485						   MG_PLL_FRAC_LOCK(tc_port));
3486	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3487
3488	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3489	hw_state->mg_pll_tdc_coldst_bias =
3490		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3491
3492	if (i915->display.dpll.ref_clks.nssc == 38400) {
3493		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3494		hw_state->mg_pll_bias_mask = 0;
3495	} else {
3496		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3497		hw_state->mg_pll_bias_mask = -1U;
3498	}
3499
3500	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3501	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3502
3503	ret = true;
3504out:
3505	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3506	return ret;
3507}
3508
3509static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3510				 struct intel_shared_dpll *pll,
3511				 struct intel_dpll_hw_state *hw_state)
3512{
3513	const enum intel_dpll_id id = pll->info->id;
3514	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3515	intel_wakeref_t wakeref;
3516	bool ret = false;
3517	u32 val;
3518
3519	wakeref = intel_display_power_get_if_enabled(i915,
3520						     POWER_DOMAIN_DISPLAY_CORE);
3521	if (!wakeref)
3522		return false;
3523
3524	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3525	if (!(val & PLL_ENABLE))
3526		goto out;
3527
3528	/*
3529	 * All registers read here have the same HIP_INDEX_REG even though
3530	 * they are on different building blocks
3531	 */
3532	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3533						       DKL_REFCLKIN_CTL(tc_port));
3534	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3535
3536	hw_state->mg_clktop2_hsclkctl =
3537		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3538	hw_state->mg_clktop2_hsclkctl &=
3539		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3540		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3541		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3542		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3543
3544	hw_state->mg_clktop2_coreclkctl1 =
3545		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3546	hw_state->mg_clktop2_coreclkctl1 &=
3547		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3548
3549	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3550	val = DKL_PLL_DIV0_MASK;
3551	if (i915->display.vbt.override_afc_startup)
3552		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3553	hw_state->mg_pll_div0 &= val;
3554
3555	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3556	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3557				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3558
3559	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3560	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3561				 DKL_PLL_SSC_STEP_LEN_MASK |
3562				 DKL_PLL_SSC_STEP_NUM_MASK |
3563				 DKL_PLL_SSC_EN);
3564
3565	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3566	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3567				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3568
3569	hw_state->mg_pll_tdc_coldst_bias =
3570		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3571	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3572					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3573
3574	ret = true;
3575out:
3576	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3577	return ret;
3578}
3579
3580static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3581				 struct intel_shared_dpll *pll,
3582				 struct intel_dpll_hw_state *hw_state,
3583				 i915_reg_t enable_reg)
3584{
3585	const enum intel_dpll_id id = pll->info->id;
3586	intel_wakeref_t wakeref;
3587	bool ret = false;
3588	u32 val;
3589
3590	wakeref = intel_display_power_get_if_enabled(i915,
3591						     POWER_DOMAIN_DISPLAY_CORE);
3592	if (!wakeref)
3593		return false;
3594
3595	val = intel_de_read(i915, enable_reg);
3596	if (!(val & PLL_ENABLE))
3597		goto out;
3598
3599	if (IS_ALDERLAKE_S(i915)) {
3600		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3601		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3602	} else if (IS_DG1(i915)) {
3603		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3604		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3605	} else if (IS_ROCKETLAKE(i915)) {
3606		hw_state->cfgcr0 = intel_de_read(i915,
3607						 RKL_DPLL_CFGCR0(id));
3608		hw_state->cfgcr1 = intel_de_read(i915,
3609						 RKL_DPLL_CFGCR1(id));
3610	} else if (DISPLAY_VER(i915) >= 12) {
3611		hw_state->cfgcr0 = intel_de_read(i915,
3612						 TGL_DPLL_CFGCR0(id));
3613		hw_state->cfgcr1 = intel_de_read(i915,
3614						 TGL_DPLL_CFGCR1(id));
3615		if (i915->display.vbt.override_afc_startup) {
3616			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3617			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3618		}
3619	} else {
3620		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3621		    id == DPLL_ID_EHL_DPLL4) {
3622			hw_state->cfgcr0 = intel_de_read(i915,
3623							 ICL_DPLL_CFGCR0(4));
3624			hw_state->cfgcr1 = intel_de_read(i915,
3625							 ICL_DPLL_CFGCR1(4));
3626		} else {
3627			hw_state->cfgcr0 = intel_de_read(i915,
3628							 ICL_DPLL_CFGCR0(id));
3629			hw_state->cfgcr1 = intel_de_read(i915,
3630							 ICL_DPLL_CFGCR1(id));
3631		}
3632	}
3633
3634	ret = true;
3635out:
3636	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3637	return ret;
3638}
3639
3640static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3641				   struct intel_shared_dpll *pll,
3642				   struct intel_dpll_hw_state *hw_state)
3643{
3644	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3645
3646	return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3647}
3648
3649static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3650				 struct intel_shared_dpll *pll,
3651				 struct intel_dpll_hw_state *hw_state)
3652{
3653	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
3654}
3655
3656static void icl_dpll_write(struct drm_i915_private *i915,
3657			   struct intel_shared_dpll *pll)
3658{
3659	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3660	const enum intel_dpll_id id = pll->info->id;
3661	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3662
3663	if (IS_ALDERLAKE_S(i915)) {
3664		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3665		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3666	} else if (IS_DG1(i915)) {
3667		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3668		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3669	} else if (IS_ROCKETLAKE(i915)) {
3670		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3671		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3672	} else if (DISPLAY_VER(i915) >= 12) {
3673		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3674		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3675		div0_reg = TGL_DPLL0_DIV0(id);
3676	} else {
3677		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3678		    id == DPLL_ID_EHL_DPLL4) {
3679			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3680			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3681		} else {
3682			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3683			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3684		}
3685	}
3686
3687	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3688	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3689	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3690			 !i915_mmio_reg_valid(div0_reg));
3691	if (i915->display.vbt.override_afc_startup &&
3692	    i915_mmio_reg_valid(div0_reg))
3693		intel_de_rmw(i915, div0_reg,
3694			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3695	intel_de_posting_read(i915, cfgcr1_reg);
3696}
3697
3698static void icl_mg_pll_write(struct drm_i915_private *i915,
3699			     struct intel_shared_dpll *pll)
3700{
3701	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3702	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3703
3704	/*
3705	 * Some of the following registers have reserved fields, so program
3706	 * these with RMW based on a mask. The mask can be fixed or generated
3707	 * during the calc/readout phase if the mask depends on some other HW
3708	 * state like refclk, see icl_calc_mg_pll_state().
3709	 */
3710	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3711		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3712
3713	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3714		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3715		     hw_state->mg_clktop2_coreclkctl1);
3716
3717	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3718		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3719		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3720		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3721		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3722		     hw_state->mg_clktop2_hsclkctl);
3723
3724	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3725	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3726	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3727	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3728		       hw_state->mg_pll_frac_lock);
3729	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3730
3731	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3732		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3733
3734	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3735		     hw_state->mg_pll_tdc_coldst_bias_mask,
3736		     hw_state->mg_pll_tdc_coldst_bias);
3737
3738	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3739}
3740
3741static void dkl_pll_write(struct drm_i915_private *i915,
3742			  struct intel_shared_dpll *pll)
3743{
3744	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3745	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3746	u32 val;
3747
3748	/*
3749	 * All registers programmed here have the same HIP_INDEX_REG even
3750	 * though they are on different building blocks
3751	 */
3752	/* All the registers are RMW */
3753	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3754	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3755	val |= hw_state->mg_refclkin_ctl;
3756	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3757
3758	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3759	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3760	val |= hw_state->mg_clktop2_coreclkctl1;
3761	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3762
3763	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3764	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3765		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3766		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3767		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3768	val |= hw_state->mg_clktop2_hsclkctl;
3769	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3770
3771	val = DKL_PLL_DIV0_MASK;
3772	if (i915->display.vbt.override_afc_startup)
3773		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3774	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3775			  hw_state->mg_pll_div0);
3776
3777	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3778	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3779		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3780	val |= hw_state->mg_pll_div1;
3781	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3782
3783	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3784	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3785		 DKL_PLL_SSC_STEP_LEN_MASK |
3786		 DKL_PLL_SSC_STEP_NUM_MASK |
3787		 DKL_PLL_SSC_EN);
3788	val |= hw_state->mg_pll_ssc;
3789	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3790
3791	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3792	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3793		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3794	val |= hw_state->mg_pll_bias;
3795	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3796
3797	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3798	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3799		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3800	val |= hw_state->mg_pll_tdc_coldst_bias;
3801	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3802
3803	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3804}
3805
3806static void icl_pll_power_enable(struct drm_i915_private *i915,
3807				 struct intel_shared_dpll *pll,
3808				 i915_reg_t enable_reg)
3809{
3810	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3811
3812	/*
3813	 * The spec says we need to "wait" but it also says it should be
3814	 * immediate.
3815	 */
3816	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3817		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3818			pll->info->id);
3819}
3820
3821static void icl_pll_enable(struct drm_i915_private *i915,
3822			   struct intel_shared_dpll *pll,
3823			   i915_reg_t enable_reg)
3824{
3825	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3826
3827	/* Timeout is actually 600us. */
3828	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3829		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3830}
3831
3832static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3833{
3834	u32 val;
3835
3836	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3837	    pll->info->id != DPLL_ID_ICL_DPLL0)
3838		return;
3839	/*
3840	 * Wa_16011069516:adl-p[a0]
3841	 *
3842	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3843	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3844	 * sanity check this assumption with a double read, which presumably
3845	 * returns the correct value even with clock gating on.
3846	 *
3847	 * Instead of the usual place for workarounds we apply this one here,
3848	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3849	 */
3850	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3851	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3852	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3853		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3854}
3855
3856static void combo_pll_enable(struct drm_i915_private *i915,
3857			     struct intel_shared_dpll *pll)
3858{
3859	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3860
3861	icl_pll_power_enable(i915, pll, enable_reg);
3862
3863	icl_dpll_write(i915, pll);
3864
3865	/*
3866	 * DVFS pre sequence would be here, but in our driver the cdclk code
3867	 * paths should already be setting the appropriate voltage, hence we do
3868	 * nothing here.
3869	 */
3870
3871	icl_pll_enable(i915, pll, enable_reg);
3872
3873	adlp_cmtg_clock_gating_wa(i915, pll);
3874
3875	/* DVFS post sequence would be here. See the comment above. */
3876}
3877
3878static void tbt_pll_enable(struct drm_i915_private *i915,
3879			   struct intel_shared_dpll *pll)
3880{
3881	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3882
3883	icl_dpll_write(i915, pll);
3884
3885	/*
3886	 * DVFS pre sequence would be here, but in our driver the cdclk code
3887	 * paths should already be setting the appropriate voltage, hence we do
3888	 * nothing here.
3889	 */
3890
3891	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3892
3893	/* DVFS post sequence would be here. See the comment above. */
3894}
3895
3896static void mg_pll_enable(struct drm_i915_private *i915,
3897			  struct intel_shared_dpll *pll)
3898{
3899	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3900
3901	icl_pll_power_enable(i915, pll, enable_reg);
3902
3903	if (DISPLAY_VER(i915) >= 12)
3904		dkl_pll_write(i915, pll);
3905	else
3906		icl_mg_pll_write(i915, pll);
3907
3908	/*
3909	 * DVFS pre sequence would be here, but in our driver the cdclk code
3910	 * paths should already be setting the appropriate voltage, hence we do
3911	 * nothing here.
3912	 */
3913
3914	icl_pll_enable(i915, pll, enable_reg);
3915
3916	/* DVFS post sequence would be here. See the comment above. */
3917}
3918
3919static void icl_pll_disable(struct drm_i915_private *i915,
3920			    struct intel_shared_dpll *pll,
3921			    i915_reg_t enable_reg)
3922{
3923	/* The first steps are done by intel_ddi_post_disable(). */
3924
3925	/*
3926	 * DVFS pre sequence would be here, but in our driver the cdclk code
3927	 * paths should already be setting the appropriate voltage, hence we do
3928	 * nothing here.
3929	 */
3930
3931	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3932
3933	/* Timeout is actually 1us. */
3934	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3935		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
3936
3937	/* DVFS post sequence would be here. See the comment above. */
3938
3939	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3940
3941	/*
3942	 * The spec says we need to "wait" but it also says it should be
3943	 * immediate.
3944	 */
3945	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3946		drm_err(&i915->drm, "PLL %d Power not disabled\n",
3947			pll->info->id);
3948}
3949
3950static void combo_pll_disable(struct drm_i915_private *i915,
3951			      struct intel_shared_dpll *pll)
3952{
3953	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3954
3955	icl_pll_disable(i915, pll, enable_reg);
3956}
3957
3958static void tbt_pll_disable(struct drm_i915_private *i915,
3959			    struct intel_shared_dpll *pll)
3960{
3961	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
3962}
3963
3964static void mg_pll_disable(struct drm_i915_private *i915,
3965			   struct intel_shared_dpll *pll)
3966{
3967	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3968
3969	icl_pll_disable(i915, pll, enable_reg);
3970}
3971
3972static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3973{
3974	/* No SSC ref */
3975	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3976}
3977
3978static void icl_dump_hw_state(struct drm_i915_private *i915,
3979			      const struct intel_dpll_hw_state *hw_state)
3980{
3981	drm_dbg_kms(&i915->drm,
3982		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3983		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3984		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3985		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3986		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3987		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3988		    hw_state->cfgcr0, hw_state->cfgcr1,
3989		    hw_state->div0,
3990		    hw_state->mg_refclkin_ctl,
3991		    hw_state->mg_clktop2_coreclkctl1,
3992		    hw_state->mg_clktop2_hsclkctl,
3993		    hw_state->mg_pll_div0,
3994		    hw_state->mg_pll_div1,
3995		    hw_state->mg_pll_lf,
3996		    hw_state->mg_pll_frac_lock,
3997		    hw_state->mg_pll_ssc,
3998		    hw_state->mg_pll_bias,
3999		    hw_state->mg_pll_tdc_coldst_bias);
4000}
4001
4002static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4003	.enable = combo_pll_enable,
4004	.disable = combo_pll_disable,
4005	.get_hw_state = combo_pll_get_hw_state,
4006	.get_freq = icl_ddi_combo_pll_get_freq,
4007};
4008
4009static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4010	.enable = tbt_pll_enable,
4011	.disable = tbt_pll_disable,
4012	.get_hw_state = tbt_pll_get_hw_state,
4013	.get_freq = icl_ddi_tbt_pll_get_freq,
4014};
4015
4016static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4017	.enable = mg_pll_enable,
4018	.disable = mg_pll_disable,
4019	.get_hw_state = mg_pll_get_hw_state,
4020	.get_freq = icl_ddi_mg_pll_get_freq,
4021};
4022
4023static const struct dpll_info icl_plls[] = {
4024	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4025	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4026	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
4027	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4028	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4029	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4030	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4031	{}
4032};
4033
4034static const struct intel_dpll_mgr icl_pll_mgr = {
4035	.dpll_info = icl_plls,
4036	.compute_dplls = icl_compute_dplls,
4037	.get_dplls = icl_get_dplls,
4038	.put_dplls = icl_put_dplls,
4039	.update_active_dpll = icl_update_active_dpll,
4040	.update_ref_clks = icl_update_dpll_ref_clks,
4041	.dump_hw_state = icl_dump_hw_state,
4042};
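/*
 * The *_pll_mgr structures in this file are plain per-platform vtables:
 * intel_shared_dpll_init() below selects one and stores it in
 * i915->display.dpll.mgr, and the exported wrappers then simply dispatch
 * through it, roughly:
 *
 *	return i915->display.dpll.mgr->compute_dplls(state, crtc, encoder);
 */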
4043
4044static const struct dpll_info ehl_plls[] = {
4045	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4046	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4047	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4048	  .power_domain = POWER_DOMAIN_DC_OFF, },
4049	{}
4050};
4051
4052static const struct intel_dpll_mgr ehl_pll_mgr = {
4053	.dpll_info = ehl_plls,
4054	.compute_dplls = icl_compute_dplls,
4055	.get_dplls = icl_get_dplls,
4056	.put_dplls = icl_put_dplls,
4057	.update_ref_clks = icl_update_dpll_ref_clks,
4058	.dump_hw_state = icl_dump_hw_state,
4059};
4060
4061static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4062	.enable = mg_pll_enable,
4063	.disable = mg_pll_disable,
4064	.get_hw_state = dkl_pll_get_hw_state,
4065	.get_freq = icl_ddi_mg_pll_get_freq,
4066};
4067
4068static const struct dpll_info tgl_plls[] = {
4069	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4070	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4071	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
4072	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4073	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4074	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4075	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4076	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4077	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4078	{}
4079};
4080
4081static const struct intel_dpll_mgr tgl_pll_mgr = {
4082	.dpll_info = tgl_plls,
4083	.compute_dplls = icl_compute_dplls,
4084	.get_dplls = icl_get_dplls,
4085	.put_dplls = icl_put_dplls,
4086	.update_active_dpll = icl_update_active_dpll,
4087	.update_ref_clks = icl_update_dpll_ref_clks,
4088	.dump_hw_state = icl_dump_hw_state,
4089};
4090
4091static const struct dpll_info rkl_plls[] = {
4092	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4093	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4094	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4095	{}
4096};
4097
4098static const struct intel_dpll_mgr rkl_pll_mgr = {
4099	.dpll_info = rkl_plls,
4100	.compute_dplls = icl_compute_dplls,
4101	.get_dplls = icl_get_dplls,
4102	.put_dplls = icl_put_dplls,
4103	.update_ref_clks = icl_update_dpll_ref_clks,
4104	.dump_hw_state = icl_dump_hw_state,
4105};
4106
4107static const struct dpll_info dg1_plls[] = {
4108	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4109	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4110	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4111	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4112	{}
4113};
4114
4115static const struct intel_dpll_mgr dg1_pll_mgr = {
4116	.dpll_info = dg1_plls,
4117	.compute_dplls = icl_compute_dplls,
4118	.get_dplls = icl_get_dplls,
4119	.put_dplls = icl_put_dplls,
4120	.update_ref_clks = icl_update_dpll_ref_clks,
4121	.dump_hw_state = icl_dump_hw_state,
4122};
4123
4124static const struct dpll_info adls_plls[] = {
4125	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4126	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4127	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4128	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4129	{}
4130};
4131
4132static const struct intel_dpll_mgr adls_pll_mgr = {
4133	.dpll_info = adls_plls,
4134	.compute_dplls = icl_compute_dplls,
4135	.get_dplls = icl_get_dplls,
4136	.put_dplls = icl_put_dplls,
4137	.update_ref_clks = icl_update_dpll_ref_clks,
4138	.dump_hw_state = icl_dump_hw_state,
4139};
4140
4141static const struct dpll_info adlp_plls[] = {
4142	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4143	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4144	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
4145	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4146	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4147	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4148	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4149	{}
4150};
4151
4152static const struct intel_dpll_mgr adlp_pll_mgr = {
4153	.dpll_info = adlp_plls,
4154	.compute_dplls = icl_compute_dplls,
4155	.get_dplls = icl_get_dplls,
4156	.put_dplls = icl_put_dplls,
4157	.update_active_dpll = icl_update_active_dpll,
4158	.update_ref_clks = icl_update_dpll_ref_clks,
4159	.dump_hw_state = icl_dump_hw_state,
4160};
4161
4162/**
4163 * intel_shared_dpll_init - Initialize shared DPLLs
4164 * @i915: i915 device
4165 *
4166 * Initialize shared DPLLs for @i915.
4167 */
4168void intel_shared_dpll_init(struct drm_i915_private *i915)
4169{
4170	const struct intel_dpll_mgr *dpll_mgr = NULL;
4171	const struct dpll_info *dpll_info;
4172	int i;
4173
4174	mutex_init(&i915->display.dpll.lock);
4175
4176	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4177		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4178		dpll_mgr = NULL;
4179	else if (IS_ALDERLAKE_P(i915))
4180		dpll_mgr = &adlp_pll_mgr;
4181	else if (IS_ALDERLAKE_S(i915))
4182		dpll_mgr = &adls_pll_mgr;
4183	else if (IS_DG1(i915))
4184		dpll_mgr = &dg1_pll_mgr;
4185	else if (IS_ROCKETLAKE(i915))
4186		dpll_mgr = &rkl_pll_mgr;
4187	else if (DISPLAY_VER(i915) >= 12)
4188		dpll_mgr = &tgl_pll_mgr;
4189	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4190		dpll_mgr = &ehl_pll_mgr;
4191	else if (DISPLAY_VER(i915) >= 11)
4192		dpll_mgr = &icl_pll_mgr;
4193	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4194		dpll_mgr = &bxt_pll_mgr;
4195	else if (DISPLAY_VER(i915) == 9)
4196		dpll_mgr = &skl_pll_mgr;
4197	else if (HAS_DDI(i915))
4198		dpll_mgr = &hsw_pll_mgr;
4199	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4200		dpll_mgr = &pch_pll_mgr;
4201
4202	if (!dpll_mgr)
4203		return;
4204
4205	dpll_info = dpll_mgr->dpll_info;
4206
4207	for (i = 0; dpll_info[i].name; i++) {
4208		if (drm_WARN_ON(&i915->drm,
4209				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4210			break;
4211
4212		/* must fit into unsigned long bitmask on 32bit */
4213		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4214			break;
4215
4216		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4217		i915->display.dpll.shared_dplls[i].index = i;
4218	}
4219
4220	i915->display.dpll.mgr = dpll_mgr;
4221	i915->display.dpll.num_shared_dpll = i;
4222}
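/*
 * Wiring up a new platform follows the pattern of the tables above: a
 * dpll_info array terminated by an empty entry, an intel_dpll_mgr pointing at
 * it, and a branch in intel_shared_dpll_init(). A hypothetical sketch (the
 * "foo" names are placeholders, not real platform code):
 *
 *	static const struct dpll_info foo_plls[] = {
 *		{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
 *		{}
 *	};
 *
 *	static const struct intel_dpll_mgr foo_pll_mgr = {
 *		.dpll_info = foo_plls,
 *		.compute_dplls = icl_compute_dplls,
 *		.get_dplls = icl_get_dplls,
 *		.put_dplls = icl_put_dplls,
 *		.update_ref_clks = icl_update_dpll_ref_clks,
 *		.dump_hw_state = icl_dump_hw_state,
 *	};
 *
 *	...
 *	else if (IS_FOO(i915))
 *		dpll_mgr = &foo_pll_mgr;
 */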
4223
4224/**
4225 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4226 * @state: atomic state
4227 * @crtc: CRTC to compute DPLLs for
4228 * @encoder: encoder
4229 *
4230 * This function computes the DPLL state for the given CRTC and encoder.
4231 *
4232 * The new configuration in the atomic commit @state is made effective by
4233 * calling intel_shared_dpll_swap_state().
4234 *
4235 * Returns:
4236 * 0 on success, negative error code on failure.
4237 */
4238int intel_compute_shared_dplls(struct intel_atomic_state *state,
4239			       struct intel_crtc *crtc,
4240			       struct intel_encoder *encoder)
4241{
4242	struct drm_i915_private *i915 = to_i915(state->base.dev);
4243	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4244
4245	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4246		return -EINVAL;
4247
4248	return dpll_mgr->compute_dplls(state, crtc, encoder);
4249}
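/*
 * Typical call site (sketch only): a platform's clock compute hook computes
 * the shared DPLL state for the new crtc state and propagates failures, e.g.
 *
 *	ret = intel_compute_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 */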
4250
4251/**
4252 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4253 * @state: atomic state
4254 * @crtc: CRTC to reserve DPLLs for
4255 * @encoder: encoder
4256 *
4257 * This function reserves all required DPLLs for the given CRTC and encoder
4258 * combination in the current atomic commit @state and the new @crtc atomic
4259 * state.
4260 *
4261 * The new configuration in the atomic commit @state is made effective by
4262 * calling intel_shared_dpll_swap_state().
4263 *
4264 * The reserved DPLLs should be released by calling
4265 * intel_release_shared_dplls().
4266 *
4267 * Returns:
4268 * 0 if all required DPLLs were successfully reserved,
4269 * negative error code otherwise.
4270 */
4271int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4272			       struct intel_crtc *crtc,
4273			       struct intel_encoder *encoder)
4274{
4275	struct drm_i915_private *i915 = to_i915(state->base.dev);
4276	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4277
4278	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4279		return -EINVAL;
4280
4281	return dpll_mgr->get_dplls(state, crtc, encoder);
4282}
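/*
 * Reservation and release are expected to be paired across a modeset: a crtc
 * that acquires PLLs here drops them again via intel_release_shared_dplls()
 * once it no longer needs them. A rough sketch:
 *
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_release_shared_dplls(state, crtc);
 */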
4283
4284/**
4285 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4286 * @state: atomic state
4287 * @crtc: crtc from which the DPLLs are to be released
4288 *
4289 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4290 * from the current atomic commit @state and the old @crtc atomic state.
4291 *
4292 * The new configuration in the atomic commit @state is made effective by
4293 * calling intel_shared_dpll_swap_state().
4294 */
4295void intel_release_shared_dplls(struct intel_atomic_state *state,
4296				struct intel_crtc *crtc)
4297{
4298	struct drm_i915_private *i915 = to_i915(state->base.dev);
4299	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4300
4301	/*
4302	 * FIXME: this function is called for every platform having a
4303	 * compute_clock hook, even though the platform doesn't yet support
4304	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4305	 * called on those.
4306	 */
4307	if (!dpll_mgr)
4308		return;
4309
4310	dpll_mgr->put_dplls(state, crtc);
4311}
4312
4313/**
4314 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4315 * @state: atomic state
4316 * @crtc: the CRTC for which to update the active DPLL
4317 * @encoder: encoder determining the type of port DPLL
4318 *
4319 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4320 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4321 * DPLL selected will be based on the current mode of the encoder's port.
4322 */
4323void intel_update_active_dpll(struct intel_atomic_state *state,
4324			      struct intel_crtc *crtc,
4325			      struct intel_encoder *encoder)
4326{
4327	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4328	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4329
4330	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4331		return;
4332
4333	dpll_mgr->update_active_dpll(state, crtc, encoder);
4334}
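/*
 * On Type-C capable platforms the PLL actually driving a port can change
 * between the TBT PLL and the MG/TC PLL depending on the port mode; this hook
 * lets the encoder code pick the right one from the port DPLLs reserved
 * earlier.
 */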
4335
4336/**
4337 * intel_dpll_get_freq - calculate the DPLL's output frequency
4338 * @i915: i915 device
4339 * @pll: DPLL for which to calculate the output frequency
4340 * @pll_state: DPLL state from which to calculate the output frequency
4341 *
4342 * Return the output frequency corresponding to @pll's passed in @pll_state.
4343 */
4344int intel_dpll_get_freq(struct drm_i915_private *i915,
4345			const struct intel_shared_dpll *pll,
4346			const struct intel_dpll_hw_state *pll_state)
4347{
4348	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4349		return 0;
4350
4351	return pll->info->funcs->get_freq(i915, pll, pll_state);
4352}
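/*
 * Example (sketch): given a readout hw state this recovers the clock an
 * enabled PLL is currently generating, e.g.
 *
 *	int clock = intel_dpll_get_freq(i915, pll, &pll->state.hw_state);
 */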
4353
4354/**
4355 * intel_dpll_get_hw_state - read out the DPLL's hardware state
4356 * @i915: i915 device
4357 * @pll: DPLL for which to read out the hardware state
4358 * @hw_state: DPLL's hardware state
4359 *
4360 * Read out @pll's hardware state into @hw_state.
4361 */
4362bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4363			     struct intel_shared_dpll *pll,
4364			     struct intel_dpll_hw_state *hw_state)
4365{
4366	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4367}
4368
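/*
 * Reconstruct the software PLL tracking from the hardware: read back the PLL
 * state, take a wakeref for any power domain the PLL was found enabled with,
 * and rebuild pipe_mask from the crtcs currently using the PLL.
 */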
4369static void readout_dpll_hw_state(struct drm_i915_private *i915,
4370				  struct intel_shared_dpll *pll)
4371{
4372	struct intel_crtc *crtc;
4373
4374	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4375
4376	if (pll->on && pll->info->power_domain)
4377		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4378
4379	pll->state.pipe_mask = 0;
4380	for_each_intel_crtc(&i915->drm, crtc) {
4381		struct intel_crtc_state *crtc_state =
4382			to_intel_crtc_state(crtc->base.state);
4383
4384		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4385			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4386	}
4387	pll->active_mask = pll->state.pipe_mask;
4388
4389	drm_dbg_kms(&i915->drm,
4390		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4391		    pll->info->name, pll->state.pipe_mask, pll->on);
4392}
4393
4394void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4395{
4396	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4397		i915->display.dpll.mgr->update_ref_clks(i915);
4398}
4399
4400void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4401{
4402	struct intel_shared_dpll *pll;
4403	int i;
4404
4405	for_each_shared_dpll(i915, pll, i)
4406		readout_dpll_hw_state(i915, pll);
4407}
4408
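/*
 * Sanitization deals with PLLs left enabled at driver takeover (e.g. by the
 * BIOS/GOP): apply the ADL-P CMTG clock gating workaround and turn off any
 * PLL that is on but has no active users in the software state.
 */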
4409static void sanitize_dpll_state(struct drm_i915_private *i915,
4410				struct intel_shared_dpll *pll)
4411{
4412	if (!pll->on)
4413		return;
4414
4415	adlp_cmtg_clock_gating_wa(i915, pll);
4416
4417	if (pll->active_mask)
4418		return;
4419
4420	drm_dbg_kms(&i915->drm,
4421		    "%s enabled but not in use, disabling\n",
4422		    pll->info->name);
4423
4424	_intel_disable_shared_dpll(i915, pll);
4425}
4426
4427void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4428{
4429	struct intel_shared_dpll *pll;
4430	int i;
4431
4432	for_each_shared_dpll(i915, pll, i)
4433		sanitize_dpll_state(i915, pll);
4434}
4435
4436/**
4437 * intel_dpll_dump_hw_state - write hw_state to dmesg
4438 * @i915: i915 drm device
4439 * @hw_state: hw state to be written to the log
4440 *
4441 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4442 */
4443void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4444			      const struct intel_dpll_hw_state *hw_state)
4445{
4446	if (i915->display.dpll.mgr) {
4447		i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4448	} else {
4449		/* fallback for platforms that don't use the shared dpll
4450		 * infrastructure
4451		 */
4452		drm_dbg_kms(&i915->drm,
4453			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4454			    "fp0: 0x%x, fp1: 0x%x\n",
4455			    hw_state->dpll,
4456			    hw_state->dpll_md,
4457			    hw_state->fp0,
4458			    hw_state->fp1);
4459	}
4460}
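/*
 * Note that the fallback above covers the older, pre-shared-DPLL platforms
 * whose hardware state is just the classic DPLL/DPLL_MD/FP0/FP1 register
 * values; everything else goes through the per-platform dump_hw_state() hook.
 */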
4461
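/*
 * Cross-check one PLL: re-read its hardware state and compare it against the
 * software tracking (on/off state, per-pipe active and reference masks, and
 * the cached hw_state itself). With a NULL crtc only the global consistency
 * of the masks is checked, which is how intel_shared_dpll_verify_disabled()
 * uses it.
 */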
4462static void
4463verify_single_dpll_state(struct drm_i915_private *i915,
4464			 struct intel_shared_dpll *pll,
4465			 struct intel_crtc *crtc,
4466			 const struct intel_crtc_state *new_crtc_state)
4467{
4468	struct intel_dpll_hw_state dpll_hw_state;
4469	u8 pipe_mask;
4470	bool active;
4471
4472	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4473
4474	drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
4475
4476	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4477
4478	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4479		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4480				"pll in active use but not on in sw tracking\n");
4481		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4482				"pll is on but not used by any active pipe\n");
4483		I915_STATE_WARN(i915, pll->on != active,
4484				"pll on state mismatch (expected %i, found %i)\n",
4485				pll->on, active);
4486	}
4487
4488	if (!crtc) {
4489		I915_STATE_WARN(i915,
4490				pll->active_mask & ~pll->state.pipe_mask,
4491				"more active pll users than references: 0x%x vs 0x%x\n",
4492				pll->active_mask, pll->state.pipe_mask);
4493
4494		return;
4495	}
4496
4497	pipe_mask = BIT(crtc->pipe);
4498
4499	if (new_crtc_state->hw.active)
4500		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4501				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4502				pipe_name(crtc->pipe), pll->active_mask);
4503	else
4504		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4505				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4506				pipe_name(crtc->pipe), pll->active_mask);
4507
4508	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4509			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4510			pipe_mask, pll->state.pipe_mask);
4511
4512	I915_STATE_WARN(i915,
4513			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4514					  sizeof(dpll_hw_state)),
4515			"pll hw state mismatch\n");
4516}
4517
4518void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4519				    struct intel_crtc *crtc)
4520{
4521	struct drm_i915_private *i915 = to_i915(state->base.dev);
4522	const struct intel_crtc_state *old_crtc_state =
4523		intel_atomic_get_old_crtc_state(state, crtc);
4524	const struct intel_crtc_state *new_crtc_state =
4525		intel_atomic_get_new_crtc_state(state, crtc);
4526
4527	if (new_crtc_state->shared_dpll)
4528		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4529					 crtc, new_crtc_state);
4530
4531	if (old_crtc_state->shared_dpll &&
4532	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4533		u8 pipe_mask = BIT(crtc->pipe);
4534		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4535
4536		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4537				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4538				pipe_name(crtc->pipe), pll->active_mask);
4539		I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
4540				"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4541				pipe_name(crtc->pipe), pll->state.pipe_mask);
4542	}
4543}
4544
4545void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4546{
4547	struct drm_i915_private *i915 = to_i915(state->base.dev);
4548	struct intel_shared_dpll *pll;
4549	int i;
4550
4551	for_each_shared_dpll(i915, pll, i)
4552		verify_single_dpll_state(i915, pll, NULL, NULL);
4553}