   1/*
   2 * Copyright © 2006-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include "intel_de.h"
  25#include "intel_display_types.h"
  26#include "intel_dpio_phy.h"
  27#include "intel_dpll.h"
  28#include "intel_dpll_mgr.h"
  29
  30/**
  31 * DOC: Display PLLs
  32 *
  33 * Display PLLs used for driving outputs vary by platform. While some have
  34 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  35 * from a pool. In the latter scenario, it is possible that multiple pipes
  36 * share a PLL if their configurations match.
  37 *
  38 * This file provides an abstraction over display PLLs. The function
  39 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  40 * users of a PLL are tracked and that tracking is integrated with the atomic
  41 * modeset interface. During an atomic operation, required PLLs can be reserved
  42 * for a given CRTC and encoder configuration by calling
  43 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
  44 * with intel_release_shared_dplls().
  45 * Changes to the users are first staged in the atomic state, and then made
  46 * effective by calling intel_shared_dpll_swap_state() during the atomic
  47 * commit phase.
  48 */
  49
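/*
 * Illustrative call flow (a sketch only; the kernel-doc above is the
 * authoritative description):
 *
 *   intel_reserve_shared_dplls()   - stage PLL reservations during atomic check
 *   intel_shared_dpll_swap_state() - make the staged state effective at commit
 *   intel_enable_shared_dpll()     - first CRTC user turns the PLL on
 *   ...
 *   intel_disable_shared_dpll()    - last CRTC user turns the PLL off
 *   intel_release_shared_dplls()   - drop the reservations again
 *
 * struct intel_dpll_mgr below is the per-platform hook table backing the
 * reserve/release/update steps of that flow.
 */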
  50struct intel_dpll_mgr {
  51	const struct dpll_info *dpll_info;
  52
  53	bool (*get_dplls)(struct intel_atomic_state *state,
  54			  struct intel_crtc *crtc,
  55			  struct intel_encoder *encoder);
  56	void (*put_dplls)(struct intel_atomic_state *state,
  57			  struct intel_crtc *crtc);
  58	void (*update_active_dpll)(struct intel_atomic_state *state,
  59				   struct intel_crtc *crtc,
  60				   struct intel_encoder *encoder);
  61	void (*update_ref_clks)(struct drm_i915_private *i915);
  62	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
  63			      const struct intel_dpll_hw_state *hw_state);
  64};
  65
  66static void
  67intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
  68				  struct intel_shared_dpll_state *shared_dpll)
  69{
  70	enum intel_dpll_id i;
  71
  72	/* Copy shared dpll state */
  73	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
  74		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
  75
  76		shared_dpll[i] = pll->state;
  77	}
  78}
  79
  80static struct intel_shared_dpll_state *
  81intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
  82{
  83	struct intel_atomic_state *state = to_intel_atomic_state(s);
  84
  85	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
  86
  87	if (!state->dpll_set) {
  88		state->dpll_set = true;
  89
  90		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
  91						  state->shared_dpll);
  92	}
  93
  94	return state->shared_dpll;
  95}
  96
  97/**
  98 * intel_get_shared_dpll_by_id - get a DPLL given its id
  99 * @dev_priv: i915 device instance
 100 * @id: pll id
 101 *
 102 * Returns:
 103 * A pointer to the DPLL with @id
 104 */
 105struct intel_shared_dpll *
 106intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 107			    enum intel_dpll_id id)
 108{
 109	return &dev_priv->dpll.shared_dplls[id];
 110}
 111
 112/**
 113 * intel_get_shared_dpll_id - get the id of a DPLL
 114 * @dev_priv: i915 device instance
 115 * @pll: the DPLL
 116 *
 117 * Returns:
 118 * The id of @pll
 119 */
 120enum intel_dpll_id
 121intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
 122			 struct intel_shared_dpll *pll)
 123{
 124	long pll_idx = pll - dev_priv->dpll.shared_dplls;
 125
 126	if (drm_WARN_ON(&dev_priv->drm,
 127			pll_idx < 0 ||
 128			pll_idx >= dev_priv->dpll.num_shared_dpll))
 129		return -1;
 130
 131	return pll_idx;
 132}
 133
 134/* For ILK+ */
 135void assert_shared_dpll(struct drm_i915_private *dev_priv,
 136			struct intel_shared_dpll *pll,
 137			bool state)
 138{
 139	bool cur_state;
 140	struct intel_dpll_hw_state hw_state;
 141
 142	if (drm_WARN(&dev_priv->drm, !pll,
 143		     "asserting DPLL %s with no DPLL\n", onoff(state)))
 144		return;
 145
 146	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
 147	I915_STATE_WARN(cur_state != state,
 148	     "%s assertion failure (expected %s, current %s)\n",
 149			pll->info->name, onoff(state), onoff(cur_state));
 150}
 151
 152static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 153{
 154	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
 155}
 156
 157enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 158{
 159	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
 160}
 161
 162static i915_reg_t
 163intel_combo_pll_enable_reg(struct drm_i915_private *i915,
 164			   struct intel_shared_dpll *pll)
 165{
 166	if (IS_DG1(i915))
 167		return DG1_DPLL_ENABLE(pll->info->id);
 168	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
 169		return MG_PLL_ENABLE(0);
 170
 171	return CNL_DPLL_ENABLE(pll->info->id);
 172}
 173
 174static i915_reg_t
 175intel_tc_pll_enable_reg(struct drm_i915_private *i915,
 176			struct intel_shared_dpll *pll)
 177{
 178	const enum intel_dpll_id id = pll->info->id;
 179	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
 180
 181	if (IS_ALDERLAKE_P(i915))
 182		return ADLP_PORTTC_PLL_ENABLE(tc_port);
 183
 184	return MG_PLL_ENABLE(tc_port);
 185}
 186
 187/**
 188 * intel_prepare_shared_dpll - call a dpll's prepare hook
 189 * @crtc_state: CRTC, and its state, which has a shared dpll
 190 *
 191 * This calls the PLL's prepare hook if it has one and if the PLL is not
 192 * already enabled. The prepare hook is platform specific.
 193 */
 194void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 195{
 196	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 197	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 198	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 199
 200	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
 201		return;
 202
 203	mutex_lock(&dev_priv->dpll.lock);
 204	drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
 205	if (!pll->active_mask) {
 206		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
 207		drm_WARN_ON(&dev_priv->drm, pll->on);
 208		assert_shared_dpll_disabled(dev_priv, pll);
 209
 210		pll->info->funcs->prepare(dev_priv, pll);
 211	}
 212	mutex_unlock(&dev_priv->dpll.lock);
 213}
 214
 215/**
 216 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 217 * @crtc_state: CRTC, and its state, which has a shared DPLL
 218 *
 219 * Enable the shared DPLL used by @crtc.
 220 */
 221void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 222{
 223	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 224	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 225	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 226	unsigned int pipe_mask = BIT(crtc->pipe);
 227	unsigned int old_mask;
 228
 229	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
 230		return;
 231
 232	mutex_lock(&dev_priv->dpll.lock);
 233	old_mask = pll->active_mask;
 234
 235	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
 236	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
 237		goto out;
 238
 239	pll->active_mask |= pipe_mask;
 240
 241	drm_dbg_kms(&dev_priv->drm,
 242		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 243		    pll->info->name, pll->active_mask, pll->on,
 244		    crtc->base.base.id, crtc->base.name);
 245
 246	if (old_mask) {
 247		drm_WARN_ON(&dev_priv->drm, !pll->on);
 248		assert_shared_dpll_enabled(dev_priv, pll);
 249		goto out;
 250	}
 251	drm_WARN_ON(&dev_priv->drm, pll->on);
 252
 253	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
 254	pll->info->funcs->enable(dev_priv, pll);
 255	pll->on = true;
 256
 257out:
 258	mutex_unlock(&dev_priv->dpll.lock);
 259}
 260
 261/**
 262 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 263 * @crtc_state: CRTC, and its state, which has a shared DPLL
 264 *
 265 * Disable the shared DPLL used by @crtc.
 266 */
 267void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 268{
 269	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 270	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 271	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 272	unsigned int pipe_mask = BIT(crtc->pipe);
 273
 274	/* PCH only available on ILK+ */
 275	if (DISPLAY_VER(dev_priv) < 5)
 276		return;
 277
 278	if (pll == NULL)
 279		return;
 280
 281	mutex_lock(&dev_priv->dpll.lock);
 282	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
 283		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
 284		     crtc->base.base.id, crtc->base.name))
 285		goto out;
 286
 287	drm_dbg_kms(&dev_priv->drm,
 288		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 289		    pll->info->name, pll->active_mask, pll->on,
 290		    crtc->base.base.id, crtc->base.name);
 291
 292	assert_shared_dpll_enabled(dev_priv, pll);
 293	drm_WARN_ON(&dev_priv->drm, !pll->on);
 294
 295	pll->active_mask &= ~pipe_mask;
 296	if (pll->active_mask)
 297		goto out;
 298
 299	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
 300	pll->info->funcs->disable(dev_priv, pll);
 301	pll->on = false;
 302
 303out:
 304	mutex_unlock(&dev_priv->dpll.lock);
 305}
 306
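/*
 * Prefer sharing a PLL whose staged hw state already matches @pll_state;
 * otherwise fall back to the first unused PLL in @dpll_mask. Returns NULL
 * if neither is available.
 */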
 307static struct intel_shared_dpll *
 308intel_find_shared_dpll(struct intel_atomic_state *state,
 309		       const struct intel_crtc *crtc,
 310		       const struct intel_dpll_hw_state *pll_state,
 311		       unsigned long dpll_mask)
 312{
 313	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 314	struct intel_shared_dpll *pll, *unused_pll = NULL;
 315	struct intel_shared_dpll_state *shared_dpll;
 316	enum intel_dpll_id i;
 317
 318	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 319
 320	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
 321
 322	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
 323		pll = &dev_priv->dpll.shared_dplls[i];
 324
 325		/* Only want to check enabled timings first */
 326		if (shared_dpll[i].pipe_mask == 0) {
 327			if (!unused_pll)
 328				unused_pll = pll;
 329			continue;
 330		}
 331
 332		if (memcmp(pll_state,
 333			   &shared_dpll[i].hw_state,
 334			   sizeof(*pll_state)) == 0) {
 335			drm_dbg_kms(&dev_priv->drm,
 336				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
 337				    crtc->base.base.id, crtc->base.name,
 338				    pll->info->name,
 339				    shared_dpll[i].pipe_mask,
 340				    pll->active_mask);
 341			return pll;
 342		}
 343	}
 344
 345	/* Ok no matching timings, maybe there's a free one? */
 346	if (unused_pll) {
 347		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
 348			    crtc->base.base.id, crtc->base.name,
 349			    unused_pll->info->name);
 350		return unused_pll;
 351	}
 352
 353	return NULL;
 354}
 355
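/*
 * Stage a reference from @crtc to @pll in the atomic state; the first
 * reference also records the hw state that the PLL will be programmed with.
 */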
 356static void
 357intel_reference_shared_dpll(struct intel_atomic_state *state,
 358			    const struct intel_crtc *crtc,
 359			    const struct intel_shared_dpll *pll,
 360			    const struct intel_dpll_hw_state *pll_state)
 361{
 362	struct drm_i915_private *i915 = to_i915(state->base.dev);
 363	struct intel_shared_dpll_state *shared_dpll;
 364	const enum intel_dpll_id id = pll->info->id;
 365
 366	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 367
 368	if (shared_dpll[id].pipe_mask == 0)
 369		shared_dpll[id].hw_state = *pll_state;
 370
 371	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
 372		pipe_name(crtc->pipe));
 373
 374	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
 375}
 376
 377static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
 378					  const struct intel_crtc *crtc,
 379					  const struct intel_shared_dpll *pll)
 380{
 381	struct intel_shared_dpll_state *shared_dpll;
 382
 383	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 384	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
 385}
 386
 387static void intel_put_dpll(struct intel_atomic_state *state,
 388			   struct intel_crtc *crtc)
 389{
 390	const struct intel_crtc_state *old_crtc_state =
 391		intel_atomic_get_old_crtc_state(state, crtc);
 392	struct intel_crtc_state *new_crtc_state =
 393		intel_atomic_get_new_crtc_state(state, crtc);
 394
 395	new_crtc_state->shared_dpll = NULL;
 396
 397	if (!old_crtc_state->shared_dpll)
 398		return;
 399
 400	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
 401}
 402
 403/**
 404 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 405 * @state: atomic state
 406 *
 407 * This is the dpll version of drm_atomic_helper_swap_state() since the
 408 * helper does not handle driver-specific global state.
 409 *
 410 * For consistency with atomic helpers this function does a complete swap,
 411 * i.e. it also puts the current state into @state, even though there is no
 412 * need for that at this moment.
 413 */
 414void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 415{
 416	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 417	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
 418	enum intel_dpll_id i;
 419
 420	if (!state->dpll_set)
 421		return;
 422
 423	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
 424		struct intel_shared_dpll *pll =
 425			&dev_priv->dpll.shared_dplls[i];
 426
 427		swap(pll->state, shared_dpll[i]);
 428	}
 429}
 430
 431static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 432				      struct intel_shared_dpll *pll,
 433				      struct intel_dpll_hw_state *hw_state)
 434{
 435	const enum intel_dpll_id id = pll->info->id;
 436	intel_wakeref_t wakeref;
 437	u32 val;
 438
 439	wakeref = intel_display_power_get_if_enabled(dev_priv,
 440						     POWER_DOMAIN_DISPLAY_CORE);
 441	if (!wakeref)
 442		return false;
 443
 444	val = intel_de_read(dev_priv, PCH_DPLL(id));
 445	hw_state->dpll = val;
 446	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
 447	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
 448
 449	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 450
 451	return val & DPLL_VCO_ENABLE;
 452}
 453
 454static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
 455				 struct intel_shared_dpll *pll)
 456{
 457	const enum intel_dpll_id id = pll->info->id;
 458
 459	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
 460	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
 461}
 462
 463static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 464{
 465	u32 val;
 466	bool enabled;
 467
 468	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
 469
 470	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
 471	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 472			    DREF_SUPERSPREAD_SOURCE_MASK));
 473	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
 474}
 475
 476static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 477				struct intel_shared_dpll *pll)
 478{
 479	const enum intel_dpll_id id = pll->info->id;
 480
 481	/* PCH refclock must be enabled first */
 482	ibx_assert_pch_refclk_enabled(dev_priv);
 483
 484	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
 485
 486	/* Wait for the clocks to stabilize. */
 487	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 488	udelay(150);
 489
 490	/* The pixel multiplier can only be updated once the
 491	 * DPLL is enabled and the clocks are stable.
 492	 *
 493	 * So write it again.
 494	 */
 495	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
 496	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 497	udelay(200);
 498}
 499
 500static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 501				 struct intel_shared_dpll *pll)
 502{
 503	const enum intel_dpll_id id = pll->info->id;
 504
 505	intel_de_write(dev_priv, PCH_DPLL(id), 0);
 506	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 507	udelay(200);
 508}
 509
 510static bool ibx_get_dpll(struct intel_atomic_state *state,
 511			 struct intel_crtc *crtc,
 512			 struct intel_encoder *encoder)
 513{
 514	struct intel_crtc_state *crtc_state =
 515		intel_atomic_get_new_crtc_state(state, crtc);
 516	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 517	struct intel_shared_dpll *pll;
 518	enum intel_dpll_id i;
 519
 520	if (HAS_PCH_IBX(dev_priv)) {
 521		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 522		i = (enum intel_dpll_id) crtc->pipe;
 523		pll = &dev_priv->dpll.shared_dplls[i];
 524
 525		drm_dbg_kms(&dev_priv->drm,
 526			    "[CRTC:%d:%s] using pre-allocated %s\n",
 527			    crtc->base.base.id, crtc->base.name,
 528			    pll->info->name);
 529	} else {
 530		pll = intel_find_shared_dpll(state, crtc,
 531					     &crtc_state->dpll_hw_state,
 532					     BIT(DPLL_ID_PCH_PLL_B) |
 533					     BIT(DPLL_ID_PCH_PLL_A));
 534	}
 535
 536	if (!pll)
 537		return false;
 538
 539	/* reference the pll */
 540	intel_reference_shared_dpll(state, crtc,
 541				    pll, &crtc_state->dpll_hw_state);
 542
 543	crtc_state->shared_dpll = pll;
 544
 545	return true;
 546}
 547
 548static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
 549			      const struct intel_dpll_hw_state *hw_state)
 550{
 551	drm_dbg_kms(&dev_priv->drm,
 552		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 553		    "fp0: 0x%x, fp1: 0x%x\n",
 554		    hw_state->dpll,
 555		    hw_state->dpll_md,
 556		    hw_state->fp0,
 557		    hw_state->fp1);
 558}
 559
 560static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 561	.prepare = ibx_pch_dpll_prepare,
 562	.enable = ibx_pch_dpll_enable,
 563	.disable = ibx_pch_dpll_disable,
 564	.get_hw_state = ibx_pch_dpll_get_hw_state,
 565};
 566
 567static const struct dpll_info pch_plls[] = {
 568	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
 569	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
 570	{ },
 571};
 572
 573static const struct intel_dpll_mgr pch_pll_mgr = {
 574	.dpll_info = pch_plls,
 575	.get_dplls = ibx_get_dpll,
 576	.put_dplls = intel_put_dpll,
 577	.dump_hw_state = ibx_dump_hw_state,
 578};
 579
 580static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 581			       struct intel_shared_dpll *pll)
 582{
 583	const enum intel_dpll_id id = pll->info->id;
 584
 585	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
 586	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
 587	udelay(20);
 588}
 589
 590static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 591				struct intel_shared_dpll *pll)
 592{
 593	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
 594	intel_de_posting_read(dev_priv, SPLL_CTL);
 595	udelay(20);
 596}
 597
 598static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
 599				  struct intel_shared_dpll *pll)
 600{
 601	const enum intel_dpll_id id = pll->info->id;
 602	u32 val;
 603
 604	val = intel_de_read(dev_priv, WRPLL_CTL(id));
 605	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
 606	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
 607
 608	/*
 609	 * Try to set up the PCH reference clock once all DPLLs
 610	 * that depend on it have been shut down.
 611	 */
 612	if (dev_priv->pch_ssc_use & BIT(id))
 613		intel_init_pch_refclk(dev_priv);
 614}
 615
 616static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
 617				 struct intel_shared_dpll *pll)
 618{
 619	enum intel_dpll_id id = pll->info->id;
 620	u32 val;
 621
 622	val = intel_de_read(dev_priv, SPLL_CTL);
 623	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
 624	intel_de_posting_read(dev_priv, SPLL_CTL);
 625
 626	/*
 627	 * Try to set up the PCH reference clock once all DPLLs
 628	 * that depend on it have been shut down.
 629	 */
 630	if (dev_priv->pch_ssc_use & BIT(id))
 631		intel_init_pch_refclk(dev_priv);
 632}
 633
 634static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
 635				       struct intel_shared_dpll *pll,
 636				       struct intel_dpll_hw_state *hw_state)
 637{
 638	const enum intel_dpll_id id = pll->info->id;
 639	intel_wakeref_t wakeref;
 640	u32 val;
 641
 642	wakeref = intel_display_power_get_if_enabled(dev_priv,
 643						     POWER_DOMAIN_DISPLAY_CORE);
 644	if (!wakeref)
 645		return false;
 646
 647	val = intel_de_read(dev_priv, WRPLL_CTL(id));
 648	hw_state->wrpll = val;
 649
 650	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 651
 652	return val & WRPLL_PLL_ENABLE;
 653}
 654
 655static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
 656				      struct intel_shared_dpll *pll,
 657				      struct intel_dpll_hw_state *hw_state)
 658{
 659	intel_wakeref_t wakeref;
 660	u32 val;
 661
 662	wakeref = intel_display_power_get_if_enabled(dev_priv,
 663						     POWER_DOMAIN_DISPLAY_CORE);
 664	if (!wakeref)
 665		return false;
 666
 667	val = intel_de_read(dev_priv, SPLL_CTL);
 668	hw_state->spll = val;
 669
 670	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 671
 672	return val & SPLL_PLL_ENABLE;
 673}
 674
 675#define LC_FREQ 2700
 676#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 677
 678#define P_MIN 2
 679#define P_MAX 64
 680#define P_INC 2
 681
 682/* Constraints for PLL good behavior */
 683#define REF_MIN 48
 684#define REF_MAX 400
 685#define VCO_MIN 2400
 686#define VCO_MAX 4800
 687
 688struct hsw_wrpll_rnp {
 689	unsigned p, n2, r2;
 690};
 691
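/*
 * PPM error budget for the WRPLL at the given pixel clock (in Hz): well-known
 * display clocks get a zero budget (only an exact match counts as within
 * budget), a few awkward rates get a looser one, and everything else defaults
 * to 1000 ppm.
 */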
 692static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 693{
 694	unsigned budget;
 695
 696	switch (clock) {
 697	case 25175000:
 698	case 25200000:
 699	case 27000000:
 700	case 27027000:
 701	case 37762500:
 702	case 37800000:
 703	case 40500000:
 704	case 40541000:
 705	case 54000000:
 706	case 54054000:
 707	case 59341000:
 708	case 59400000:
 709	case 72000000:
 710	case 74176000:
 711	case 74250000:
 712	case 81000000:
 713	case 81081000:
 714	case 89012000:
 715	case 89100000:
 716	case 108000000:
 717	case 108108000:
 718	case 111264000:
 719	case 111375000:
 720	case 148352000:
 721	case 148500000:
 722	case 162000000:
 723	case 162162000:
 724	case 222525000:
 725	case 222750000:
 726	case 296703000:
 727	case 297000000:
 728		budget = 0;
 729		break;
 730	case 233500000:
 731	case 245250000:
 732	case 247750000:
 733	case 253250000:
 734	case 298000000:
 735		budget = 1500;
 736		break;
 737	case 169128000:
 738	case 169500000:
 739	case 179500000:
 740	case 202000000:
 741		budget = 2000;
 742		break;
 743	case 256250000:
 744	case 262500000:
 745	case 270000000:
 746	case 272500000:
 747	case 273750000:
 748	case 280750000:
 749	case 281250000:
 750	case 286000000:
 751	case 291750000:
 752		budget = 4000;
 753		break;
 754	case 267250000:
 755	case 268500000:
 756		budget = 5000;
 757		break;
 758	default:
 759		budget = 1000;
 760		break;
 761	}
 762
 763	return budget;
 764}
 765
 766static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
 767				 unsigned int r2, unsigned int n2,
 768				 unsigned int p,
 769				 struct hsw_wrpll_rnp *best)
 770{
 771	u64 a, b, c, d, diff, diff_best;
 772
 773	/* No best (r,n,p) yet */
 774	if (best->p == 0) {
 775		best->p = p;
 776		best->n2 = n2;
 777		best->r2 = r2;
 778		return;
 779	}
 780
 781	/*
 782	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
 783	 * freq2k.
 784	 *
 785	 * delta = 1e6 *
 786	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
 787	 *	   freq2k;
 788	 *
 789	 * and we would like delta <= budget.
 790	 *
 791	 * If the discrepancy is above the PPM-based budget, always prefer to
 792	 * improve upon the previous solution.  However, if you're within the
 793	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
 794	 */
 795	a = freq2k * budget * p * r2;
 796	b = freq2k * budget * best->p * best->r2;
 797	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
 798	diff_best = abs_diff(freq2k * best->p * best->r2,
 799			     LC_FREQ_2K * best->n2);
 800	c = 1000000 * diff;
 801	d = 1000000 * diff_best;
 802
 803	if (a < c && b < d) {
 804		/* If both are above the budget, pick the closer */
 805		if (best->p * best->r2 * diff < p * r2 * diff_best) {
 806			best->p = p;
 807			best->n2 = n2;
 808			best->r2 = r2;
 809		}
 810	} else if (a >= c && b < d) {
 811		/* If A is below the threshold but B is above it?  Update. */
 812		best->p = p;
 813		best->n2 = n2;
 814		best->r2 = r2;
 815	} else if (a >= c && b >= d) {
 816		/* Both are below the limit, so pick the higher n2/(r2*r2) */
 817		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
 818			best->p = p;
 819			best->n2 = n2;
 820			best->r2 = r2;
 821		}
 822	}
 823	/* Otherwise a < c && b >= d, do nothing */
 824}
 825
 826static void
 827hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 828			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 829{
 830	u64 freq2k;
 831	unsigned p, n2, r2;
 832	struct hsw_wrpll_rnp best = { 0, 0, 0 };
 833	unsigned budget;
 834
 835	freq2k = clock / 100;
 836
 837	budget = hsw_wrpll_get_budget_for_freq(clock);
 838
 839	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
 840	 * and directly pass the LC PLL to it. */
 841	if (freq2k == 5400000) {
 842		*n2_out = 2;
 843		*p_out = 1;
 844		*r2_out = 2;
 845		return;
 846	}
 847
 848	/*
 849	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
 850	 * the WR PLL.
 851	 *
 852	 * We want R so that REF_MIN <= Ref <= REF_MAX.
 853	 * Injecting R2 = 2 * R gives:
 854	 *   REF_MAX * r2 > LC_FREQ * 2 and
 855	 *   REF_MIN * r2 < LC_FREQ * 2
 856	 *
 857	 * Which means the desired boundaries for r2 are:
 858	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
 859	 *
 860	 */
 861	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
 862	     r2 <= LC_FREQ * 2 / REF_MIN;
 863	     r2++) {
 864
 865		/*
 866		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
 867		 *
 868		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
 869		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
 870		 *   VCO_MAX * r2 > n2 * LC_FREQ and
 871		 *   VCO_MIN * r2 < n2 * LC_FREQ)
 872		 *
 873		 * Which means the desired boundaries for n2 are:
 874		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
 875		 */
 876		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
 877		     n2 <= VCO_MAX * r2 / LC_FREQ;
 878		     n2++) {
 879
 880			for (p = P_MIN; p <= P_MAX; p += P_INC)
 881				hsw_wrpll_update_rnp(freq2k, budget,
 882						     r2, n2, p, &best);
 883		}
 884	}
 885
 886	*n2_out = best.n2;
 887	*p_out = best.p;
 888	*r2_out = best.r2;
 889}
 890
 891static struct intel_shared_dpll *
 892hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
 893		       struct intel_crtc *crtc)
 894{
 895	struct intel_crtc_state *crtc_state =
 896		intel_atomic_get_new_crtc_state(state, crtc);
 897	struct intel_shared_dpll *pll;
 898	u32 val;
 899	unsigned int p, n2, r2;
 900
 901	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
 902
 903	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
 904	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 905	      WRPLL_DIVIDER_POST(p);
 906
 907	crtc_state->dpll_hw_state.wrpll = val;
 908
 909	pll = intel_find_shared_dpll(state, crtc,
 910				     &crtc_state->dpll_hw_state,
 911				     BIT(DPLL_ID_WRPLL2) |
 912				     BIT(DPLL_ID_WRPLL1));
 913
 914	if (!pll)
 915		return NULL;
 916
 917	return pll;
 918}
 919
 920static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
 921				  const struct intel_shared_dpll *pll,
 922				  const struct intel_dpll_hw_state *pll_state)
 923{
 924	int refclk;
 925	int n, p, r;
 926	u32 wrpll = pll_state->wrpll;
 927
 928	switch (wrpll & WRPLL_REF_MASK) {
 929	case WRPLL_REF_SPECIAL_HSW:
 930		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
 931		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
 932			refclk = dev_priv->dpll.ref_clks.nssc;
 933			break;
 934		}
 935		fallthrough;
 936	case WRPLL_REF_PCH_SSC:
 937		/*
 938		 * We could calculate spread here, but our checking
 939		 * code only cares about 5% accuracy, and spread is a max of
 940		 * 0.5% downspread.
 941		 */
 942		refclk = dev_priv->dpll.ref_clks.ssc;
 943		break;
 944	case WRPLL_REF_LCPLL:
 945		refclk = 2700000;
 946		break;
 947	default:
 948		MISSING_CASE(wrpll);
 949		return 0;
 950	}
 951
 952	r = wrpll & WRPLL_DIVIDER_REF_MASK;
 953	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
 954	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
 955
 956	/* Convert to KHz, p & r have a fixed point portion */
 957	return (refclk * n / 10) / (p * r) * 2;
 958}
 959
 960static struct intel_shared_dpll *
 961hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
 962{
 963	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 964	struct intel_shared_dpll *pll;
 965	enum intel_dpll_id pll_id;
 966	int clock = crtc_state->port_clock;
 967
 968	switch (clock / 2) {
 969	case 81000:
 970		pll_id = DPLL_ID_LCPLL_810;
 971		break;
 972	case 135000:
 973		pll_id = DPLL_ID_LCPLL_1350;
 974		break;
 975	case 270000:
 976		pll_id = DPLL_ID_LCPLL_2700;
 977		break;
 978	default:
 979		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
 980			    clock);
 981		return NULL;
 982	}
 983
 984	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
 985
 986	if (!pll)
 987		return NULL;
 988
 989	return pll;
 990}
 991
 992static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
 993				  const struct intel_shared_dpll *pll,
 994				  const struct intel_dpll_hw_state *pll_state)
 995{
 996	int link_clock = 0;
 997
 998	switch (pll->info->id) {
 999	case DPLL_ID_LCPLL_810:
1000		link_clock = 81000;
1001		break;
1002	case DPLL_ID_LCPLL_1350:
1003		link_clock = 135000;
1004		break;
1005	case DPLL_ID_LCPLL_2700:
1006		link_clock = 270000;
1007		break;
1008	default:
1009		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1010		break;
1011	}
1012
1013	return link_clock * 2;
1014}
1015
1016static struct intel_shared_dpll *
1017hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1018		      struct intel_crtc *crtc)
1019{
1020	struct intel_crtc_state *crtc_state =
1021		intel_atomic_get_new_crtc_state(state, crtc);
1022
1023	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1024		return NULL;
1025
1026	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1027					 SPLL_REF_MUXED_SSC;
1028
1029	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1030				      BIT(DPLL_ID_SPLL));
1031}
1032
1033static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1034				 const struct intel_shared_dpll *pll,
1035				 const struct intel_dpll_hw_state *pll_state)
1036{
1037	int link_clock = 0;
1038
1039	switch (pll_state->spll & SPLL_FREQ_MASK) {
1040	case SPLL_FREQ_810MHz:
1041		link_clock = 81000;
1042		break;
1043	case SPLL_FREQ_1350MHz:
1044		link_clock = 135000;
1045		break;
1046	case SPLL_FREQ_2700MHz:
1047		link_clock = 270000;
1048		break;
1049	default:
1050		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1051		break;
1052	}
1053
1054	return link_clock * 2;
1055}
1056
1057static bool hsw_get_dpll(struct intel_atomic_state *state,
1058			 struct intel_crtc *crtc,
1059			 struct intel_encoder *encoder)
1060{
1061	struct intel_crtc_state *crtc_state =
1062		intel_atomic_get_new_crtc_state(state, crtc);
1063	struct intel_shared_dpll *pll;
1064
1065	memset(&crtc_state->dpll_hw_state, 0,
1066	       sizeof(crtc_state->dpll_hw_state));
1067
1068	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1069		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1070	else if (intel_crtc_has_dp_encoder(crtc_state))
1071		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1072	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1073		pll = hsw_ddi_spll_get_dpll(state, crtc);
1074	else
1075		return false;
1076
1077	if (!pll)
1078		return false;
1079
1080	intel_reference_shared_dpll(state, crtc,
1081				    pll, &crtc_state->dpll_hw_state);
1082
1083	crtc_state->shared_dpll = pll;
1084
1085	return true;
1086}
1087
1088static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1089{
1090	i915->dpll.ref_clks.ssc = 135000;
1091	/* Non-SSC is only used on non-ULT HSW. */
1092	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1093		i915->dpll.ref_clks.nssc = 24000;
1094	else
1095		i915->dpll.ref_clks.nssc = 135000;
1096}
1097
1098static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1099			      const struct intel_dpll_hw_state *hw_state)
1100{
1101	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1102		    hw_state->wrpll, hw_state->spll);
1103}
1104
1105static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1106	.enable = hsw_ddi_wrpll_enable,
1107	.disable = hsw_ddi_wrpll_disable,
1108	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1109	.get_freq = hsw_ddi_wrpll_get_freq,
1110};
1111
1112static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1113	.enable = hsw_ddi_spll_enable,
1114	.disable = hsw_ddi_spll_disable,
1115	.get_hw_state = hsw_ddi_spll_get_hw_state,
1116	.get_freq = hsw_ddi_spll_get_freq,
1117};
1118
1119static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1120				 struct intel_shared_dpll *pll)
1121{
1122}
1123
1124static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1125				  struct intel_shared_dpll *pll)
1126{
1127}
1128
1129static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1130				       struct intel_shared_dpll *pll,
1131				       struct intel_dpll_hw_state *hw_state)
1132{
1133	return true;
1134}
1135
1136static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1137	.enable = hsw_ddi_lcpll_enable,
1138	.disable = hsw_ddi_lcpll_disable,
1139	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1140	.get_freq = hsw_ddi_lcpll_get_freq,
1141};
1142
1143static const struct dpll_info hsw_plls[] = {
1144	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1145	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1146	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1147	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1148	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1149	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1150	{ },
1151};
1152
1153static const struct intel_dpll_mgr hsw_pll_mgr = {
1154	.dpll_info = hsw_plls,
1155	.get_dplls = hsw_get_dpll,
1156	.put_dplls = intel_put_dpll,
1157	.update_ref_clks = hsw_update_dpll_ref_clks,
1158	.dump_hw_state = hsw_dump_hw_state,
1159};
1160
1161struct skl_dpll_regs {
1162	i915_reg_t ctl, cfgcr1, cfgcr2;
1163};
1164
1165/* this array is indexed by the *shared* pll id */
1166static const struct skl_dpll_regs skl_dpll_regs[4] = {
1167	{
1168		/* DPLL 0 */
1169		.ctl = LCPLL1_CTL,
1170		/* DPLL 0 doesn't support HDMI mode */
1171	},
1172	{
1173		/* DPLL 1 */
1174		.ctl = LCPLL2_CTL,
1175		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1176		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1177	},
1178	{
1179		/* DPLL 2 */
1180		.ctl = WRPLL_CTL(0),
1181		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1182		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1183	},
1184	{
1185		/* DPLL 3 */
1186		.ctl = WRPLL_CTL(1),
1187		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1188		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1189	},
1190};
1191
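/*
 * Program this PLL's 6-bit slice of the shared DPLL_CTRL1 register (HDMI
 * mode, SSC enable and link rate selection).
 */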
1192static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1193				    struct intel_shared_dpll *pll)
1194{
1195	const enum intel_dpll_id id = pll->info->id;
1196	u32 val;
1197
1198	val = intel_de_read(dev_priv, DPLL_CTRL1);
1199
1200	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1201		 DPLL_CTRL1_SSC(id) |
1202		 DPLL_CTRL1_LINK_RATE_MASK(id));
1203	val |= pll->state.hw_state.ctrl1 << (id * 6);
1204
1205	intel_de_write(dev_priv, DPLL_CTRL1, val);
1206	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1207}
1208
1209static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1210			       struct intel_shared_dpll *pll)
1211{
1212	const struct skl_dpll_regs *regs = skl_dpll_regs;
1213	const enum intel_dpll_id id = pll->info->id;
1214
1215	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1216
1217	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1218	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1219	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1220	intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1221
1222	/* the enable bit is always bit 31 */
1223	intel_de_write(dev_priv, regs[id].ctl,
1224		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1225
1226	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1227		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1228}
1229
1230static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1231				 struct intel_shared_dpll *pll)
1232{
1233	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1234}
1235
1236static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1237				struct intel_shared_dpll *pll)
1238{
1239	const struct skl_dpll_regs *regs = skl_dpll_regs;
1240	const enum intel_dpll_id id = pll->info->id;
1241
1242	/* the enable bit is always bit 31 */
1243	intel_de_write(dev_priv, regs[id].ctl,
1244		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1245	intel_de_posting_read(dev_priv, regs[id].ctl);
1246}
1247
1248static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1249				  struct intel_shared_dpll *pll)
1250{
1251}
1252
1253static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1254				     struct intel_shared_dpll *pll,
1255				     struct intel_dpll_hw_state *hw_state)
1256{
1257	u32 val;
1258	const struct skl_dpll_regs *regs = skl_dpll_regs;
1259	const enum intel_dpll_id id = pll->info->id;
1260	intel_wakeref_t wakeref;
1261	bool ret;
1262
1263	wakeref = intel_display_power_get_if_enabled(dev_priv,
1264						     POWER_DOMAIN_DISPLAY_CORE);
1265	if (!wakeref)
1266		return false;
1267
1268	ret = false;
1269
1270	val = intel_de_read(dev_priv, regs[id].ctl);
1271	if (!(val & LCPLL_PLL_ENABLE))
1272		goto out;
1273
1274	val = intel_de_read(dev_priv, DPLL_CTRL1);
1275	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1276
1277	/* avoid reading back stale values if HDMI mode is not enabled */
1278	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1279		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1280		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1281	}
1282	ret = true;
1283
1284out:
1285	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1286
1287	return ret;
1288}
1289
1290static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1291				       struct intel_shared_dpll *pll,
1292				       struct intel_dpll_hw_state *hw_state)
1293{
1294	const struct skl_dpll_regs *regs = skl_dpll_regs;
1295	const enum intel_dpll_id id = pll->info->id;
1296	intel_wakeref_t wakeref;
1297	u32 val;
1298	bool ret;
1299
1300	wakeref = intel_display_power_get_if_enabled(dev_priv,
1301						     POWER_DOMAIN_DISPLAY_CORE);
1302	if (!wakeref)
1303		return false;
1304
1305	ret = false;
1306
1307	/* DPLL0 is always enabled since it drives CDCLK */
1308	val = intel_de_read(dev_priv, regs[id].ctl);
1309	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1310		goto out;
1311
1312	val = intel_de_read(dev_priv, DPLL_CTRL1);
1313	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1314
1315	ret = true;
1316
1317out:
1318	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1319
1320	return ret;
1321}
1322
1323struct skl_wrpll_context {
1324	u64 min_deviation;		/* current minimal deviation */
1325	u64 central_freq;		/* chosen central freq */
1326	u64 dco_freq;			/* chosen dco freq */
1327	unsigned int p;			/* chosen divider */
1328};
1329
1330static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1331{
1332	memset(ctx, 0, sizeof(*ctx));
1333
1334	ctx->min_deviation = U64_MAX;
1335}
1336
1337/* DCO freq must be within +1%/-6% of the DCO central freq */
1338#define SKL_DCO_MAX_PDEVIATION	100
1339#define SKL_DCO_MAX_NDEVIATION	600
1340
1341static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1342				  u64 central_freq,
1343				  u64 dco_freq,
1344				  unsigned int divider)
1345{
1346	u64 deviation;
1347
1348	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1349			      central_freq);
1350
1351	/* positive deviation */
1352	if (dco_freq >= central_freq) {
1353		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1354		    deviation < ctx->min_deviation) {
1355			ctx->min_deviation = deviation;
1356			ctx->central_freq = central_freq;
1357			ctx->dco_freq = dco_freq;
1358			ctx->p = divider;
1359		}
1360	/* negative deviation */
1361	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1362		   deviation < ctx->min_deviation) {
1363		ctx->min_deviation = deviation;
1364		ctx->central_freq = central_freq;
1365		ctx->dco_freq = dco_freq;
1366		ctx->p = divider;
1367	}
1368}
1369
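/*
 * Decompose the overall divider p into the hardware's P0 (pdiv), P1 (qdiv)
 * and P2 (kdiv) factors, so that p = p0 * p1 * p2. Only the even/odd divider
 * values generated by skl_ddi_calculate_wrpll() are handled here.
 */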
1370static void skl_wrpll_get_multipliers(unsigned int p,
1371				      unsigned int *p0 /* out */,
1372				      unsigned int *p1 /* out */,
1373				      unsigned int *p2 /* out */)
1374{
1375	/* even dividers */
1376	if (p % 2 == 0) {
1377		unsigned int half = p / 2;
1378
1379		if (half == 1 || half == 2 || half == 3 || half == 5) {
1380			*p0 = 2;
1381			*p1 = 1;
1382			*p2 = half;
1383		} else if (half % 2 == 0) {
1384			*p0 = 2;
1385			*p1 = half / 2;
1386			*p2 = 2;
1387		} else if (half % 3 == 0) {
1388			*p0 = 3;
1389			*p1 = half / 3;
1390			*p2 = 2;
1391		} else if (half % 7 == 0) {
1392			*p0 = 7;
1393			*p1 = half / 7;
1394			*p2 = 2;
1395		}
1396	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1397		*p0 = 3;
1398		*p1 = 1;
1399		*p2 = p / 3;
1400	} else if (p == 5 || p == 7) {
1401		*p0 = p;
1402		*p1 = 1;
1403		*p2 = 1;
1404	} else if (p == 15) {
1405		*p0 = 3;
1406		*p1 = 1;
1407		*p2 = 5;
1408	} else if (p == 21) {
1409		*p0 = 7;
1410		*p1 = 1;
1411		*p2 = 3;
1412	} else if (p == 35) {
1413		*p0 = 7;
1414		*p1 = 1;
1415		*p2 = 5;
1416	}
1417}
1418
1419struct skl_wrpll_params {
1420	u32 dco_fraction;
1421	u32 dco_integer;
1422	u32 qdiv_ratio;
1423	u32 qdiv_mode;
1424	u32 kdiv;
1425	u32 pdiv;
1426	u32 central_freq;
1427};
1428
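/*
 * Encode the chosen DCO frequency and p0/p1/p2 dividers into the raw
 * CFGCR1/CFGCR2 field values: central frequency select, pdiv/kdiv/qdiv codes
 * and the DCO frequency expressed as an integer + fraction multiple of the
 * reference clock.
 */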
1429static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1430				      u64 afe_clock,
1431				      int ref_clock,
1432				      u64 central_freq,
1433				      u32 p0, u32 p1, u32 p2)
1434{
1435	u64 dco_freq;
1436
1437	switch (central_freq) {
1438	case 9600000000ULL:
1439		params->central_freq = 0;
1440		break;
1441	case 9000000000ULL:
1442		params->central_freq = 1;
1443		break;
1444	case 8400000000ULL:
1445		params->central_freq = 3;
1446	}
1447
1448	switch (p0) {
1449	case 1:
1450		params->pdiv = 0;
1451		break;
1452	case 2:
1453		params->pdiv = 1;
1454		break;
1455	case 3:
1456		params->pdiv = 2;
1457		break;
1458	case 7:
1459		params->pdiv = 4;
1460		break;
1461	default:
1462		WARN(1, "Incorrect PDiv\n");
1463	}
1464
1465	switch (p2) {
1466	case 5:
1467		params->kdiv = 0;
1468		break;
1469	case 2:
1470		params->kdiv = 1;
1471		break;
1472	case 3:
1473		params->kdiv = 2;
1474		break;
1475	case 1:
1476		params->kdiv = 3;
1477		break;
1478	default:
1479		WARN(1, "Incorrect KDiv\n");
1480	}
1481
1482	params->qdiv_ratio = p1;
1483	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1484
1485	dco_freq = p0 * p1 * p2 * afe_clock;
1486
1487	/*
1488	 * Intermediate values are in Hz.
1489	 * Divide by MHz to match bspec
1490	 */
1491	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1492	params->dco_fraction =
1493		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1494			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1495}
1496
1497static bool
1498skl_ddi_calculate_wrpll(int clock /* in Hz */,
1499			int ref_clock,
1500			struct skl_wrpll_params *wrpll_params)
1501{
1502	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1503	u64 dco_central_freq[3] = { 8400000000ULL,
1504				    9000000000ULL,
1505				    9600000000ULL };
1506	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1507					     24, 28, 30, 32, 36, 40, 42, 44,
1508					     48, 52, 54, 56, 60, 64, 66, 68,
1509					     70, 72, 76, 78, 80, 84, 88, 90,
1510					     92, 96, 98 };
1511	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1512	static const struct {
1513		const int *list;
1514		int n_dividers;
1515	} dividers[] = {
1516		{ even_dividers, ARRAY_SIZE(even_dividers) },
1517		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1518	};
1519	struct skl_wrpll_context ctx;
1520	unsigned int dco, d, i;
1521	unsigned int p0, p1, p2;
1522
1523	skl_wrpll_context_init(&ctx);
1524
1525	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1526		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1527			for (i = 0; i < dividers[d].n_dividers; i++) {
1528				unsigned int p = dividers[d].list[i];
1529				u64 dco_freq = p * afe_clock;
1530
1531				skl_wrpll_try_divider(&ctx,
1532						      dco_central_freq[dco],
1533						      dco_freq,
1534						      p);
1535				/*
1536				 * Skip the remaining dividers if we're sure to
1537				 * have found the definitive divider, we can't
1538				 * improve a 0 deviation.
1539				 */
1540				if (ctx.min_deviation == 0)
1541					goto skip_remaining_dividers;
1542			}
1543		}
1544
1545skip_remaining_dividers:
1546		/*
1547		 * If a solution is found with an even divider, prefer
1548		 * this one.
1549		 */
1550		if (d == 0 && ctx.p)
1551			break;
1552	}
1553
1554	if (!ctx.p) {
1555		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1556		return false;
1557	}
1558
1559	/*
1560	 * gcc incorrectly analyses that these can be used without being
1561	 * initialized. To be fair, it's hard to guess.
1562	 */
1563	p0 = p1 = p2 = 0;
1564	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1565	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1566				  ctx.central_freq, p0, p1, p2);
1567
1568	return true;
1569}
1570
1571static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1572{
1573	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1574	u32 ctrl1, cfgcr1, cfgcr2;
1575	struct skl_wrpll_params wrpll_params = { 0, };
1576
1577	/*
1578	 * See comment in intel_dpll_hw_state to understand why we always use 0
1579	 * as the DPLL id in this function.
1580	 */
1581	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1582
1583	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1584
1585	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1586				     i915->dpll.ref_clks.nssc,
1587				     &wrpll_params))
1588		return false;
1589
1590	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1591		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1592		wrpll_params.dco_integer;
1593
1594	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1595		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1596		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1597		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1598		wrpll_params.central_freq;
1599
1600	memset(&crtc_state->dpll_hw_state, 0,
1601	       sizeof(crtc_state->dpll_hw_state));
1602
1603	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1604	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1605	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1606	return true;
1607}
1608
1609static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1610				  const struct intel_shared_dpll *pll,
1611				  const struct intel_dpll_hw_state *pll_state)
1612{
1613	int ref_clock = i915->dpll.ref_clks.nssc;
1614	u32 p0, p1, p2, dco_freq;
1615
1616	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1617	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1618
1619	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1620		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1621	else
1622		p1 = 1;
1623
1624
1625	switch (p0) {
1626	case DPLL_CFGCR2_PDIV_1:
1627		p0 = 1;
1628		break;
1629	case DPLL_CFGCR2_PDIV_2:
1630		p0 = 2;
1631		break;
1632	case DPLL_CFGCR2_PDIV_3:
1633		p0 = 3;
1634		break;
1635	case DPLL_CFGCR2_PDIV_7_INVALID:
1636		/*
1637		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1638		 * handling it the same way as PDIV_7.
1639		 */
1640		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1641		fallthrough;
1642	case DPLL_CFGCR2_PDIV_7:
1643		p0 = 7;
1644		break;
1645	default:
1646		MISSING_CASE(p0);
1647		return 0;
1648	}
1649
1650	switch (p2) {
1651	case DPLL_CFGCR2_KDIV_5:
1652		p2 = 5;
1653		break;
1654	case DPLL_CFGCR2_KDIV_2:
1655		p2 = 2;
1656		break;
1657	case DPLL_CFGCR2_KDIV_3:
1658		p2 = 3;
1659		break;
1660	case DPLL_CFGCR2_KDIV_1:
1661		p2 = 1;
1662		break;
1663	default:
1664		MISSING_CASE(p2);
1665		return 0;
1666	}
1667
1668	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1669		   ref_clock;
1670
1671	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1672		    ref_clock / 0x8000;
1673
1674	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1675		return 0;
1676
1677	return dco_freq / (p0 * p1 * p2 * 5);
1678}
1679
1680static bool
1681skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1682{
1683	u32 ctrl1;
1684
1685	/*
1686	 * See comment in intel_dpll_hw_state to understand why we always use 0
1687	 * as the DPLL id in this function.
1688	 */
1689	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1690	switch (crtc_state->port_clock / 2) {
1691	case 81000:
1692		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1693		break;
1694	case 135000:
1695		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1696		break;
1697	case 270000:
1698		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1699		break;
1700		/* eDP 1.4 rates */
1701	case 162000:
1702		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1703		break;
1704	case 108000:
1705		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1706		break;
1707	case 216000:
1708		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1709		break;
1710	}
1711
1712	memset(&crtc_state->dpll_hw_state, 0,
1713	       sizeof(crtc_state->dpll_hw_state));
1714
1715	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1716
1717	return true;
1718}
1719
1720static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1721				  const struct intel_shared_dpll *pll,
1722				  const struct intel_dpll_hw_state *pll_state)
1723{
1724	int link_clock = 0;
1725
1726	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1727		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1728	case DPLL_CTRL1_LINK_RATE_810:
1729		link_clock = 81000;
1730		break;
1731	case DPLL_CTRL1_LINK_RATE_1080:
1732		link_clock = 108000;
1733		break;
1734	case DPLL_CTRL1_LINK_RATE_1350:
1735		link_clock = 135000;
1736		break;
1737	case DPLL_CTRL1_LINK_RATE_1620:
1738		link_clock = 162000;
1739		break;
1740	case DPLL_CTRL1_LINK_RATE_2160:
1741		link_clock = 216000;
1742		break;
1743	case DPLL_CTRL1_LINK_RATE_2700:
1744		link_clock = 270000;
1745		break;
1746	default:
1747		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1748		break;
1749	}
1750
1751	return link_clock * 2;
1752}
1753
1754static bool skl_get_dpll(struct intel_atomic_state *state,
1755			 struct intel_crtc *crtc,
1756			 struct intel_encoder *encoder)
1757{
1758	struct intel_crtc_state *crtc_state =
1759		intel_atomic_get_new_crtc_state(state, crtc);
1760	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1761	struct intel_shared_dpll *pll;
1762	bool bret;
1763
1764	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1765		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1766		if (!bret) {
1767			drm_dbg_kms(&i915->drm,
1768				    "Could not get HDMI pll dividers.\n");
1769			return false;
1770		}
1771	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1772		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1773		if (!bret) {
1774			drm_dbg_kms(&i915->drm,
1775				    "Could not set DP dpll HW state.\n");
1776			return false;
1777		}
1778	} else {
1779		return false;
1780	}
1781
1782	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1783		pll = intel_find_shared_dpll(state, crtc,
1784					     &crtc_state->dpll_hw_state,
1785					     BIT(DPLL_ID_SKL_DPLL0));
1786	else
1787		pll = intel_find_shared_dpll(state, crtc,
1788					     &crtc_state->dpll_hw_state,
1789					     BIT(DPLL_ID_SKL_DPLL3) |
1790					     BIT(DPLL_ID_SKL_DPLL2) |
1791					     BIT(DPLL_ID_SKL_DPLL1));
1792	if (!pll)
1793		return false;
1794
1795	intel_reference_shared_dpll(state, crtc,
1796				    pll, &crtc_state->dpll_hw_state);
1797
1798	crtc_state->shared_dpll = pll;
1799
1800	return true;
1801}
1802
1803static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1804				const struct intel_shared_dpll *pll,
1805				const struct intel_dpll_hw_state *pll_state)
1806{
1807	/*
1808	 * ctrl1 register is already shifted for each pll, just use 0 to get
1809	 * the internal shift for each field
1810	 */
1811	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1812		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1813	else
1814		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1815}
1816
1817static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1818{
1819	/* No SSC ref */
1820	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
1821}
1822
1823static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1824			      const struct intel_dpll_hw_state *hw_state)
1825{
1826	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1827		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1828		      hw_state->ctrl1,
1829		      hw_state->cfgcr1,
1830		      hw_state->cfgcr2);
1831}
1832
1833static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1834	.enable = skl_ddi_pll_enable,
1835	.disable = skl_ddi_pll_disable,
1836	.get_hw_state = skl_ddi_pll_get_hw_state,
1837	.get_freq = skl_ddi_pll_get_freq,
1838};
1839
1840static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1841	.enable = skl_ddi_dpll0_enable,
1842	.disable = skl_ddi_dpll0_disable,
1843	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1844	.get_freq = skl_ddi_pll_get_freq,
1845};
1846
1847static const struct dpll_info skl_plls[] = {
1848	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1849	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1850	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1851	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1852	{ },
1853};
1854
1855static const struct intel_dpll_mgr skl_pll_mgr = {
1856	.dpll_info = skl_plls,
1857	.get_dplls = skl_get_dpll,
1858	.put_dplls = intel_put_dpll,
1859	.update_ref_clks = skl_update_dpll_ref_clks,
1860	.dump_hw_state = skl_dump_hw_state,
1861};
1862
1863static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1864				struct intel_shared_dpll *pll)
1865{
1866	u32 temp;
1867	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1868	enum dpio_phy phy;
1869	enum dpio_channel ch;
1870
1871	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1872
1873	/* Non-SSC reference */
1874	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1875	temp |= PORT_PLL_REF_SEL;
1876	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1877
1878	if (IS_GEMINILAKE(dev_priv)) {
1879		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1880		temp |= PORT_PLL_POWER_ENABLE;
1881		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1882
1883		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1884				 PORT_PLL_POWER_STATE), 200))
1885			drm_err(&dev_priv->drm,
1886				"Power state not set for PLL:%d\n", port);
1887	}
1888
1889	/* Disable 10 bit clock */
1890	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1891	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1892	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1893
1894	/* Write P1 & P2 */
1895	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1896	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1897	temp |= pll->state.hw_state.ebb0;
1898	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1899
1900	/* Write M2 integer */
1901	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1902	temp &= ~PORT_PLL_M2_MASK;
1903	temp |= pll->state.hw_state.pll0;
1904	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
1905
1906	/* Write N */
1907	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1908	temp &= ~PORT_PLL_N_MASK;
1909	temp |= pll->state.hw_state.pll1;
1910	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1911
1912	/* Write M2 fraction */
1913	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1914	temp &= ~PORT_PLL_M2_FRAC_MASK;
1915	temp |= pll->state.hw_state.pll2;
1916	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1917
1918	/* Write M2 fraction enable */
1919	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1920	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1921	temp |= pll->state.hw_state.pll3;
1922	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
1923
1924	/* Write coeff */
1925	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1926	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1927	temp &= ~PORT_PLL_INT_COEFF_MASK;
1928	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1929	temp |= pll->state.hw_state.pll6;
1930	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1931
1932	/* Write calibration val */
1933	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1934	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1935	temp |= pll->state.hw_state.pll8;
1936	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
1937
1938	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1939	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1940	temp |= pll->state.hw_state.pll9;
1941	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
1942
1943	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1944	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1945	temp &= ~PORT_PLL_DCO_AMP_MASK;
1946	temp |= pll->state.hw_state.pll10;
1947	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1948
1949	/* Recalibrate with new settings */
1950	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1951	temp |= PORT_PLL_RECALIBRATE;
1952	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1953	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1954	temp |= pll->state.hw_state.ebb4;
1955	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1956
1957	/* Enable PLL */
1958	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1959	temp |= PORT_PLL_ENABLE;
1960	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1961	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1962
1963	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1964			200))
1965		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
1966
1967	if (IS_GEMINILAKE(dev_priv)) {
1968		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
1969		temp |= DCC_DELAY_RANGE_2;
1970		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1971	}
1972
1973	/*
1974	 * While we write the group register to program all lanes at once, we
1975	 * can only read back individual lane registers; we pick lanes 0/1 for that.
1976	 */
1977	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
1978	temp &= ~LANE_STAGGER_MASK;
1979	temp &= ~LANESTAGGER_STRAP_OVRD;
1980	temp |= pll->state.hw_state.pcsdw12;
1981	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1982}
1983
1984static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1985					struct intel_shared_dpll *pll)
1986{
1987	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1988	u32 temp;
1989
1990	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1991	temp &= ~PORT_PLL_ENABLE;
1992	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1993	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1994
1995	if (IS_GEMINILAKE(dev_priv)) {
1996		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1997		temp &= ~PORT_PLL_POWER_ENABLE;
1998		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1999
2000		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
2001				  PORT_PLL_POWER_STATE), 200))
2002			drm_err(&dev_priv->drm,
2003				"Power state not reset for PLL:%d\n", port);
2004	}
2005}
2006
2007static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2008					struct intel_shared_dpll *pll,
2009					struct intel_dpll_hw_state *hw_state)
2010{
2011	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2012	intel_wakeref_t wakeref;
2013	enum dpio_phy phy;
2014	enum dpio_channel ch;
2015	u32 val;
2016	bool ret;
2017
2018	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
2019
2020	wakeref = intel_display_power_get_if_enabled(dev_priv,
2021						     POWER_DOMAIN_DISPLAY_CORE);
2022	if (!wakeref)
2023		return false;
2024
2025	ret = false;
2026
2027	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2028	if (!(val & PORT_PLL_ENABLE))
2029		goto out;
2030
2031	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
2032	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2033
2034	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2035	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2036
2037	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
2038	hw_state->pll0 &= PORT_PLL_M2_MASK;
2039
2040	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
2041	hw_state->pll1 &= PORT_PLL_N_MASK;
2042
2043	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2044	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2045
2046	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2047	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2048
2049	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2050	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2051			  PORT_PLL_INT_COEFF_MASK |
2052			  PORT_PLL_GAIN_CTL_MASK;
2053
2054	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2055	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2056
2057	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2058	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2059
2060	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2061	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2062			   PORT_PLL_DCO_AMP_MASK;
2063
2064	/*
2065	 * While we write the group register to program all lanes at once, we
2066	 * can only read back individual lane registers. We configure all lanes
2067	 * the same way, so just read out lanes 0/1 here and note if lanes 2/3 differ.
2068	 */
2069	hw_state->pcsdw12 = intel_de_read(dev_priv,
2070					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2071	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2072		drm_dbg(&dev_priv->drm,
2073			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2074			hw_state->pcsdw12,
2075			intel_de_read(dev_priv,
2076				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2077	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2078
2079	ret = true;
2080
2081out:
2082	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2083
2084	return ret;
2085}
2086
2087/* bxt clock parameters */
2088struct bxt_clk_div {
2089	int clock;
2090	u32 p1;
2091	u32 p2;
2092	u32 m2_int;
2093	u32 m2_frac;
2094	bool m2_frac_en;
2095	u32 n;
2096
2097	int vco;
2098};
2099
2100	/* pre-calculated values for DP link rates */
2101static const struct bxt_clk_div bxt_dp_clk_val[] = {
2102	{162000, 4, 2, 32, 1677722, 1, 1},
2103	{270000, 4, 1, 27,       0, 0, 1},
2104	{540000, 2, 1, 27,       0, 0, 1},
2105	{216000, 3, 2, 32, 1677722, 1, 1},
2106	{243000, 4, 1, 24, 1258291, 1, 1},
2107	{324000, 4, 1, 32, 1677722, 1, 1},
2108	{432000, 3, 1, 32, 1677722, 1, 1}
2109};
2110
2111static bool
2112bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2113			  struct bxt_clk_div *clk_div)
2114{
2115	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2116	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2117	struct dpll best_clock;
2118
2119	/* Calculate HDMI div */
2120	/*
2121	 * FIXME: tie the following calculation into
2122	 * i9xx_crtc_compute_clock
2123	 */
2124	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2125		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2126			crtc_state->port_clock,
2127			pipe_name(crtc->pipe));
2128		return false;
2129	}
2130
2131	clk_div->p1 = best_clock.p1;
2132	clk_div->p2 = best_clock.p2;
2133	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2134	clk_div->n = best_clock.n;
2135	clk_div->m2_int = best_clock.m2 >> 22;
2136	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2137	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2138
2139	clk_div->vco = best_clock.vco;
2140
2141	return true;
2142}
2143
2144static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2145				    struct bxt_clk_div *clk_div)
2146{
2147	int clock = crtc_state->port_clock;
2148	int i;
2149
2150	*clk_div = bxt_dp_clk_val[0];
2151	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2152		if (bxt_dp_clk_val[i].clock == clock) {
2153			*clk_div = bxt_dp_clk_val[i];
2154			break;
2155		}
2156	}
2157
2158	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2159}
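/*
 * Worked example (for illustration only): for the 270000 kHz (HBR) entry in
 * bxt_dp_clk_val above, p1 = 4 and p2 = 1, so bxt_ddi_dp_pll_dividers()
 * computes
 *   vco = 270000 * 10 / 2 * 4 * 1 = 5,400,000 kHz.
 * That agrees with the divider fields of the same entry and the 100 MHz
 * non-SSC reference set in bxt_update_dpll_ref_clks():
 *   vco = 2 * m2 * ref / n = 2 * 27 * 100,000 / 1 = 5,400,000 kHz,
 * and the link rate falls out again as vco / (5 * p1 * p2) = 270,000 kHz.
 */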
2160
2161static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2162				      const struct bxt_clk_div *clk_div)
2163{
2164	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2165	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2166	int clock = crtc_state->port_clock;
2167	int vco = clk_div->vco;
2168	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2169	u32 lanestagger;
2170
2171	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
2172
2173	if (vco >= 6200000 && vco <= 6700000) {
2174		prop_coef = 4;
2175		int_coef = 9;
2176		gain_ctl = 3;
2177		targ_cnt = 8;
2178	} else if ((vco > 5400000 && vco < 6200000) ||
2179			(vco >= 4800000 && vco < 5400000)) {
2180		prop_coef = 5;
2181		int_coef = 11;
2182		gain_ctl = 3;
2183		targ_cnt = 9;
2184	} else if (vco == 5400000) {
2185		prop_coef = 3;
2186		int_coef = 8;
2187		gain_ctl = 1;
2188		targ_cnt = 9;
2189	} else {
2190		drm_err(&i915->drm, "Invalid VCO\n");
2191		return false;
2192	}
2193
2194	if (clock > 270000)
2195		lanestagger = 0x18;
2196	else if (clock > 135000)
2197		lanestagger = 0x0d;
2198	else if (clock > 67000)
2199		lanestagger = 0x07;
2200	else if (clock > 33000)
2201		lanestagger = 0x04;
2202	else
2203		lanestagger = 0x02;
2204
2205	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2206	dpll_hw_state->pll0 = clk_div->m2_int;
2207	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2208	dpll_hw_state->pll2 = clk_div->m2_frac;
2209
2210	if (clk_div->m2_frac_en)
2211		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2212
2213	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
2214	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
2215
2216	dpll_hw_state->pll8 = targ_cnt;
2217
2218	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
2219
2220	dpll_hw_state->pll10 =
2221		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
2222		| PORT_PLL_DCO_AMP_OVR_EN_H;
2223
2224	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2225
2226	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2227
2228	return true;
2229}
2230
2231static bool
2232bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2233{
2234	struct bxt_clk_div clk_div = {};
2235
2236	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2237
2238	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2239}
2240
2241static bool
2242bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2243{
2244	struct bxt_clk_div clk_div = {};
2245
2246	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2247
2248	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2249}
2250
2251static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2252				const struct intel_shared_dpll *pll,
2253				const struct intel_dpll_hw_state *pll_state)
2254{
2255	struct dpll clock;
2256
2257	clock.m1 = 2;
2258	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
2259	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2260		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
2261	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
2262	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
2263	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
2264
2265	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2266}
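/*
 * Illustrative sanity check (assuming chv_calc_dpll_params() computes
 * vco = ref * m1 * m2 / n with m2 in 22.22 fixed point and returns
 * vco / (p1 * p2) / 5): for the 270000 kHz table entry, the saved state
 * gives m2 = 27 << 22, n = 1, p1 = 4, p2 = 1, so with the 100 MHz non-SSC
 * reference vco = 5,400,000 kHz and the port clock reads back as
 * 5,400,000 / (4 * 1) / 5 = 270,000 kHz.
 */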
2267
2268static bool bxt_get_dpll(struct intel_atomic_state *state,
2269			 struct intel_crtc *crtc,
2270			 struct intel_encoder *encoder)
2271{
2272	struct intel_crtc_state *crtc_state =
2273		intel_atomic_get_new_crtc_state(state, crtc);
2274	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2275	struct intel_shared_dpll *pll;
2276	enum intel_dpll_id id;
2277
2278	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
2279	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
2280		return false;
2281
2282	if (intel_crtc_has_dp_encoder(crtc_state) &&
2283	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
2284		return false;
2285
2286	/* 1:1 mapping between ports and PLLs */
2287	id = (enum intel_dpll_id) encoder->port;
2288	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2289
2290	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2291		    crtc->base.base.id, crtc->base.name, pll->info->name);
2292
2293	intel_reference_shared_dpll(state, crtc,
2294				    pll, &crtc_state->dpll_hw_state);
2295
2296	crtc_state->shared_dpll = pll;
2297
2298	return true;
2299}
2300
2301static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2302{
2303	i915->dpll.ref_clks.ssc = 100000;
2304	i915->dpll.ref_clks.nssc = 100000;
2305	/* DSI non-SSC ref 19.2MHz */
2306}
2307
2308static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2309			      const struct intel_dpll_hw_state *hw_state)
2310{
2311	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2312		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2313		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2314		    hw_state->ebb0,
2315		    hw_state->ebb4,
2316		    hw_state->pll0,
2317		    hw_state->pll1,
2318		    hw_state->pll2,
2319		    hw_state->pll3,
2320		    hw_state->pll6,
2321		    hw_state->pll8,
2322		    hw_state->pll9,
2323		    hw_state->pll10,
2324		    hw_state->pcsdw12);
2325}
2326
2327static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2328	.enable = bxt_ddi_pll_enable,
2329	.disable = bxt_ddi_pll_disable,
2330	.get_hw_state = bxt_ddi_pll_get_hw_state,
2331	.get_freq = bxt_ddi_pll_get_freq,
2332};
2333
2334static const struct dpll_info bxt_plls[] = {
2335	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2336	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2337	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2338	{ },
2339};
2340
2341static const struct intel_dpll_mgr bxt_pll_mgr = {
2342	.dpll_info = bxt_plls,
2343	.get_dplls = bxt_get_dpll,
2344	.put_dplls = intel_put_dpll,
2345	.update_ref_clks = bxt_update_dpll_ref_clks,
2346	.dump_hw_state = bxt_dump_hw_state,
2347};
2348
2349static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2350			       struct intel_shared_dpll *pll)
2351{
2352	const enum intel_dpll_id id = pll->info->id;
2353	u32 val;
2354
2355	/* 1. Enable DPLL power in DPLL_ENABLE. */
2356	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2357	val |= PLL_POWER_ENABLE;
2358	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2359
2360	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2361	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2362				  PLL_POWER_STATE, 5))
2363		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
2364
2365	/*
2366	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2367	 * select DP mode, and set DP link rate.
2368	 */
2369	val = pll->state.hw_state.cfgcr0;
2370	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
2371
2372	/* 4. Read back to ensure writes completed */
2373	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
2374
2375	/* 3. Configure DPLL_CFGCR1 */
2376	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
2377	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2378		val = pll->state.hw_state.cfgcr1;
2379		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
2380		/* 4. Read back to ensure writes completed */
2381		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
2382	}
2383
2384	/*
2385	 * 5. If the frequency will result in a change to the voltage
2386	 * requirement, follow the Display Voltage Frequency Switching
2387	 * Sequence Before Frequency Change
2388	 *
2389	 * Note: DVFS is actually handled via the cdclk code paths,
2390	 * hence we do nothing here.
2391	 */
2392
2393	/* 6. Enable DPLL in DPLL_ENABLE. */
2394	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2395	val |= PLL_ENABLE;
2396	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2397
2398	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2399	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2400		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
2401
2402	/*
2403	 * 8. If the frequency will result in a change to the voltage
2404	 * requirement, follow the Display Voltage Frequency Switching
2405	 * Sequence After Frequency Change
2406	 *
2407	 * Note: DVFS is actually handled via the cdclk code paths,
2408	 * hence we do nothing here.
2409	 */
2410
2411	/*
2412	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2413	 * Done at intel_ddi_clk_select
2414	 */
2415}
2416
2417static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2418				struct intel_shared_dpll *pll)
2419{
2420	const enum intel_dpll_id id = pll->info->id;
2421	u32 val;
2422
2423	/*
2424	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2425	 * Done at intel_ddi_post_disable
2426	 */
2427
2428	/*
2429	 * 2. If the frequency will result in a change to the voltage
2430	 * requirement, follow the Display Voltage Frequency Switching
2431	 * Sequence Before Frequency Change
2432	 *
2433	 * Note: DVFS is actually handled via the cdclk code paths,
2434	 * hence we do nothing here.
2435	 */
2436
2437	/* 3. Disable DPLL through DPLL_ENABLE. */
2438	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2439	val &= ~PLL_ENABLE;
2440	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2441
2442	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2443	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2444		drm_err(&dev_priv->drm, "PLL %d locked\n", id);
2445
2446	/*
2447	 * 5. If the frequency will result in a change to the voltage
2448	 * requirement, follow the Display Voltage Frequency Switching
2449	 * Sequence After Frequency Change
2450	 *
2451	 * Note: DVFS is actually handled via the cdclk code paths,
2452	 * hence we do nothing here.
2453	 */
2454
2455	/* 6. Disable DPLL power in DPLL_ENABLE. */
2456	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2457	val &= ~PLL_POWER_ENABLE;
2458	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2459
2460	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2461	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2462				    PLL_POWER_STATE, 5))
2463		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
2464}
2465
2466static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2467				     struct intel_shared_dpll *pll,
2468				     struct intel_dpll_hw_state *hw_state)
2469{
2470	const enum intel_dpll_id id = pll->info->id;
2471	intel_wakeref_t wakeref;
2472	u32 val;
2473	bool ret;
2474
2475	wakeref = intel_display_power_get_if_enabled(dev_priv,
2476						     POWER_DOMAIN_DISPLAY_CORE);
2477	if (!wakeref)
2478		return false;
2479
2480	ret = false;
2481
2482	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2483	if (!(val & PLL_ENABLE))
2484		goto out;
2485
2486	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
2487	hw_state->cfgcr0 = val;
2488
2489	/* avoid reading back stale values if HDMI mode is not enabled */
2490	if (val & DPLL_CFGCR0_HDMI_MODE) {
2491		hw_state->cfgcr1 = intel_de_read(dev_priv,
2492						 CNL_DPLL_CFGCR1(id));
2493	}
2494	ret = true;
2495
2496out:
2497	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2498
2499	return ret;
2500}
2501
2502static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2503				      int *qdiv, int *kdiv)
2504{
2505	/* even dividers */
2506	if (bestdiv % 2 == 0) {
2507		if (bestdiv == 2) {
2508			*pdiv = 2;
2509			*qdiv = 1;
2510			*kdiv = 1;
2511		} else if (bestdiv % 4 == 0) {
2512			*pdiv = 2;
2513			*qdiv = bestdiv / 4;
2514			*kdiv = 2;
2515		} else if (bestdiv % 6 == 0) {
2516			*pdiv = 3;
2517			*qdiv = bestdiv / 6;
2518			*kdiv = 2;
2519		} else if (bestdiv % 5 == 0) {
2520			*pdiv = 5;
2521			*qdiv = bestdiv / 10;
2522			*kdiv = 2;
2523		} else if (bestdiv % 14 == 0) {
2524			*pdiv = 7;
2525			*qdiv = bestdiv / 14;
2526			*kdiv = 2;
2527		}
2528	} else {
2529		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2530			*pdiv = bestdiv;
2531			*qdiv = 1;
2532			*kdiv = 1;
2533		} else { /* 9, 15, 21 */
2534			*pdiv = bestdiv / 3;
2535			*qdiv = 1;
2536			*kdiv = 3;
2537		}
2538	}
2539}
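/*
 * Worked example (for illustration): bestdiv = 12 is even, not 2, and
 * divisible by 4, so the function above picks pdiv = 2, qdiv = 12 / 4 = 3,
 * kdiv = 2, and indeed pdiv * qdiv * kdiv = 12. An odd bestdiv such as 15
 * falls into the last branch: pdiv = 15 / 3 = 5, qdiv = 1, kdiv = 3.
 */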
2540
2541static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2542				      u32 dco_freq, u32 ref_freq,
2543				      int pdiv, int qdiv, int kdiv)
2544{
2545	u32 dco;
2546
2547	switch (kdiv) {
2548	case 1:
2549		params->kdiv = 1;
2550		break;
2551	case 2:
2552		params->kdiv = 2;
2553		break;
2554	case 3:
2555		params->kdiv = 4;
2556		break;
2557	default:
2558		WARN(1, "Incorrect KDiv\n");
2559	}
2560
2561	switch (pdiv) {
2562	case 2:
2563		params->pdiv = 1;
2564		break;
2565	case 3:
2566		params->pdiv = 2;
2567		break;
2568	case 5:
2569		params->pdiv = 4;
2570		break;
2571	case 7:
2572		params->pdiv = 8;
2573		break;
2574	default:
2575		WARN(1, "Incorrect PDiv\n");
2576	}
2577
2578	WARN_ON(kdiv != 2 && qdiv != 1);
2579
2580	params->qdiv_ratio = qdiv;
2581	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2582
2583	dco = div_u64((u64)dco_freq << 15, ref_freq);
2584
2585	params->dco_integer = dco >> 15;
2586	params->dco_fraction = dco & 0x7fff;
2587}
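/*
 * Worked example (for illustration): with dco_freq = 8,910,000 kHz and a
 * 24,000 kHz reference, dco = (8,910,000 << 15) / 24,000 = 371.25 in 15-bit
 * fixed point, so dco_integer = 371 (0x173) and
 * dco_fraction = 0.25 * 2^15 = 0x2000. A kdiv of 2 is encoded as 2 and a
 * pdiv of 2 as 1, while qdiv = 3 sets qdiv_ratio = 3 with qdiv_mode = 1.
 */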
2588
2589static bool
2590__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2591			  struct skl_wrpll_params *wrpll_params,
2592			  int ref_clock)
2593{
2594	u32 afe_clock = crtc_state->port_clock * 5;
2595	u32 dco_min = 7998000;
2596	u32 dco_max = 10000000;
2597	u32 dco_mid = (dco_min + dco_max) / 2;
2598	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2599					 18, 20, 24, 28, 30, 32,  36,  40,
2600					 42, 44, 48, 50, 52, 54,  56,  60,
2601					 64, 66, 68, 70, 72, 76,  78,  80,
2602					 84, 88, 90, 92, 96, 98, 100, 102,
2603					  3,  5,  7,  9, 15, 21 };
2604	u32 dco, best_dco = 0, dco_centrality = 0;
2605	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2606	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2607
2608	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2609		dco = afe_clock * dividers[d];
2610
2611		if ((dco <= dco_max) && (dco >= dco_min)) {
2612			dco_centrality = abs(dco - dco_mid);
2613
2614			if (dco_centrality < best_dco_centrality) {
2615				best_dco_centrality = dco_centrality;
2616				best_div = dividers[d];
2617				best_dco = dco;
2618			}
2619		}
2620	}
2621
2622	if (best_div == 0)
2623		return false;
2624
2625	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2626	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2627				  pdiv, qdiv, kdiv);
2628
2629	return true;
2630}
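/*
 * Worked example (for illustration): for an HDMI port clock of 148,500 kHz
 * (1080p60), afe_clock = 742,500 kHz. The only divider from the list that
 * puts the DCO inside [7,998,000, 10,000,000] kHz is 12, giving
 * best_dco = 8,910,000 kHz, which then decomposes into pdiv = 2, qdiv = 3,
 * kdiv = 2 via cnl_wrpll_get_multipliers().
 */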
2631
2632static bool
2633cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2634			struct skl_wrpll_params *wrpll_params)
2635{
2636	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2637
2638	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2639					 i915->dpll.ref_clks.nssc);
2640}
2641
2642static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2643{
2644	u32 cfgcr0, cfgcr1;
2645	struct skl_wrpll_params wrpll_params = { 0, };
2646
2647	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2648
2649	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2650		return false;
2651
2652	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2653		wrpll_params.dco_integer;
2654
2655	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2656		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2657		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2658		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2659		DPLL_CFGCR1_CENTRAL_FREQ;
2660
2661	memset(&crtc_state->dpll_hw_state, 0,
2662	       sizeof(crtc_state->dpll_hw_state));
2663
2664	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2665	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2666	return true;
2667}
2668
2669/*
2670 * Display WA #22010492432: ehl, tgl
2671 * Program half of the nominal DCO divider fraction value.
2672 */
2673static bool
2674ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2675{
2676	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2677		 IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) ||
2678		 IS_TIGERLAKE(i915)) &&
2679		 i915->dpll.ref_clks.nssc == 38400;
2680}
2681
2682static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
2683				    const struct intel_shared_dpll *pll,
2684				    const struct intel_dpll_hw_state *pll_state,
2685				    int ref_clock)
2686{
2687	u32 dco_fraction;
2688	u32 p0, p1, p2, dco_freq;
2689
2690	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2691	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2692
2693	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2694		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2695			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2696	else
2697		p1 = 1;
2698
2699
2700	switch (p0) {
2701	case DPLL_CFGCR1_PDIV_2:
2702		p0 = 2;
2703		break;
2704	case DPLL_CFGCR1_PDIV_3:
2705		p0 = 3;
2706		break;
2707	case DPLL_CFGCR1_PDIV_5:
2708		p0 = 5;
2709		break;
2710	case DPLL_CFGCR1_PDIV_7:
2711		p0 = 7;
2712		break;
2713	}
2714
2715	switch (p2) {
2716	case DPLL_CFGCR1_KDIV_1:
2717		p2 = 1;
2718		break;
2719	case DPLL_CFGCR1_KDIV_2:
2720		p2 = 2;
2721		break;
2722	case DPLL_CFGCR1_KDIV_3:
2723		p2 = 3;
2724		break;
2725	}
2726
2727	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2728		   ref_clock;
2729
2730	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2731		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2732
2733	if (ehl_combo_pll_div_frac_wa_needed(dev_priv))
2734		dco_fraction *= 2;
2735
2736	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2737
2738	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
2739		return 0;
2740
2741	return dco_freq / (p0 * p1 * p2 * 5);
2742}
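/*
 * Worked example (for illustration): continuing the 148,500 kHz case above
 * with a 24,000 kHz reference (so the EHL/TGL fraction workaround does not
 * apply), cfgcr0 holds dco_integer = 371 and dco_fraction = 0x2000, giving
 * dco_freq = 371 * 24,000 + (8,192 * 24,000) / 0x8000 = 8,910,000 kHz, and
 * with p0 = 2, p1 = 3, p2 = 2 the port clock reads back as
 * 8,910,000 / (2 * 3 * 2 * 5) = 148,500 kHz.
 */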
2743
2744static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2745				  const struct intel_shared_dpll *pll,
2746				  const struct intel_dpll_hw_state *pll_state)
2747{
2748	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
2749					i915->dpll.ref_clks.nssc);
2750}
2751
2752static bool
2753cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2754{
2755	u32 cfgcr0;
2756
2757	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2758
2759	switch (crtc_state->port_clock / 2) {
2760	case 81000:
2761		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2762		break;
2763	case 135000:
2764		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2765		break;
2766	case 270000:
2767		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2768		break;
2769		/* eDP 1.4 rates */
2770	case 162000:
2771		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2772		break;
2773	case 108000:
2774		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2775		break;
2776	case 216000:
2777		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2778		break;
2779	case 324000:
2780		/* Some SKUs may require elevated I/O voltage to support this */
2781		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2782		break;
2783	case 405000:
2784		/* Some SKUs may require elevated I/O voltage to support this */
2785		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2786		break;
2787	}
2788
2789	memset(&crtc_state->dpll_hw_state, 0,
2790	       sizeof(crtc_state->dpll_hw_state));
2791
2792	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2793
2794	return true;
2795}
2796
2797static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2798				  const struct intel_shared_dpll *pll,
2799				  const struct intel_dpll_hw_state *pll_state)
2800{
2801	int link_clock = 0;
2802
2803	switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2804	case DPLL_CFGCR0_LINK_RATE_810:
2805		link_clock = 81000;
2806		break;
2807	case DPLL_CFGCR0_LINK_RATE_1080:
2808		link_clock = 108000;
2809		break;
2810	case DPLL_CFGCR0_LINK_RATE_1350:
2811		link_clock = 135000;
2812		break;
2813	case DPLL_CFGCR0_LINK_RATE_1620:
2814		link_clock = 162000;
2815		break;
2816	case DPLL_CFGCR0_LINK_RATE_2160:
2817		link_clock = 216000;
2818		break;
2819	case DPLL_CFGCR0_LINK_RATE_2700:
2820		link_clock = 270000;
2821		break;
2822	case DPLL_CFGCR0_LINK_RATE_3240:
2823		link_clock = 324000;
2824		break;
2825	case DPLL_CFGCR0_LINK_RATE_4050:
2826		link_clock = 405000;
2827		break;
2828	default:
2829		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2830		break;
2831	}
2832
2833	return link_clock * 2;
2834}
2835
2836static bool cnl_get_dpll(struct intel_atomic_state *state,
2837			 struct intel_crtc *crtc,
2838			 struct intel_encoder *encoder)
2839{
2840	struct intel_crtc_state *crtc_state =
2841		intel_atomic_get_new_crtc_state(state, crtc);
2842	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2843	struct intel_shared_dpll *pll;
2844	bool bret;
2845
2846	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2847		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2848		if (!bret) {
2849			drm_dbg_kms(&i915->drm,
2850				    "Could not get HDMI pll dividers.\n");
2851			return false;
2852		}
2853	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2854		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2855		if (!bret) {
2856			drm_dbg_kms(&i915->drm,
2857				    "Could not set DP dpll HW state.\n");
2858			return false;
2859		}
2860	} else {
2861		drm_dbg_kms(&i915->drm,
2862			    "Skip DPLL setup for output_types 0x%x\n",
2863			    crtc_state->output_types);
2864		return false;
2865	}
2866
2867	pll = intel_find_shared_dpll(state, crtc,
2868				     &crtc_state->dpll_hw_state,
2869				     BIT(DPLL_ID_SKL_DPLL2) |
2870				     BIT(DPLL_ID_SKL_DPLL1) |
2871				     BIT(DPLL_ID_SKL_DPLL0));
2872	if (!pll) {
2873		drm_dbg_kms(&i915->drm, "No PLL selected\n");
2874		return false;
2875	}
2876
2877	intel_reference_shared_dpll(state, crtc,
2878				    pll, &crtc_state->dpll_hw_state);
2879
2880	crtc_state->shared_dpll = pll;
2881
2882	return true;
2883}
2884
2885static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2886				const struct intel_shared_dpll *pll,
2887				const struct intel_dpll_hw_state *pll_state)
2888{
2889	if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2890		return cnl_ddi_wrpll_get_freq(i915, pll, pll_state);
2891	else
2892		return cnl_ddi_lcpll_get_freq(i915, pll, pll_state);
2893}
2894
2895static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
2896{
2897	/* No SSC reference */
2898	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
2899}
2900
2901static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2902			      const struct intel_dpll_hw_state *hw_state)
2903{
2904	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
2905		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2906		    hw_state->cfgcr0,
2907		    hw_state->cfgcr1);
2908}
2909
2910static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2911	.enable = cnl_ddi_pll_enable,
2912	.disable = cnl_ddi_pll_disable,
2913	.get_hw_state = cnl_ddi_pll_get_hw_state,
2914	.get_freq = cnl_ddi_pll_get_freq,
2915};
2916
2917static const struct dpll_info cnl_plls[] = {
2918	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2919	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2920	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2921	{ },
2922};
2923
2924static const struct intel_dpll_mgr cnl_pll_mgr = {
2925	.dpll_info = cnl_plls,
2926	.get_dplls = cnl_get_dpll,
2927	.put_dplls = intel_put_dpll,
2928	.update_ref_clks = cnl_update_dpll_ref_clks,
2929	.dump_hw_state = cnl_dump_hw_state,
2930};
2931
2932struct icl_combo_pll_params {
2933	int clock;
2934	struct skl_wrpll_params wrpll;
2935};
2936
2937/*
2938 * These values are already adjusted: they're the bits we write to the
2939 * registers, not the logical values.
2940 */
2941static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2942	{ 540000,
2943	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2944	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2945	{ 270000,
2946	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2947	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2948	{ 162000,
2949	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2950	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2951	{ 324000,
2952	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2953	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2954	{ 216000,
2955	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2956	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2957	{ 432000,
2958	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2959	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2960	{ 648000,
2961	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2962	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2963	{ 810000,
2964	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2965	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2966};
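/*
 * Spot check of the table above (for illustration, last entry): with a
 * 24 MHz reference, dco = 24,000 * (0x151 + 0x4000 / 0x8000) kHz
 * = 24,000 * 337.5 = 8,100,000 kHz; the pdiv encoding 0x1 is a divider of 2,
 * kdiv and qdiv are 1, so the port clock is 8,100,000 / (2 * 1 * 1 * 5)
 * = 810,000 kHz, matching the 8.1 GHz (HBR3) link rate of that entry.
 */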
2967
2968
2969/* Also used for 38.4 MHz values. */
2970static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2971	{ 540000,
2972	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2973	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2974	{ 270000,
2975	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2976	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2977	{ 162000,
2978	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2979	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2980	{ 324000,
2981	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2982	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2983	{ 216000,
2984	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2985	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2986	{ 432000,
2987	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2988	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2989	{ 648000,
2990	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2991	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2992	{ 810000,
2993	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2994	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2995};
2996
2997static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2998	.dco_integer = 0x151, .dco_fraction = 0x4000,
2999	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
3000};
3001
3002static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
3003	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
3004	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
3005};
3006
3007static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
3008	.dco_integer = 0x54, .dco_fraction = 0x3000,
3009	/* the following params are unused */
3010	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
3011};
3012
3013static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
3014	.dco_integer = 0x43, .dco_fraction = 0x4000,
3015	/* the following params are unused */
3016};
3017
3018static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
3019				  struct skl_wrpll_params *pll_params)
3020{
3021	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3022	const struct icl_combo_pll_params *params =
3023		dev_priv->dpll.ref_clks.nssc == 24000 ?
3024		icl_dp_combo_pll_24MHz_values :
3025		icl_dp_combo_pll_19_2MHz_values;
3026	int clock = crtc_state->port_clock;
3027	int i;
3028
3029	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
3030		if (clock == params[i].clock) {
3031			*pll_params = params[i].wrpll;
3032			return true;
3033		}
3034	}
3035
3036	MISSING_CASE(clock);
3037	return false;
3038}
3039
3040static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
3041			     struct skl_wrpll_params *pll_params)
3042{
3043	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3044
3045	if (DISPLAY_VER(dev_priv) >= 12) {
3046		switch (dev_priv->dpll.ref_clks.nssc) {
3047		default:
3048			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3049			fallthrough;
3050		case 19200:
3051		case 38400:
3052			*pll_params = tgl_tbt_pll_19_2MHz_values;
3053			break;
3054		case 24000:
3055			*pll_params = tgl_tbt_pll_24MHz_values;
3056			break;
3057		}
3058	} else {
3059		switch (dev_priv->dpll.ref_clks.nssc) {
3060		default:
3061			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3062			fallthrough;
3063		case 19200:
3064		case 38400:
3065			*pll_params = icl_tbt_pll_19_2MHz_values;
3066			break;
3067		case 24000:
3068			*pll_params = icl_tbt_pll_24MHz_values;
3069			break;
3070		}
3071	}
3072
3073	return true;
3074}
3075
3076static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
3077				    const struct intel_shared_dpll *pll,
3078				    const struct intel_dpll_hw_state *pll_state)
3079{
3080	/*
3081	 * The PLL outputs multiple frequencies at the same time; the selection
3082	 * is made at the DDI clock mux level.
3083	 */
3084	drm_WARN_ON(&i915->drm, 1);
3085
3086	return 0;
3087}
3088
3089static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3090{
3091	int ref_clock = i915->dpll.ref_clks.nssc;
3092
3093	/*
3094	 * For ICL+, the spec states: if reference frequency is 38.4,
3095	 * use 19.2 because the DPLL automatically divides that by 2.
3096	 */
3097	if (ref_clock == 38400)
3098		ref_clock = 19200;
3099
3100	return ref_clock;
3101}
3102
3103static bool
3104icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3105	       struct skl_wrpll_params *wrpll_params)
3106{
3107	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3108
3109	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3110					 icl_wrpll_ref_clock(i915));
3111}
3112
3113static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
3114				      const struct intel_shared_dpll *pll,
3115				      const struct intel_dpll_hw_state *pll_state)
3116{
3117	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
3118					icl_wrpll_ref_clock(i915));
3119}
3120
3121static void icl_calc_dpll_state(struct drm_i915_private *i915,
3122				const struct skl_wrpll_params *pll_params,
3123				struct intel_dpll_hw_state *pll_state)
3124{
3125	u32 dco_fraction = pll_params->dco_fraction;
3126
3127	memset(pll_state, 0, sizeof(*pll_state));
3128
3129	if (ehl_combo_pll_div_frac_wa_needed(i915))
3130		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
3131
3132	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
3133			    pll_params->dco_integer;
3134
3135	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3136			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3137			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3138			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
3139
3140	if (DISPLAY_VER(i915) >= 12)
3141		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3142	else
3143		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3144}
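/*
 * Worked example of the display WA above (for illustration): on TGL, or on
 * EHL from revision B0, with a 38.4 MHz reference, a nominal dco_fraction of
 * 0x7000 from the tables above is programmed as
 * DIV_ROUND_CLOSEST(0x7000, 2) = 0x3800; __cnl_ddi_wrpll_get_freq() doubles
 * it again on readout, so the computed frequency is unaffected.
 */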
3145
3146static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3147				     u32 *target_dco_khz,
3148				     struct intel_dpll_hw_state *state,
3149				     bool is_dkl)
3150{
3151	u32 dco_min_freq, dco_max_freq;
3152	int div1_vals[] = {7, 5, 3, 2};
3153	unsigned int i;
3154	int div2;
3155
3156	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3157	dco_max_freq = is_dp ? 8100000 : 10000000;
3158
3159	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3160		int div1 = div1_vals[i];
3161
3162		for (div2 = 10; div2 > 0; div2--) {
3163			int dco = div1 * div2 * clock_khz * 5;
3164			int a_divratio, tlinedrv, inputsel;
3165			u32 hsdiv;
3166
3167			if (dco < dco_min_freq || dco > dco_max_freq)
3168				continue;
3169
3170			if (div2 >= 2) {
3171				/*
3172				 * Note: a_divratio does not match the TGL BSpec
3173				 * algorithm, but it matches the hardcoded values
3174				 * and works on HW, at least for DP alt-mode.
3175				 */
3176				a_divratio = is_dp ? 10 : 5;
3177				tlinedrv = is_dkl ? 1 : 2;
3178			} else {
3179				a_divratio = 5;
3180				tlinedrv = 0;
3181			}
3182			inputsel = is_dp ? 0 : 1;
3183
3184			switch (div1) {
3185			default:
3186				MISSING_CASE(div1);
3187				fallthrough;
3188			case 2:
3189				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3190				break;
3191			case 3:
3192				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3193				break;
3194			case 5:
3195				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3196				break;
3197			case 7:
3198				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3199				break;
3200			}
3201
3202			*target_dco_khz = dco;
3203
3204			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3205
3206			state->mg_clktop2_coreclkctl1 =
3207				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3208
3209			state->mg_clktop2_hsclkctl =
3210				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3211				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3212				hsdiv |
3213				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3214
3215			return true;
3216		}
3217	}
3218
3219	return false;
3220}
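/*
 * Worked example (for illustration): for a DP port clock of 270,000 kHz the
 * DCO must land exactly on 8,100,000 kHz, and the first hit in the search
 * above is div1 = 3, div2 = 2 (3 * 2 * 270,000 * 5 = 8,100,000). Since
 * div2 >= 2 and this is DP, a_divratio = 10, tlinedrv = 2 (or 1 with a DKL
 * PHY), inputsel = 0, and hsclkctl ends up with HSDIV ratio 3 and DSDIV
 * ratio 2.
 */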
3221
3222/*
3223 * The specification for this function uses real numbers, so the math had to be
3224  * adapted to integer-only calculation, which is why it looks so different.
3225 */
3226static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3227				  struct intel_dpll_hw_state *pll_state)
3228{
3229	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3230	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3231	int clock = crtc_state->port_clock;
3232	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3233	u32 iref_ndiv, iref_trim, iref_pulse_w;
3234	u32 prop_coeff, int_coeff;
3235	u32 tdc_targetcnt, feedfwgain;
3236	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3237	u64 tmp;
3238	bool use_ssc = false;
3239	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3240	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
3241
3242	memset(pll_state, 0, sizeof(*pll_state));
3243
3244	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3245				      pll_state, is_dkl)) {
3246		drm_dbg_kms(&dev_priv->drm,
3247			    "Failed to find divisors for clock %d\n", clock);
3248		return false;
3249	}
3250
3251	m1div = 2;
3252	m2div_int = dco_khz / (refclk_khz * m1div);
3253	if (m2div_int > 255) {
3254		if (!is_dkl) {
3255			m1div = 4;
3256			m2div_int = dco_khz / (refclk_khz * m1div);
3257		}
3258
3259		if (m2div_int > 255) {
3260			drm_dbg_kms(&dev_priv->drm,
3261				    "Failed to find mdiv for clock %d\n",
3262				    clock);
3263			return false;
3264		}
3265	}
3266	m2div_rem = dco_khz % (refclk_khz * m1div);
3267
3268	tmp = (u64)m2div_rem * (1 << 22);
3269	do_div(tmp, refclk_khz * m1div);
3270	m2div_frac = tmp;
3271
3272	switch (refclk_khz) {
3273	case 19200:
3274		iref_ndiv = 1;
3275		iref_trim = 28;
3276		iref_pulse_w = 1;
3277		break;
3278	case 24000:
3279		iref_ndiv = 1;
3280		iref_trim = 25;
3281		iref_pulse_w = 2;
3282		break;
3283	case 38400:
3284		iref_ndiv = 2;
3285		iref_trim = 28;
3286		iref_pulse_w = 1;
3287		break;
3288	default:
3289		MISSING_CASE(refclk_khz);
3290		return false;
3291	}
3292
3293	/*
3294	 * tdc_res = 0.000003
3295	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3296	 *
3297	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3298	 * was supposed to be a division, but we rearranged the operations of
3299	 * the formula to avoid early divisions so we don't multiply the
3300	 * rounding errors.
3301	 *
3302	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3303	 * we also rearrange to work with integers.
3304	 *
3305	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3306	 * last division by 10.
3307	 */
3308	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
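	/*
	 * Worked example (for illustration), refclk_khz = 38400:
	 * 2 * 1000 * 100000 * 10 = 2,000,000,000; 132 * 38,400 = 5,068,800;
	 * 2,000,000,000 / 5,068,800 = 394; (394 + 5) / 10 = 39, matching
	 * int(2 / (0.000003 * 8 * 50 * 1.1) / 38.4 + 0.5) = 39 from the
	 * real-number formula above.
	 */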
3309
3310	/*
3311	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3312	 * 32 bits. That's not a problem since we round the division down
3313	 * anyway.
3314	 */
3315	feedfwgain = (use_ssc || m2div_rem > 0) ?
3316		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3317
3318	if (dco_khz >= 9000000) {
3319		prop_coeff = 5;
3320		int_coeff = 10;
3321	} else {
3322		prop_coeff = 4;
3323		int_coeff = 8;
3324	}
3325
3326	if (use_ssc) {
3327		tmp = mul_u32_u32(dco_khz, 47 * 32);
3328		do_div(tmp, refclk_khz * m1div * 10000);
3329		ssc_stepsize = tmp;
3330
3331		tmp = mul_u32_u32(dco_khz, 1000);
3332		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3333	} else {
3334		ssc_stepsize = 0;
3335		ssc_steplen = 0;
3336	}
3337	ssc_steplog = 4;
3338
3339	/* write pll_state calculations */
3340	if (is_dkl) {
3341		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3342					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3343					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3344					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3345
3346		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3347					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3348
3349		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3350					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3351					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3352					(use_ssc ? DKL_PLL_SSC_EN : 0);
3353
3354		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3355					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3356
3357		pll_state->mg_pll_tdc_coldst_bias =
3358				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3359				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3360
3361	} else {
3362		pll_state->mg_pll_div0 =
3363			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3364			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3365			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3366
3367		pll_state->mg_pll_div1 =
3368			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3369			MG_PLL_DIV1_DITHER_DIV_2 |
3370			MG_PLL_DIV1_NDIVRATIO(1) |
3371			MG_PLL_DIV1_FBPREDIV(m1div);
3372
3373		pll_state->mg_pll_lf =
3374			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3375			MG_PLL_LF_AFCCNTSEL_512 |
3376			MG_PLL_LF_GAINCTRL(1) |
3377			MG_PLL_LF_INT_COEFF(int_coeff) |
3378			MG_PLL_LF_PROP_COEFF(prop_coeff);
3379
3380		pll_state->mg_pll_frac_lock =
3381			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3382			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3383			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3384			MG_PLL_FRAC_LOCK_DCODITHEREN |
3385			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3386		if (use_ssc || m2div_rem > 0)
3387			pll_state->mg_pll_frac_lock |=
3388				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3389
3390		pll_state->mg_pll_ssc =
3391			(use_ssc ? MG_PLL_SSC_EN : 0) |
3392			MG_PLL_SSC_TYPE(2) |
3393			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3394			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3395			MG_PLL_SSC_FLLEN |
3396			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3397
3398		pll_state->mg_pll_tdc_coldst_bias =
3399			MG_PLL_TDC_COLDST_COLDSTART |
3400			MG_PLL_TDC_COLDST_IREFINT_EN |
3401			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3402			MG_PLL_TDC_TDCOVCCORR_EN |
3403			MG_PLL_TDC_TDCSEL(3);
3404
3405		pll_state->mg_pll_bias =
3406			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3407			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3408			MG_PLL_BIAS_BIAS_BONUS(10) |
3409			MG_PLL_BIAS_BIASCAL_EN |
3410			MG_PLL_BIAS_CTRIM(12) |
3411			MG_PLL_BIAS_VREF_RDAC(4) |
3412			MG_PLL_BIAS_IREFTRIM(iref_trim);
3413
3414		if (refclk_khz == 38400) {
3415			pll_state->mg_pll_tdc_coldst_bias_mask =
3416				MG_PLL_TDC_COLDST_COLDSTART;
3417			pll_state->mg_pll_bias_mask = 0;
3418		} else {
3419			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3420			pll_state->mg_pll_bias_mask = -1U;
3421		}
3422
3423		pll_state->mg_pll_tdc_coldst_bias &=
3424			pll_state->mg_pll_tdc_coldst_bias_mask;
3425		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3426	}
3427
3428	return true;
3429}
3430
3431static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3432				   const struct intel_shared_dpll *pll,
3433				   const struct intel_dpll_hw_state *pll_state)
3434{
3435	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3436	u64 tmp;
3437
3438	ref_clock = dev_priv->dpll.ref_clks.nssc;
3439
3440	if (DISPLAY_VER(dev_priv) >= 12) {
3441		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3442		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3443		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3444
3445		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3446			m2_frac = pll_state->mg_pll_bias &
3447				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3448			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3449		} else {
3450			m2_frac = 0;
3451		}
3452	} else {
3453		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3454		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3455
3456		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3457			m2_frac = pll_state->mg_pll_div0 &
3458				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3459			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3460		} else {
3461			m2_frac = 0;
3462		}
3463	}
3464
3465	switch (pll_state->mg_clktop2_hsclkctl &
3466		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3467	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3468		div1 = 2;
3469		break;
3470	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3471		div1 = 3;
3472		break;
3473	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3474		div1 = 5;
3475		break;
3476	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3477		div1 = 7;
3478		break;
3479	default:
3480		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3481		return 0;
3482	}
3483
3484	div2 = (pll_state->mg_clktop2_hsclkctl &
3485		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3486		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3487
3488	/* a div2 value of 0 is the same as 1, i.e. no division */
3489	if (div2 == 0)
3490		div2 = 1;
3491
3492	/*
3493	 * Adjust the original formula to delay the division by 2^22 in order to
3494	 * minimize possible rounding errors.
3495	 */
3496	tmp = (u64)m1 * m2_int * ref_clock +
3497	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3498	tmp = div_u64(tmp, 5 * div1 * div2);
3499
3500	return tmp;
3501}
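/*
 * Worked example (for illustration): a DP 270,000 kHz configuration computed
 * by icl_calc_mg_pll_state() with a 38,400 kHz reference ends up with m1 = 2,
 * m2_int = 105, m2_frac = 1,966,080, div1 = 3 and div2 = 2. Plugging those
 * into the formula above gives
 *   (2 * 105 * 38,400) + ((2 * 1,966,080 * 38,400) >> 22)
 *   = 8,064,000 + 36,000 = 8,100,000 kHz,
 * and 8,100,000 / (5 * 3 * 2) = 270,000 kHz.
 */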
3502
3503/**
3504 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3505 * @crtc_state: state for the CRTC to select the DPLL for
3506 * @port_dpll_id: the active @port_dpll_id to select
3507 *
3508 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3509 * CRTC.
3510 */
3511void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3512			      enum icl_port_dpll_id port_dpll_id)
3513{
3514	struct icl_port_dpll *port_dpll =
3515		&crtc_state->icl_port_dplls[port_dpll_id];
3516
3517	crtc_state->shared_dpll = port_dpll->pll;
3518	crtc_state->dpll_hw_state = port_dpll->hw_state;
3519}
3520
3521static void icl_update_active_dpll(struct intel_atomic_state *state,
3522				   struct intel_crtc *crtc,
3523				   struct intel_encoder *encoder)
3524{
3525	struct intel_crtc_state *crtc_state =
3526		intel_atomic_get_new_crtc_state(state, crtc);
3527	struct intel_digital_port *primary_port;
3528	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3529
3530	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3531		enc_to_mst(encoder)->primary :
3532		enc_to_dig_port(encoder);
3533
3534	if (primary_port &&
3535	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3536	     primary_port->tc_mode == TC_PORT_LEGACY))
3537		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3538
3539	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3540}
3541
3542static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3543{
3544	if (!(i915->hti_state & HDPORT_ENABLED))
3545		return 0;
3546
3547	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3548}
3549
3550static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3551				   struct intel_crtc *crtc,
3552				   struct intel_encoder *encoder)
3553{
3554	struct intel_crtc_state *crtc_state =
3555		intel_atomic_get_new_crtc_state(state, crtc);
3556	struct skl_wrpll_params pll_params = { };
3557	struct icl_port_dpll *port_dpll =
3558		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3559	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3560	enum port port = encoder->port;
3561	unsigned long dpll_mask;
3562	int ret;
3563
3564	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3565	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3566		ret = icl_calc_wrpll(crtc_state, &pll_params);
3567	else
3568		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3569
3570	if (!ret) {
3571		drm_dbg_kms(&dev_priv->drm,
3572			    "Could not calculate combo PHY PLL state.\n");
3573
3574		return false;
3575	}
3576
3577	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3578
3579	if (IS_ALDERLAKE_S(dev_priv)) {
3580		dpll_mask =
3581			BIT(DPLL_ID_DG1_DPLL3) |
3582			BIT(DPLL_ID_DG1_DPLL2) |
3583			BIT(DPLL_ID_ICL_DPLL1) |
3584			BIT(DPLL_ID_ICL_DPLL0);
3585	} else if (IS_DG1(dev_priv)) {
3586		if (port == PORT_D || port == PORT_E) {
3587			dpll_mask =
3588				BIT(DPLL_ID_DG1_DPLL2) |
3589				BIT(DPLL_ID_DG1_DPLL3);
3590		} else {
3591			dpll_mask =
3592				BIT(DPLL_ID_DG1_DPLL0) |
3593				BIT(DPLL_ID_DG1_DPLL1);
3594		}
3595	} else if (IS_ROCKETLAKE(dev_priv)) {
3596		dpll_mask =
3597			BIT(DPLL_ID_EHL_DPLL4) |
3598			BIT(DPLL_ID_ICL_DPLL1) |
3599			BIT(DPLL_ID_ICL_DPLL0);
3600	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3601		dpll_mask =
3602			BIT(DPLL_ID_EHL_DPLL4) |
3603			BIT(DPLL_ID_ICL_DPLL1) |
3604			BIT(DPLL_ID_ICL_DPLL0);
3605	} else {
3606		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3607	}
3608
3609	/* Eliminate DPLLs from consideration if reserved by HTI */
3610	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3611
3612	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3613						&port_dpll->hw_state,
3614						dpll_mask);
3615	if (!port_dpll->pll) {
3616		drm_dbg_kms(&dev_priv->drm,
3617			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3618			    encoder->base.base.id, encoder->base.name);
3619		return false;
3620	}
3621
3622	intel_reference_shared_dpll(state, crtc,
3623				    port_dpll->pll, &port_dpll->hw_state);
3624
3625	icl_update_active_dpll(state, crtc, encoder);
3626
3627	return true;
3628}
3629
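/*
 * Reserve both PLLs a Type-C port may need: the TBT PLL (kept in the
 * default port DPLL slot) and the MG PHY PLL of the port. Which of the
 * two actually drives the port is decided afterwards by
 * icl_update_active_dpll() based on the TC port mode.
 */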
3630static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3631				 struct intel_crtc *crtc,
3632				 struct intel_encoder *encoder)
3633{
3634	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3635	struct intel_crtc_state *crtc_state =
3636		intel_atomic_get_new_crtc_state(state, crtc);
3637	struct skl_wrpll_params pll_params = { };
3638	struct icl_port_dpll *port_dpll;
3639	enum intel_dpll_id dpll_id;
3640
3641	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3642	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
3643		drm_dbg_kms(&dev_priv->drm,
3644			    "Could not calculate TBT PLL state.\n");
3645		return false;
3646	}
3647
3648	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3649
3650	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3651						&port_dpll->hw_state,
3652						BIT(DPLL_ID_ICL_TBTPLL));
3653	if (!port_dpll->pll) {
3654		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
3655		return false;
3656	}
3657	intel_reference_shared_dpll(state, crtc,
3658				    port_dpll->pll, &port_dpll->hw_state);
3659
3660
3661	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3662	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3663		drm_dbg_kms(&dev_priv->drm,
3664			    "Could not calculate MG PHY PLL state.\n");
3665		goto err_unreference_tbt_pll;
3666	}
3667
3668	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3669							 encoder->port));
3670	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3671						&port_dpll->hw_state,
3672						BIT(dpll_id));
3673	if (!port_dpll->pll) {
3674		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
3675		goto err_unreference_tbt_pll;
3676	}
3677	intel_reference_shared_dpll(state, crtc,
3678				    port_dpll->pll, &port_dpll->hw_state);
3679
3680	icl_update_active_dpll(state, crtc, encoder);
3681
3682	return true;
3683
3684err_unreference_tbt_pll:
3685	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3686	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3687
3688	return false;
3689}
3690
3691static bool icl_get_dplls(struct intel_atomic_state *state,
3692			  struct intel_crtc *crtc,
3693			  struct intel_encoder *encoder)
3694{
3695	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3696	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3697
3698	if (intel_phy_is_combo(dev_priv, phy))
3699		return icl_get_combo_phy_dpll(state, crtc, encoder);
3700	else if (intel_phy_is_tc(dev_priv, phy))
3701		return icl_get_tc_phy_dplls(state, crtc, encoder);
3702
3703	MISSING_CASE(phy);
3704
3705	return false;
3706}
3707
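/*
 * Release all port DPLL references taken for this CRTC by icl_get_dplls()
 * and clear its shared DPLL pointer in the new state.
 */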
3708static void icl_put_dplls(struct intel_atomic_state *state,
3709			  struct intel_crtc *crtc)
3710{
3711	const struct intel_crtc_state *old_crtc_state =
3712		intel_atomic_get_old_crtc_state(state, crtc);
3713	struct intel_crtc_state *new_crtc_state =
3714		intel_atomic_get_new_crtc_state(state, crtc);
3715	enum icl_port_dpll_id id;
3716
3717	new_crtc_state->shared_dpll = NULL;
3718
3719	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3720		const struct icl_port_dpll *old_port_dpll =
3721			&old_crtc_state->icl_port_dplls[id];
3722		struct icl_port_dpll *new_port_dpll =
3723			&new_crtc_state->icl_port_dplls[id];
3724
3725		new_port_dpll->pll = NULL;
3726
3727		if (!old_port_dpll->pll)
3728			continue;
3729
3730		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3731	}
3732}
3733
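/*
 * Read out the MG PHY PLL state of a TC port, masking each register down
 * to the fields that the driver programs (see icl_mg_pll_write()).
 */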
3734static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3735				struct intel_shared_dpll *pll,
3736				struct intel_dpll_hw_state *hw_state)
3737{
3738	const enum intel_dpll_id id = pll->info->id;
3739	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3740	intel_wakeref_t wakeref;
3741	bool ret = false;
3742	u32 val;
3743
3744	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3745
3746	wakeref = intel_display_power_get_if_enabled(dev_priv,
3747						     POWER_DOMAIN_DISPLAY_CORE);
3748	if (!wakeref)
3749		return false;
3750
3751	val = intel_de_read(dev_priv, enable_reg);
3752	if (!(val & PLL_ENABLE))
3753		goto out;
3754
3755	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3756						  MG_REFCLKIN_CTL(tc_port));
3757	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3758
3759	hw_state->mg_clktop2_coreclkctl1 =
3760		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3761	hw_state->mg_clktop2_coreclkctl1 &=
3762		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3763
3764	hw_state->mg_clktop2_hsclkctl =
3765		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3766	hw_state->mg_clktop2_hsclkctl &=
3767		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3768		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3769		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3770		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3771
3772	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3773	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3774	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3775	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3776						   MG_PLL_FRAC_LOCK(tc_port));
3777	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3778
3779	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3780	hw_state->mg_pll_tdc_coldst_bias =
3781		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3782
3783	if (dev_priv->dpll.ref_clks.nssc == 38400) {
3784		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3785		hw_state->mg_pll_bias_mask = 0;
3786	} else {
3787		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3788		hw_state->mg_pll_bias_mask = -1U;
3789	}
3790
3791	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3792	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3793
3794	ret = true;
3795out:
3796	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3797	return ret;
3798}
3799
3800static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3801				 struct intel_shared_dpll *pll,
3802				 struct intel_dpll_hw_state *hw_state)
3803{
3804	const enum intel_dpll_id id = pll->info->id;
3805	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3806	intel_wakeref_t wakeref;
3807	bool ret = false;
3808	u32 val;
3809
3810	wakeref = intel_display_power_get_if_enabled(dev_priv,
3811						     POWER_DOMAIN_DISPLAY_CORE);
3812	if (!wakeref)
3813		return false;
3814
3815	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3816	if (!(val & PLL_ENABLE))
3817		goto out;
3818
3819	/*
3820	 * All registers read here have the same HIP_INDEX_REG even though
3821	 * they are on different building blocks
3822	 */
3823	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3824		       HIP_INDEX_VAL(tc_port, 0x2));
3825
3826	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3827						  DKL_REFCLKIN_CTL(tc_port));
3828	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3829
3830	hw_state->mg_clktop2_hsclkctl =
3831		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3832	hw_state->mg_clktop2_hsclkctl &=
3833		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3834		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3835		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3836		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3837
3838	hw_state->mg_clktop2_coreclkctl1 =
3839		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3840	hw_state->mg_clktop2_coreclkctl1 &=
3841		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3842
3843	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3844	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
3845				  DKL_PLL_DIV0_PROP_COEFF_MASK |
3846				  DKL_PLL_DIV0_FBPREDIV_MASK |
3847				  DKL_PLL_DIV0_FBDIV_INT_MASK);
3848
3849	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3850	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3851				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3852
3853	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3854	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3855				 DKL_PLL_SSC_STEP_LEN_MASK |
3856				 DKL_PLL_SSC_STEP_NUM_MASK |
3857				 DKL_PLL_SSC_EN);
3858
3859	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3860	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3861				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3862
3863	hw_state->mg_pll_tdc_coldst_bias =
3864		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3865	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3866					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3867
3868	ret = true;
3869out:
3870	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3871	return ret;
3872}
3873
3874static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3875				 struct intel_shared_dpll *pll,
3876				 struct intel_dpll_hw_state *hw_state,
3877				 i915_reg_t enable_reg)
3878{
3879	const enum intel_dpll_id id = pll->info->id;
3880	intel_wakeref_t wakeref;
3881	bool ret = false;
3882	u32 val;
3883
3884	wakeref = intel_display_power_get_if_enabled(dev_priv,
3885						     POWER_DOMAIN_DISPLAY_CORE);
3886	if (!wakeref)
3887		return false;
3888
3889	val = intel_de_read(dev_priv, enable_reg);
3890	if (!(val & PLL_ENABLE))
3891		goto out;
3892
3893	if (IS_ALDERLAKE_S(dev_priv)) {
3894		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3895		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3896	} else if (IS_DG1(dev_priv)) {
3897		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3898		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3899	} else if (IS_ROCKETLAKE(dev_priv)) {
3900		hw_state->cfgcr0 = intel_de_read(dev_priv,
3901						 RKL_DPLL_CFGCR0(id));
3902		hw_state->cfgcr1 = intel_de_read(dev_priv,
3903						 RKL_DPLL_CFGCR1(id));
3904	} else if (DISPLAY_VER(dev_priv) >= 12) {
3905		hw_state->cfgcr0 = intel_de_read(dev_priv,
3906						 TGL_DPLL_CFGCR0(id));
3907		hw_state->cfgcr1 = intel_de_read(dev_priv,
3908						 TGL_DPLL_CFGCR1(id));
3909	} else {
3910		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3911			hw_state->cfgcr0 = intel_de_read(dev_priv,
3912							 ICL_DPLL_CFGCR0(4));
3913			hw_state->cfgcr1 = intel_de_read(dev_priv,
3914							 ICL_DPLL_CFGCR1(4));
3915		} else {
3916			hw_state->cfgcr0 = intel_de_read(dev_priv,
3917							 ICL_DPLL_CFGCR0(id));
3918			hw_state->cfgcr1 = intel_de_read(dev_priv,
3919							 ICL_DPLL_CFGCR1(id));
3920		}
3921	}
3922
3923	ret = true;
3924out:
3925	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3926	return ret;
3927}
3928
3929static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3930				   struct intel_shared_dpll *pll,
3931				   struct intel_dpll_hw_state *hw_state)
3932{
3933	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3934
3935	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3936}
3937
3938static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3939				 struct intel_shared_dpll *pll,
3940				 struct intel_dpll_hw_state *hw_state)
3941{
3942	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3943}
3944
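/*
 * Program the PLL's CFGCR0/CFGCR1 configuration registers. The register
 * instances to use depend on the platform and, on JSL/EHL, on whether the
 * PLL is DPLL4.
 */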
3945static void icl_dpll_write(struct drm_i915_private *dev_priv,
3946			   struct intel_shared_dpll *pll)
3947{
3948	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3949	const enum intel_dpll_id id = pll->info->id;
3950	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3951
3952	if (IS_ALDERLAKE_S(dev_priv)) {
3953		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3954		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3955	} else if (IS_DG1(dev_priv)) {
3956		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3957		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3958	} else if (IS_ROCKETLAKE(dev_priv)) {
3959		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3960		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3961	} else if (DISPLAY_VER(dev_priv) >= 12) {
3962		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3963		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3964	} else {
3965		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3966			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3967			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3968		} else {
3969			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3970			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3971		}
3972	}
3973
3974	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3975	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3976	intel_de_posting_read(dev_priv, cfgcr1_reg);
3977}
3978
3979static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3980			     struct intel_shared_dpll *pll)
3981{
3982	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3983	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3984	u32 val;
3985
3986	/*
3987	 * Some of the following registers have reserved fields, so program
3988	 * these with RMW based on a mask. The mask can be fixed or generated
3989	 * during the calc/readout phase if the mask depends on some other HW
3990	 * state like refclk, see icl_calc_mg_pll_state().
3991	 */
3992	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3993	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3994	val |= hw_state->mg_refclkin_ctl;
3995	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3996
3997	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3998	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3999	val |= hw_state->mg_clktop2_coreclkctl1;
4000	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
4001
4002	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
4003	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
4004		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
4005		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
4006		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
4007	val |= hw_state->mg_clktop2_hsclkctl;
4008	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
4009
4010	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
4011	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
4012	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
4013	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
4014		       hw_state->mg_pll_frac_lock);
4015	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
4016
4017	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
4018	val &= ~hw_state->mg_pll_bias_mask;
4019	val |= hw_state->mg_pll_bias;
4020	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
4021
4022	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
4023	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
4024	val |= hw_state->mg_pll_tdc_coldst_bias;
4025	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
4026
4027	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
4028}
4029
4030static void dkl_pll_write(struct drm_i915_private *dev_priv,
4031			  struct intel_shared_dpll *pll)
4032{
4033	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
4034	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
4035	u32 val;
4036
4037	/*
4038	 * All registers programmed here have the same HIP_INDEX_REG even
4039	 * though they are on different building blocks
4040	 */
4041	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
4042		       HIP_INDEX_VAL(tc_port, 0x2));
4043
4044	/* All the registers are RMW */
4045	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
4046	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
4047	val |= hw_state->mg_refclkin_ctl;
4048	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
4049
4050	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
4051	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
4052	val |= hw_state->mg_clktop2_coreclkctl1;
4053	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
4054
4055	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
4056	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
4057		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
4058		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
4059		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
4060	val |= hw_state->mg_clktop2_hsclkctl;
4061	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
4062
4063	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
4064	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
4065		 DKL_PLL_DIV0_PROP_COEFF_MASK |
4066		 DKL_PLL_DIV0_FBPREDIV_MASK |
4067		 DKL_PLL_DIV0_FBDIV_INT_MASK);
4068	val |= hw_state->mg_pll_div0;
4069	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
4070
4071	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
4072	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
4073		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
4074	val |= hw_state->mg_pll_div1;
4075	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
4076
4077	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
4078	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
4079		 DKL_PLL_SSC_STEP_LEN_MASK |
4080		 DKL_PLL_SSC_STEP_NUM_MASK |
4081		 DKL_PLL_SSC_EN);
4082	val |= hw_state->mg_pll_ssc;
4083	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
4084
4085	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
4086	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
4087		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
4088	val |= hw_state->mg_pll_bias;
4089	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
4090
4091	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4092	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
4093		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
4094	val |= hw_state->mg_pll_tdc_coldst_bias;
4095	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
4096
4097	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4098}
4099
4100static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4101				 struct intel_shared_dpll *pll,
4102				 i915_reg_t enable_reg)
4103{
4104	u32 val;
4105
4106	val = intel_de_read(dev_priv, enable_reg);
4107	val |= PLL_POWER_ENABLE;
4108	intel_de_write(dev_priv, enable_reg, val);
4109
4110	/*
4111	 * The spec says we need to "wait" but it also says it should be
4112	 * immediate.
4113	 */
4114	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4115		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4116			pll->info->id);
4117}
4118
4119static void icl_pll_enable(struct drm_i915_private *dev_priv,
4120			   struct intel_shared_dpll *pll,
4121			   i915_reg_t enable_reg)
4122{
4123	u32 val;
4124
4125	val = intel_de_read(dev_priv, enable_reg);
4126	val |= PLL_ENABLE;
4127	intel_de_write(dev_priv, enable_reg, val);
4128
4129	/* Timeout is actually 600us. */
4130	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4131		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4132}
4133
4134static void combo_pll_enable(struct drm_i915_private *dev_priv,
4135			     struct intel_shared_dpll *pll)
4136{
4137	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4138
4139	if (IS_JSL_EHL(dev_priv) &&
4140	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4141
4142		/*
4143		 * We need to disable DC states when this DPLL is enabled.
4144		 * This can be done by taking a reference on DPLL4 power
4145		 * domain.
4146		 */
4147		pll->wakeref = intel_display_power_get(dev_priv,
4148						       POWER_DOMAIN_DPLL_DC_OFF);
4149	}
4150
4151	icl_pll_power_enable(dev_priv, pll, enable_reg);
4152
4153	icl_dpll_write(dev_priv, pll);
4154
4155	/*
4156	 * DVFS pre sequence would be here, but in our driver the cdclk code
4157	 * paths should already be setting the appropriate voltage, hence we do
4158	 * nothing here.
4159	 */
4160
4161	icl_pll_enable(dev_priv, pll, enable_reg);
4162
4163	/* DVFS post sequence would be here. See the comment above. */
4164}
4165
4166static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4167			   struct intel_shared_dpll *pll)
4168{
4169	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4170
4171	icl_dpll_write(dev_priv, pll);
4172
4173	/*
4174	 * DVFS pre sequence would be here, but in our driver the cdclk code
4175	 * paths should already be setting the appropriate voltage, hence we do
4176	 * nothing here.
4177	 */
4178
4179	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4180
4181	/* DVFS post sequence would be here. See the comment above. */
4182}
4183
4184static void mg_pll_enable(struct drm_i915_private *dev_priv,
4185			  struct intel_shared_dpll *pll)
4186{
4187	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
4188
4189	icl_pll_power_enable(dev_priv, pll, enable_reg);
4190
4191	if (DISPLAY_VER(dev_priv) >= 12)
4192		dkl_pll_write(dev_priv, pll);
4193	else
4194		icl_mg_pll_write(dev_priv, pll);
4195
4196	/*
4197	 * DVFS pre sequence would be here, but in our driver the cdclk code
4198	 * paths should already be setting the appropriate voltage, hence we do
4199	 * nothing here.
4200	 */
4201
4202	icl_pll_enable(dev_priv, pll, enable_reg);
4203
4204	/* DVFS post sequence would be here. See the comment above. */
4205}
4206
4207static void icl_pll_disable(struct drm_i915_private *dev_priv,
4208			    struct intel_shared_dpll *pll,
4209			    i915_reg_t enable_reg)
4210{
4211	u32 val;
4212
4213	/* The first steps are done by intel_ddi_post_disable(). */
4214
4215	/*
4216	 * DVFS pre sequence would be here, but in our driver the cdclk code
4217	 * paths should already be setting the appropriate voltage, hence we do
4218	 * nothing here.
4219	 */
4220
4221	val = intel_de_read(dev_priv, enable_reg);
4222	val &= ~PLL_ENABLE;
4223	intel_de_write(dev_priv, enable_reg, val);
4224
4225	/* Timeout is actually 1us. */
4226	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
4227		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
4228
4229	/* DVFS post sequence would be here. See the comment above. */
4230
4231	val = intel_de_read(dev_priv, enable_reg);
4232	val &= ~PLL_POWER_ENABLE;
4233	intel_de_write(dev_priv, enable_reg, val);
4234
4235	/*
4236	 * The spec says we need to "wait" but it also says it should be
4237	 * immediate.
4238	 */
4239	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4240		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
4241			pll->info->id);
4242}
4243
4244static void combo_pll_disable(struct drm_i915_private *dev_priv,
4245			      struct intel_shared_dpll *pll)
4246{
4247	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4248
4249	icl_pll_disable(dev_priv, pll, enable_reg);
4250
4251	if (IS_JSL_EHL(dev_priv) &&
4252	    pll->info->id == DPLL_ID_EHL_DPLL4)
4253		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4254					pll->wakeref);
4255}
4256
4257static void tbt_pll_disable(struct drm_i915_private *dev_priv,
4258			    struct intel_shared_dpll *pll)
4259{
4260	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
4261}
4262
4263static void mg_pll_disable(struct drm_i915_private *dev_priv,
4264			   struct intel_shared_dpll *pll)
4265{
4266	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
4267
4268	icl_pll_disable(dev_priv, pll, enable_reg);
4269}
4270
4271static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4272{
4273	/* No SSC ref */
4274	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
4275}
4276
4277static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4278			      const struct intel_dpll_hw_state *hw_state)
4279{
4280	drm_dbg_kms(&dev_priv->drm,
4281		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4282		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4283		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4284		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4285		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4286		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4287		    hw_state->cfgcr0, hw_state->cfgcr1,
4288		    hw_state->mg_refclkin_ctl,
4289		    hw_state->mg_clktop2_coreclkctl1,
4290		    hw_state->mg_clktop2_hsclkctl,
4291		    hw_state->mg_pll_div0,
4292		    hw_state->mg_pll_div1,
4293		    hw_state->mg_pll_lf,
4294		    hw_state->mg_pll_frac_lock,
4295		    hw_state->mg_pll_ssc,
4296		    hw_state->mg_pll_bias,
4297		    hw_state->mg_pll_tdc_coldst_bias);
4298}
4299
4300static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4301	.enable = combo_pll_enable,
4302	.disable = combo_pll_disable,
4303	.get_hw_state = combo_pll_get_hw_state,
4304	.get_freq = icl_ddi_combo_pll_get_freq,
4305};
4306
4307static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4308	.enable = tbt_pll_enable,
4309	.disable = tbt_pll_disable,
4310	.get_hw_state = tbt_pll_get_hw_state,
4311	.get_freq = icl_ddi_tbt_pll_get_freq,
4312};
4313
4314static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4315	.enable = mg_pll_enable,
4316	.disable = mg_pll_disable,
4317	.get_hw_state = mg_pll_get_hw_state,
4318	.get_freq = icl_ddi_mg_pll_get_freq,
4319};
4320
4321static const struct dpll_info icl_plls[] = {
4322	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4323	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4324	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4325	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4326	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4327	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4328	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4329	{ },
4330};
4331
4332static const struct intel_dpll_mgr icl_pll_mgr = {
4333	.dpll_info = icl_plls,
4334	.get_dplls = icl_get_dplls,
4335	.put_dplls = icl_put_dplls,
4336	.update_active_dpll = icl_update_active_dpll,
4337	.update_ref_clks = icl_update_dpll_ref_clks,
4338	.dump_hw_state = icl_dump_hw_state,
4339};
4340
4341static const struct dpll_info ehl_plls[] = {
4342	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4343	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4344	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4345	{ },
4346};
4347
4348static const struct intel_dpll_mgr ehl_pll_mgr = {
4349	.dpll_info = ehl_plls,
4350	.get_dplls = icl_get_dplls,
4351	.put_dplls = icl_put_dplls,
4352	.update_ref_clks = icl_update_dpll_ref_clks,
4353	.dump_hw_state = icl_dump_hw_state,
4354};
4355
4356static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4357	.enable = mg_pll_enable,
4358	.disable = mg_pll_disable,
4359	.get_hw_state = dkl_pll_get_hw_state,
4360	.get_freq = icl_ddi_mg_pll_get_freq,
4361};
4362
4363static const struct dpll_info tgl_plls[] = {
4364	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4365	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4366	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4367	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4368	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4369	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4370	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4371	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4372	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4373	{ },
4374};
4375
4376static const struct intel_dpll_mgr tgl_pll_mgr = {
4377	.dpll_info = tgl_plls,
4378	.get_dplls = icl_get_dplls,
4379	.put_dplls = icl_put_dplls,
4380	.update_active_dpll = icl_update_active_dpll,
4381	.update_ref_clks = icl_update_dpll_ref_clks,
4382	.dump_hw_state = icl_dump_hw_state,
4383};
4384
4385static const struct dpll_info rkl_plls[] = {
4386	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4387	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4388	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4389	{ },
4390};
4391
4392static const struct intel_dpll_mgr rkl_pll_mgr = {
4393	.dpll_info = rkl_plls,
4394	.get_dplls = icl_get_dplls,
4395	.put_dplls = icl_put_dplls,
4396	.update_ref_clks = icl_update_dpll_ref_clks,
4397	.dump_hw_state = icl_dump_hw_state,
4398};
4399
4400static const struct dpll_info dg1_plls[] = {
4401	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4402	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4403	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4404	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4405	{ },
4406};
4407
4408static const struct intel_dpll_mgr dg1_pll_mgr = {
4409	.dpll_info = dg1_plls,
4410	.get_dplls = icl_get_dplls,
4411	.put_dplls = icl_put_dplls,
4412	.update_ref_clks = icl_update_dpll_ref_clks,
4413	.dump_hw_state = icl_dump_hw_state,
4414};
4415
4416static const struct dpll_info adls_plls[] = {
4417	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4418	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4419	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4420	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4421	{ },
4422};
4423
4424static const struct intel_dpll_mgr adls_pll_mgr = {
4425	.dpll_info = adls_plls,
4426	.get_dplls = icl_get_dplls,
4427	.put_dplls = icl_put_dplls,
4428	.update_ref_clks = icl_update_dpll_ref_clks,
4429	.dump_hw_state = icl_dump_hw_state,
4430};
4431
4432static const struct dpll_info adlp_plls[] = {
4433	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4434	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4435	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4436	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4437	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4438	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4439	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4440	{ },
4441};
4442
4443static const struct intel_dpll_mgr adlp_pll_mgr = {
4444	.dpll_info = adlp_plls,
4445	.get_dplls = icl_get_dplls,
4446	.put_dplls = icl_put_dplls,
4447	.update_active_dpll = icl_update_active_dpll,
4448	.update_ref_clks = icl_update_dpll_ref_clks,
4449	.dump_hw_state = icl_dump_hw_state,
4450};
4451
4452/**
4453 * intel_shared_dpll_init - Initialize shared DPLLs
4454 * @dev: drm device
4455 *
4456 * Initialize shared DPLLs for @dev.
4457 */
4458void intel_shared_dpll_init(struct drm_device *dev)
4459{
4460	struct drm_i915_private *dev_priv = to_i915(dev);
4461	const struct intel_dpll_mgr *dpll_mgr = NULL;
4462	const struct dpll_info *dpll_info;
4463	int i;
4464
4465	if (IS_ALDERLAKE_P(dev_priv))
4466		dpll_mgr = &adlp_pll_mgr;
4467	else if (IS_ALDERLAKE_S(dev_priv))
4468		dpll_mgr = &adls_pll_mgr;
4469	else if (IS_DG1(dev_priv))
4470		dpll_mgr = &dg1_pll_mgr;
4471	else if (IS_ROCKETLAKE(dev_priv))
4472		dpll_mgr = &rkl_pll_mgr;
4473	else if (DISPLAY_VER(dev_priv) >= 12)
4474		dpll_mgr = &tgl_pll_mgr;
4475	else if (IS_JSL_EHL(dev_priv))
4476		dpll_mgr = &ehl_pll_mgr;
4477	else if (DISPLAY_VER(dev_priv) >= 11)
4478		dpll_mgr = &icl_pll_mgr;
4479	else if (IS_CANNONLAKE(dev_priv))
4480		dpll_mgr = &cnl_pll_mgr;
4481	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4482		dpll_mgr = &bxt_pll_mgr;
4483	else if (DISPLAY_VER(dev_priv) == 9)
4484		dpll_mgr = &skl_pll_mgr;
4485	else if (HAS_DDI(dev_priv))
4486		dpll_mgr = &hsw_pll_mgr;
4487	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4488		dpll_mgr = &pch_pll_mgr;
4489
4490	if (!dpll_mgr) {
4491		dev_priv->dpll.num_shared_dpll = 0;
4492		return;
4493	}
4494
4495	dpll_info = dpll_mgr->dpll_info;
4496
4497	for (i = 0; dpll_info[i].name; i++) {
4498		drm_WARN_ON(dev, i != dpll_info[i].id);
4499		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4500	}
4501
4502	dev_priv->dpll.mgr = dpll_mgr;
4503	dev_priv->dpll.num_shared_dpll = i;
4504	mutex_init(&dev_priv->dpll.lock);
4505
4506	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4507}
4508
4509/**
4510 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4511 * @state: atomic state
4512 * @crtc: CRTC to reserve DPLLs for
4513 * @encoder: encoder
4514 *
4515 * This function reserves all required DPLLs for the given CRTC and encoder
4516 * combination in the current atomic commit @state and the new @crtc atomic
4517 * state.
4518 *
4519 * The new configuration in the atomic commit @state is made effective by
4520 * calling intel_shared_dpll_swap_state().
4521 *
4522 * The reserved DPLLs should be released by calling
4523 * intel_release_shared_dplls().
4524 *
4525 * Returns:
4526 * True if all required DPLLs were successfully reserved.
4527 */
4528bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4529				struct intel_crtc *crtc,
4530				struct intel_encoder *encoder)
4531{
4532	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4533	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4534
4535	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4536		return false;
4537
4538	return dpll_mgr->get_dplls(state, crtc, encoder);
4539}
4540
4541/**
4542 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4543 * @state: atomic state
4544 * @crtc: crtc from which the DPLLs are to be released
4545 *
4546 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4547 * from the current atomic commit @state and the old @crtc atomic state.
4548 *
4549 * The new configuration in the atomic commit @state is made effective by
4550 * calling intel_shared_dpll_swap_state().
4551 */
4552void intel_release_shared_dplls(struct intel_atomic_state *state,
4553				struct intel_crtc *crtc)
4554{
4555	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4556	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4557
4558	/*
4559	 * FIXME: this function is called for every platform having a
4560	 * compute_clock hook, even though the platform doesn't yet support
4561	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4562	 * called on those.
4563	 */
4564	if (!dpll_mgr)
4565		return;
4566
4567	dpll_mgr->put_dplls(state, crtc);
4568}
4569
4570/**
4571 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4572 * @state: atomic state
4573 * @crtc: the CRTC for which to update the active DPLL
4574 * @encoder: encoder determining the type of port DPLL
4575 *
4576 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4577 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4578 * DPLL selected will be based on the current mode of the encoder's port.
4579 */
4580void intel_update_active_dpll(struct intel_atomic_state *state,
4581			      struct intel_crtc *crtc,
4582			      struct intel_encoder *encoder)
4583{
4584	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4585	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4586
4587	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4588		return;
4589
4590	dpll_mgr->update_active_dpll(state, crtc, encoder);
4591}
4592
4593/**
4594 * intel_dpll_get_freq - calculate the DPLL's output frequency
4595 * @i915: i915 device
4596 * @pll: DPLL for which to calculate the output frequency
4597 * @pll_state: DPLL state from which to calculate the output frequency
4598 *
4599 * Return the output frequency corresponding to @pll's passed in @pll_state.
4600 */
4601int intel_dpll_get_freq(struct drm_i915_private *i915,
4602			const struct intel_shared_dpll *pll,
4603			const struct intel_dpll_hw_state *pll_state)
4604{
4605	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4606		return 0;
4607
4608	return pll->info->funcs->get_freq(i915, pll, pll_state);
4609}
4610
4611/**
4612 * intel_dpll_get_hw_state - read out the DPLL's hardware state
4613 * @i915: i915 device
4614 * @pll: DPLL for which to read out the hardware state
4615 * @hw_state: DPLL's hardware state
4616 *
4617 * Read out @pll's hardware state into @hw_state.
4618 */
4619bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4620			     struct intel_shared_dpll *pll,
4621			     struct intel_dpll_hw_state *hw_state)
4622{
4623	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4624}
4625
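/*
 * Read out the hardware state of a single DPLL and reconstruct its pipe
 * usage mask from the currently active CRTC states.
 */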
4626static void readout_dpll_hw_state(struct drm_i915_private *i915,
4627				  struct intel_shared_dpll *pll)
4628{
4629	struct intel_crtc *crtc;
4630
4631	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4632
4633	if (IS_JSL_EHL(i915) && pll->on &&
4634	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4635		pll->wakeref = intel_display_power_get(i915,
4636						       POWER_DOMAIN_DPLL_DC_OFF);
4637	}
4638
4639	pll->state.pipe_mask = 0;
4640	for_each_intel_crtc(&i915->drm, crtc) {
4641		struct intel_crtc_state *crtc_state =
4642			to_intel_crtc_state(crtc->base.state);
4643
4644		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4645			pll->state.pipe_mask |= BIT(crtc->pipe);
4646	}
4647	pll->active_mask = pll->state.pipe_mask;
4648
4649	drm_dbg_kms(&i915->drm,
4650		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4651		    pll->info->name, pll->state.pipe_mask, pll->on);
4652}
4653
4654void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4655{
4656	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4657		i915->dpll.mgr->update_ref_clks(i915);
4658}
4659
4660void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4661{
4662	int i;
4663
4664	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4665		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4666}
4667
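/*
 * Disable a DPLL that was read out as enabled but has no active pipes
 * using it.
 */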
4668static void sanitize_dpll_state(struct drm_i915_private *i915,
4669				struct intel_shared_dpll *pll)
4670{
4671	if (!pll->on || pll->active_mask)
4672		return;
4673
4674	drm_dbg_kms(&i915->drm,
4675		    "%s enabled but not in use, disabling\n",
4676		    pll->info->name);
4677
4678	pll->info->funcs->disable(i915, pll);
4679	pll->on = false;
4680}
4681
4682void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4683{
4684	int i;
4685
4686	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4687		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4688}
4689
4690/**
4691 * intel_dpll_dump_hw_state - write hw_state to dmesg
4692 * @dev_priv: i915 drm device
4693 * @hw_state: hw state to be written to the log
4694 *
4695 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4696 */
4697void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4698			      const struct intel_dpll_hw_state *hw_state)
4699{
4700	if (dev_priv->dpll.mgr) {
4701		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4702	} else {
4703		/* fallback for platforms that don't use the shared dpll
4704		 * infrastructure
4705		 */
4706		drm_dbg_kms(&dev_priv->drm,
4707			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4708			    "fp0: 0x%x, fp1: 0x%x\n",
4709			    hw_state->dpll,
4710			    hw_state->dpll_md,
4711			    hw_state->fp0,
4712			    hw_state->fp1);
4713	}
4714}
v6.13.7
   1/*
   2 * Copyright © 2006-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/math.h>
  25#include <linux/string_helpers.h>
  26
  27#include "bxt_dpio_phy_regs.h"
  28#include "i915_reg.h"
  29#include "intel_de.h"
  30#include "intel_display_types.h"
  31#include "intel_dkl_phy.h"
  32#include "intel_dkl_phy_regs.h"
  33#include "intel_dpio_phy.h"
  34#include "intel_dpll.h"
  35#include "intel_dpll_mgr.h"
  36#include "intel_hti.h"
  37#include "intel_mg_phy_regs.h"
  38#include "intel_pch_refclk.h"
  39#include "intel_tc.h"
  40
  41/**
  42 * DOC: Display PLLs
  43 *
  44 * Display PLLs used for driving outputs vary by platform. While some have
  45 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  46 * from a pool. In the latter scenario, it is possible that multiple pipes
  47 * share a PLL if their configurations match.
  48 *
  49 * This file provides an abstraction over display PLLs. The function
  50 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  51 * users of a PLL are tracked and that tracking is integrated with the atomic
  52 * modset interface. During an atomic operation, required PLLs can be reserved
  53 * for a given CRTC and encoder configuration by calling
  54 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
  55 * with intel_release_shared_dplls().
  56 * Changes to the users are first staged in the atomic state, and then made
  57 * effective by calling intel_shared_dpll_swap_state() during the atomic
  58 * commit phase.
  59 */
  60
  61/* platform specific hooks for managing DPLLs */
  62struct intel_shared_dpll_funcs {
  63	/*
  64	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
  65	 * the pll is not already enabled.
  66	 */
  67	void (*enable)(struct drm_i915_private *i915,
  68		       struct intel_shared_dpll *pll,
  69		       const struct intel_dpll_hw_state *dpll_hw_state);
  70
  71	/*
  72	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
  73	 * only when it is safe to disable the pll, i.e., there are no more
  74	 * tracked users for it.
  75	 */
  76	void (*disable)(struct drm_i915_private *i915,
  77			struct intel_shared_dpll *pll);
  78
  79	/*
  80	 * Hook for reading the values currently programmed to the DPLL
  81	 * registers. This is used for initial hw state readout and state
  82	 * verification after a mode set.
  83	 */
  84	bool (*get_hw_state)(struct drm_i915_private *i915,
  85			     struct intel_shared_dpll *pll,
  86			     struct intel_dpll_hw_state *dpll_hw_state);
  87
  88	/*
  89	 * Hook for calculating the pll's output frequency based on its passed
  90	 * in state.
  91	 */
  92	int (*get_freq)(struct drm_i915_private *i915,
  93			const struct intel_shared_dpll *pll,
  94			const struct intel_dpll_hw_state *dpll_hw_state);
  95};
  96
  97struct intel_dpll_mgr {
  98	const struct dpll_info *dpll_info;
  99
 100	int (*compute_dplls)(struct intel_atomic_state *state,
 101			     struct intel_crtc *crtc,
 102			     struct intel_encoder *encoder);
 103	int (*get_dplls)(struct intel_atomic_state *state,
 104			 struct intel_crtc *crtc,
 105			 struct intel_encoder *encoder);
 106	void (*put_dplls)(struct intel_atomic_state *state,
 107			  struct intel_crtc *crtc);
 108	void (*update_active_dpll)(struct intel_atomic_state *state,
 109				   struct intel_crtc *crtc,
 110				   struct intel_encoder *encoder);
 111	void (*update_ref_clks)(struct drm_i915_private *i915);
 112	void (*dump_hw_state)(struct drm_printer *p,
 113			      const struct intel_dpll_hw_state *dpll_hw_state);
 114	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
 115				 const struct intel_dpll_hw_state *b);
 116};
 117
 118static void
 119intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
 120				  struct intel_shared_dpll_state *shared_dpll)
 121{
 122	struct intel_shared_dpll *pll;
 123	int i;
 124
 125	/* Copy shared dpll state */
 126	for_each_shared_dpll(i915, pll, i)
 127		shared_dpll[pll->index] = pll->state;
 
 
 
 128}
 129
 130static struct intel_shared_dpll_state *
 131intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
 132{
 133	struct intel_atomic_state *state = to_intel_atomic_state(s);
 134
 135	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
 136
 137	if (!state->dpll_set) {
 138		state->dpll_set = true;
 139
 140		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
 141						  state->shared_dpll);
 142	}
 143
 144	return state->shared_dpll;
 145}
 146
 147/**
 148 * intel_get_shared_dpll_by_id - get a DPLL given its id
 149 * @i915: i915 device instance
 150 * @id: pll id
 151 *
 152 * Returns:
 153 * A pointer to the DPLL with @id
 154 */
 155struct intel_shared_dpll *
 156intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
 157			    enum intel_dpll_id id)
 158{
 159	struct intel_shared_dpll *pll;
 160	int i;
 161
 162	for_each_shared_dpll(i915, pll, i) {
 163		if (pll->info->id == id)
 164			return pll;
 165	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 166
 167	MISSING_CASE(id);
 168	return NULL;
 169}
 170
 171/* For ILK+ */
 172void assert_shared_dpll(struct drm_i915_private *i915,
 173			struct intel_shared_dpll *pll,
 174			bool state)
 175{
 176	struct intel_display *display = &i915->display;
 177	bool cur_state;
 178	struct intel_dpll_hw_state hw_state;
 179
 180	if (drm_WARN(display->drm, !pll,
 181		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
 182		return;
 183
 184	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
 185	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
 186				 "%s assertion failure (expected %s, current %s)\n",
 187				 pll->info->name, str_on_off(state),
 188				 str_on_off(cur_state));
 189}
 190
 191static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 192{
 193	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
 194}
 195
 196enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 197{
 198	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
 199}
 200
 201static i915_reg_t
 202intel_combo_pll_enable_reg(struct drm_i915_private *i915,
 203			   struct intel_shared_dpll *pll)
 204{
 205	if (IS_DG1(i915))
 206		return DG1_DPLL_ENABLE(pll->info->id);
 207	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
 208		 (pll->info->id == DPLL_ID_EHL_DPLL4))
 209		return MG_PLL_ENABLE(0);
 210
 211	return ICL_DPLL_ENABLE(pll->info->id);
 212}
 213
 214static i915_reg_t
 215intel_tc_pll_enable_reg(struct drm_i915_private *i915,
 216			struct intel_shared_dpll *pll)
 217{
 218	const enum intel_dpll_id id = pll->info->id;
 219	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
 220
 221	if (IS_ALDERLAKE_P(i915))
 222		return ADLP_PORTTC_PLL_ENABLE(tc_port);
 223
 224	return MG_PLL_ENABLE(tc_port);
 225}
 226
 227static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
 228				      struct intel_shared_dpll *pll)
 
 
 
 
 
 
 229{
 230	if (pll->info->power_domain)
 231		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
 
 232
 233	pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
 234	pll->on = true;
 235}
 236
 237static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
 238				       struct intel_shared_dpll *pll)
 239{
 240	pll->info->funcs->disable(i915, pll);
 241	pll->on = false;
 
 242
 243	if (pll->info->power_domain)
 244		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
 
 245}
 246
 247/**
 248 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 249 * @crtc_state: CRTC, and its state, which has a shared DPLL
 250 *
 251 * Enable the shared DPLL used by @crtc.
 252 */
 253void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 254{
 255	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 256	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 257	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 258	unsigned int pipe_mask = BIT(crtc->pipe);
 259	unsigned int old_mask;
 260
 261	if (drm_WARN_ON(&i915->drm, pll == NULL))
 262		return;
 263
 264	mutex_lock(&i915->display.dpll.lock);
 265	old_mask = pll->active_mask;
 266
 267	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
 268	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
 269		goto out;
 270
 271	pll->active_mask |= pipe_mask;
 272
 273	drm_dbg_kms(&i915->drm,
 274		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 275		    pll->info->name, pll->active_mask, pll->on,
 276		    crtc->base.base.id, crtc->base.name);
 277
 278	if (old_mask) {
 279		drm_WARN_ON(&i915->drm, !pll->on);
 280		assert_shared_dpll_enabled(i915, pll);
 281		goto out;
 282	}
 283	drm_WARN_ON(&i915->drm, pll->on);
 284
 285	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
 286
 287	_intel_enable_shared_dpll(i915, pll);
 288
 289out:
 290	mutex_unlock(&i915->display.dpll.lock);
 291}
 292
 293/**
 294 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 295 * @crtc_state: CRTC, and its state, which has a shared DPLL
 296 *
 297 * Disable the shared DPLL used by @crtc.
 298 */
 299void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 300{
 301	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 302	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 303	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 304	unsigned int pipe_mask = BIT(crtc->pipe);
 305
 306	/* PCH only available on ILK+ */
 307	if (DISPLAY_VER(i915) < 5)
 308		return;
 309
 310	if (pll == NULL)
 311		return;
 312
 313	mutex_lock(&i915->display.dpll.lock);
 314	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
 315		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
 316		     crtc->base.base.id, crtc->base.name))
 317		goto out;
 318
 319	drm_dbg_kms(&i915->drm,
 320		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 321		    pll->info->name, pll->active_mask, pll->on,
 322		    crtc->base.base.id, crtc->base.name);
 323
 324	assert_shared_dpll_enabled(i915, pll);
 325	drm_WARN_ON(&i915->drm, !pll->on);
 326
 327	pll->active_mask &= ~pipe_mask;
 328	if (pll->active_mask)
 329		goto out;
 330
 331	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
 332
 333	_intel_disable_shared_dpll(i915, pll);
 334
 335out:
 336	mutex_unlock(&i915->display.dpll.lock);
 337}
 338
 339static unsigned long
 340intel_dpll_mask_all(struct drm_i915_private *i915)
 341{
 342	struct intel_shared_dpll *pll;
 343	unsigned long dpll_mask = 0;
 344	int i;
 345
 346	for_each_shared_dpll(i915, pll, i) {
 347		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
 348
 349		dpll_mask |= BIT(pll->info->id);
 350	}
 351
 352	return dpll_mask;
 353}
 354
 355static struct intel_shared_dpll *
 356intel_find_shared_dpll(struct intel_atomic_state *state,
 357		       const struct intel_crtc *crtc,
 358		       const struct intel_dpll_hw_state *dpll_hw_state,
 359		       unsigned long dpll_mask)
 360{
 361	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 362	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
 363	struct intel_shared_dpll_state *shared_dpll;
 364	struct intel_shared_dpll *unused_pll = NULL;
 365	enum intel_dpll_id id;
 366
 367	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 368
 369	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
 370
 371	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
 372		struct intel_shared_dpll *pll;
 373
 374		pll = intel_get_shared_dpll_by_id(i915, id);
 375		if (!pll)
 376			continue;
 377
 378		/* Only want to check enabled timings first */
 379		if (shared_dpll[pll->index].pipe_mask == 0) {
 380			if (!unused_pll)
 381				unused_pll = pll;
 382			continue;
 383		}
 384
 385		if (memcmp(dpll_hw_state,
 386			   &shared_dpll[pll->index].hw_state,
 387			   sizeof(*dpll_hw_state)) == 0) {
 388			drm_dbg_kms(&i915->drm,
 389				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
 390				    crtc->base.base.id, crtc->base.name,
 391				    pll->info->name,
 392				    shared_dpll[pll->index].pipe_mask,
 393				    pll->active_mask);
 394			return pll;
 395		}
 396	}
 397
 398	/* Ok no matching timings, maybe there's a free one? */
 399	if (unused_pll) {
 400		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
 401			    crtc->base.base.id, crtc->base.name,
 402			    unused_pll->info->name);
 403		return unused_pll;
 404	}
 405
 406	return NULL;
 407}
 408
 409/**
 410 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 411 * @crtc: CRTC on which behalf the reference is taken
 412 * @pll: DPLL for which the reference is taken
 413 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 414 *
 415 * Take a reference for @pll tracking the use of it by @crtc.
 416 */
 417static void
 418intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
 419				 const struct intel_shared_dpll *pll,
 420				 struct intel_shared_dpll_state *shared_dpll_state)
 421{
 422	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 423
 424	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
 425
 426	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
 427
 428	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
 429		    crtc->base.base.id, crtc->base.name, pll->info->name);
 430}
 431
 432static void
 433intel_reference_shared_dpll(struct intel_atomic_state *state,
 434			    const struct intel_crtc *crtc,
 435			    const struct intel_shared_dpll *pll,
 436			    const struct intel_dpll_hw_state *dpll_hw_state)
 437{
 
 438	struct intel_shared_dpll_state *shared_dpll;
 
 439
 440	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 441
 442	if (shared_dpll[pll->index].pipe_mask == 0)
 443		shared_dpll[pll->index].hw_state = *dpll_hw_state;
 444
 445	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 446}
 447
 448/**
 449 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 450 * @crtc: CRTC on which behalf the reference is dropped
 451 * @pll: DPLL for which the reference is dropped
 452 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 453 *
 454 * Drop a reference for @pll tracking the end of use of it by @crtc.
 455 */
 456void
 457intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
 458				   const struct intel_shared_dpll *pll,
 459				   struct intel_shared_dpll_state *shared_dpll_state)
 460{
 461	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 462
 463	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
 464
 465	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
 466
 467	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
 468		    crtc->base.base.id, crtc->base.name, pll->info->name);
 469}
 470
 471static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
 472					  const struct intel_crtc *crtc,
 473					  const struct intel_shared_dpll *pll)
 474{
 475	struct intel_shared_dpll_state *shared_dpll;
 476
 477	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 478
 479	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 480}
 481
 482static void intel_put_dpll(struct intel_atomic_state *state,
 483			   struct intel_crtc *crtc)
 484{
 485	const struct intel_crtc_state *old_crtc_state =
 486		intel_atomic_get_old_crtc_state(state, crtc);
 487	struct intel_crtc_state *new_crtc_state =
 488		intel_atomic_get_new_crtc_state(state, crtc);
 489
 490	new_crtc_state->shared_dpll = NULL;
 491
 492	if (!old_crtc_state->shared_dpll)
 493		return;
 494
 495	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
 496}
 497
 498/**
 499 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 500 * @state: atomic state
 501 *
 502 * This is the dpll version of drm_atomic_helper_swap_state() since the
 503 * helper does not handle driver-specific global state.
 504 *
 505 * For consistency with atomic helpers this function does a complete swap,
 506 * i.e. it also puts the current state into @state, even though there is no
 507 * need for that at this moment.
 508 */
 509void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 510{
 511	struct drm_i915_private *i915 = to_i915(state->base.dev);
 512	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
 513	struct intel_shared_dpll *pll;
 514	int i;
 515
 516	if (!state->dpll_set)
 517		return;
 518
 519	for_each_shared_dpll(i915, pll, i)
 520		swap(pll->state, shared_dpll[pll->index]);
 
 
 
 
 521}
 522
 523static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
 524				      struct intel_shared_dpll *pll,
 525				      struct intel_dpll_hw_state *dpll_hw_state)
 526{
 527	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
 528	const enum intel_dpll_id id = pll->info->id;
 529	intel_wakeref_t wakeref;
 530	u32 val;
 531
 532	wakeref = intel_display_power_get_if_enabled(i915,
 533						     POWER_DOMAIN_DISPLAY_CORE);
 534	if (!wakeref)
 535		return false;
 536
 537	val = intel_de_read(i915, PCH_DPLL(id));
 538	hw_state->dpll = val;
 539	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
 540	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
 541
 542	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 543
 544	return val & DPLL_VCO_ENABLE;
 545}
 546
 547static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
 548{
 549	struct intel_display *display = &i915->display;
 550	u32 val;
 551	bool enabled;
 552
 553	val = intel_de_read(display, PCH_DREF_CONTROL);
 554	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 555			    DREF_SUPERSPREAD_SOURCE_MASK));
 556	INTEL_DISPLAY_STATE_WARN(display, !enabled,
 557				 "PCH refclk assertion failure, should be active but is disabled\n");
 558}
 559
 560static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
 561				struct intel_shared_dpll *pll,
 562				const struct intel_dpll_hw_state *dpll_hw_state)
 563{
 564	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
 565	const enum intel_dpll_id id = pll->info->id;
 566
 567	/* PCH refclock must be enabled first */
 568	ibx_assert_pch_refclk_enabled(i915);
 569
 570	intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
 571	intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
 572
 573	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
 574
 575	/* Wait for the clocks to stabilize. */
 576	intel_de_posting_read(i915, PCH_DPLL(id));
 577	udelay(150);
 578
 579	/* The pixel multiplier can only be updated once the
 580	 * DPLL is enabled and the clocks are stable.
 581	 *
 582	 * So write it again.
 583	 */
 584	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
 585	intel_de_posting_read(i915, PCH_DPLL(id));
 586	udelay(200);
 587}
 588
 589static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
 590				 struct intel_shared_dpll *pll)
 591{
 592	const enum intel_dpll_id id = pll->info->id;
 593
 594	intel_de_write(i915, PCH_DPLL(id), 0);
 595	intel_de_posting_read(i915, PCH_DPLL(id));
 596	udelay(200);
 597}
 598
 599static int ibx_compute_dpll(struct intel_atomic_state *state,
 600			    struct intel_crtc *crtc,
 601			    struct intel_encoder *encoder)
 602{
 603	return 0;
 604}
 605
 606static int ibx_get_dpll(struct intel_atomic_state *state,
 607			struct intel_crtc *crtc,
 608			struct intel_encoder *encoder)
 609{
 610	struct intel_crtc_state *crtc_state =
 611		intel_atomic_get_new_crtc_state(state, crtc);
 612	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 613	struct intel_shared_dpll *pll;
 614	enum intel_dpll_id id;
 615
 616	if (HAS_PCH_IBX(i915)) {
 617		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 618		id = (enum intel_dpll_id) crtc->pipe;
 619		pll = intel_get_shared_dpll_by_id(i915, id);
 620
 621		drm_dbg_kms(&i915->drm,
 622			    "[CRTC:%d:%s] using pre-allocated %s\n",
 623			    crtc->base.base.id, crtc->base.name,
 624			    pll->info->name);
 625	} else {
 626		pll = intel_find_shared_dpll(state, crtc,
 627					     &crtc_state->dpll_hw_state,
 628					     BIT(DPLL_ID_PCH_PLL_B) |
 629					     BIT(DPLL_ID_PCH_PLL_A));
 630	}
 631
 632	if (!pll)
 633		return -EINVAL;
 634
 635	/* reference the pll */
 636	intel_reference_shared_dpll(state, crtc,
 637				    pll, &crtc_state->dpll_hw_state);
 638
 639	crtc_state->shared_dpll = pll;
 640
 641	return 0;
 642}
 643
 644static void ibx_dump_hw_state(struct drm_printer *p,
 645			      const struct intel_dpll_hw_state *dpll_hw_state)
 646{
 647	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
 648
 649	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 650		   "fp0: 0x%x, fp1: 0x%x\n",
 651		   hw_state->dpll,
 652		   hw_state->dpll_md,
 653		   hw_state->fp0,
 654		   hw_state->fp1);
 655}
 656
 657static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
 658				 const struct intel_dpll_hw_state *_b)
 659{
 660	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
 661	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
 662
 663	return a->dpll == b->dpll &&
 664		a->dpll_md == b->dpll_md &&
 665		a->fp0 == b->fp0 &&
 666		a->fp1 == b->fp1;
 667}
 668
 669static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 670	.enable = ibx_pch_dpll_enable,
 671	.disable = ibx_pch_dpll_disable,
 672	.get_hw_state = ibx_pch_dpll_get_hw_state,
 673};
 674
 675static const struct dpll_info pch_plls[] = {
 676	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
 677	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
 678	{}
 679};
 680
 681static const struct intel_dpll_mgr pch_pll_mgr = {
 682	.dpll_info = pch_plls,
 683	.compute_dplls = ibx_compute_dpll,
 684	.get_dplls = ibx_get_dpll,
 685	.put_dplls = intel_put_dpll,
 686	.dump_hw_state = ibx_dump_hw_state,
 687	.compare_hw_state = ibx_compare_hw_state,
 688};
 689
 690static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
 691				 struct intel_shared_dpll *pll,
 692				 const struct intel_dpll_hw_state *dpll_hw_state)
 693{
 694	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 695	const enum intel_dpll_id id = pll->info->id;
 696
 697	intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
 698	intel_de_posting_read(i915, WRPLL_CTL(id));
 699	udelay(20);
 700}
 701
 702static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
 703				struct intel_shared_dpll *pll,
 704				const struct intel_dpll_hw_state *dpll_hw_state)
 705{
 706	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 707
 708	intel_de_write(i915, SPLL_CTL, hw_state->spll);
 709	intel_de_posting_read(i915, SPLL_CTL);
 710	udelay(20);
 711}
 712
 713static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
 714				  struct intel_shared_dpll *pll)
 715{
 716	const enum intel_dpll_id id = pll->info->id;
 717
 718	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
 719	intel_de_posting_read(i915, WRPLL_CTL(id));
 720
 721	/*
 722	 * Try to set up the PCH reference clock once all DPLLs
 723	 * that depend on it have been shut down.
 724	 */
 725	if (i915->display.dpll.pch_ssc_use & BIT(id))
 726		intel_init_pch_refclk(i915);
 727}
 728
 729static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
 730				 struct intel_shared_dpll *pll)
 731{
 732	enum intel_dpll_id id = pll->info->id;
 733
 734	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
 735	intel_de_posting_read(i915, SPLL_CTL);
 736
 737	/*
 738	 * Try to set up the PCH reference clock once all DPLLs
 739	 * that depend on it have been shut down.
 740	 */
 741	if (i915->display.dpll.pch_ssc_use & BIT(id))
 742		intel_init_pch_refclk(i915);
 743}
 744
 745static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
 746				       struct intel_shared_dpll *pll,
 747				       struct intel_dpll_hw_state *dpll_hw_state)
 748{
 749	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 750	const enum intel_dpll_id id = pll->info->id;
 751	intel_wakeref_t wakeref;
 752	u32 val;
 753
 754	wakeref = intel_display_power_get_if_enabled(i915,
 755						     POWER_DOMAIN_DISPLAY_CORE);
 756	if (!wakeref)
 757		return false;
 758
 759	val = intel_de_read(i915, WRPLL_CTL(id));
 760	hw_state->wrpll = val;
 761
 762	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 763
 764	return val & WRPLL_PLL_ENABLE;
 765}
 766
 767static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
 768				      struct intel_shared_dpll *pll,
 769				      struct intel_dpll_hw_state *dpll_hw_state)
 770{
 771	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 772	intel_wakeref_t wakeref;
 773	u32 val;
 774
 775	wakeref = intel_display_power_get_if_enabled(i915,
 776						     POWER_DOMAIN_DISPLAY_CORE);
 777	if (!wakeref)
 778		return false;
 779
 780	val = intel_de_read(i915, SPLL_CTL);
 781	hw_state->spll = val;
 782
 783	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 784
 785	return val & SPLL_PLL_ENABLE;
 786}
 787
 788#define LC_FREQ 2700
 789#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 790
 791#define P_MIN 2
 792#define P_MAX 64
 793#define P_INC 2
 794
 795/* Constraints for PLL good behavior */
 796#define REF_MIN 48
 797#define REF_MAX 400
 798#define VCO_MIN 2400
 799#define VCO_MAX 4800
 800
 801struct hsw_wrpll_rnp {
 802	unsigned p, n2, r2;
 803};
 804
 805static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 806{
 807	switch (clock) {
 808	case 25175000:
 809	case 25200000:
 810	case 27000000:
 811	case 27027000:
 812	case 37762500:
 813	case 37800000:
 814	case 40500000:
 815	case 40541000:
 816	case 54000000:
 817	case 54054000:
 818	case 59341000:
 819	case 59400000:
 820	case 72000000:
 821	case 74176000:
 822	case 74250000:
 823	case 81000000:
 824	case 81081000:
 825	case 89012000:
 826	case 89100000:
 827	case 108000000:
 828	case 108108000:
 829	case 111264000:
 830	case 111375000:
 831	case 148352000:
 832	case 148500000:
 833	case 162000000:
 834	case 162162000:
 835	case 222525000:
 836	case 222750000:
 837	case 296703000:
 838	case 297000000:
 839		return 0;
 840	case 233500000:
 841	case 245250000:
 842	case 247750000:
 843	case 253250000:
 844	case 298000000:
 845		return 1500;
 846	case 169128000:
 847	case 169500000:
 848	case 179500000:
 849	case 202000000:
 850		return 2000;
 851	case 256250000:
 852	case 262500000:
 853	case 270000000:
 854	case 272500000:
 855	case 273750000:
 856	case 280750000:
 857	case 281250000:
 858	case 286000000:
 859	case 291750000:
 860		return 4000;
 861	case 267250000:
 862	case 268500000:
 863		return 5000;
 864	default:
 865		return 1000;
 866	}
 867}
 868
 869static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
 870				 unsigned int r2, unsigned int n2,
 871				 unsigned int p,
 872				 struct hsw_wrpll_rnp *best)
 873{
 874	u64 a, b, c, d, diff, diff_best;
 875
 876	/* No best (r,n,p) yet */
 877	if (best->p == 0) {
 878		best->p = p;
 879		best->n2 = n2;
 880		best->r2 = r2;
 881		return;
 882	}
 883
 884	/*
 885	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
 886	 * freq2k.
 887	 *
 888	 * delta = 1e6 *
 889	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
 890	 *	   freq2k;
 891	 *
 892	 * and we would like delta <= budget.
 893	 *
 894	 * If the discrepancy is above the PPM-based budget, always prefer to
 895	 * improve upon the previous solution.  However, if you're within the
 896	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
 897	 */
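	/*
	 * Multiplying "delta <= budget" through by freq2k * p * r2 removes
	 * the divisions, which is what the comparisons below rely on:
	 * "a >= c" is equivalent to the new candidate being within budget,
	 * and "b >= d" to the current best being within budget.
	 */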
 898	a = freq2k * budget * p * r2;
 899	b = freq2k * budget * best->p * best->r2;
 900	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
 901	diff_best = abs_diff(freq2k * best->p * best->r2,
 902			     LC_FREQ_2K * best->n2);
 903	c = 1000000 * diff;
 904	d = 1000000 * diff_best;
 905
 906	if (a < c && b < d) {
 907		/* If both are above the budget, pick the closer */
 908		if (best->p * best->r2 * diff < p * r2 * diff_best) {
 909			best->p = p;
 910			best->n2 = n2;
 911			best->r2 = r2;
 912		}
 913	} else if (a >= c && b < d) {
 914		/* The new candidate is within the budget but the previous best is not: update. */
 915		best->p = p;
 916		best->n2 = n2;
 917		best->r2 = r2;
 918	} else if (a >= c && b >= d) {
 919		/* Both are below the limit, so pick the higher n2/(r2*r2) */
 920		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
 921			best->p = p;
 922			best->n2 = n2;
 923			best->r2 = r2;
 924		}
 925	}
 926	/* Otherwise a < c && b >= d, do nothing */
 927}
 928
 929static void
 930hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 931			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 932{
 933	u64 freq2k;
 934	unsigned p, n2, r2;
 935	struct hsw_wrpll_rnp best = {};
 936	unsigned budget;
 937
 938	freq2k = clock / 100;
 939
 940	budget = hsw_wrpll_get_budget_for_freq(clock);
 941
 942	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
 943	 * and directly pass the LC PLL to it. */
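	/* (freq2k is clock / 100, so 5400000 below corresponds to 540 MHz) */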
 944	if (freq2k == 5400000) {
 945		*n2_out = 2;
 946		*p_out = 1;
 947		*r2_out = 2;
 948		return;
 949	}
 950
 951	/*
 952	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
 953	 * the WR PLL.
 954	 *
 955	 * We want R so that REF_MIN <= Ref <= REF_MAX.
 956	 * Injecting R2 = 2 * R gives:
 957	 *   REF_MAX * r2 > LC_FREQ * 2 and
 958	 *   REF_MIN * r2 < LC_FREQ * 2
 959	 *
 960	 * Which means the desired boundaries for r2 are:
 961	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
 962	 *
 963	 */
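	/*
	 * With LC_FREQ = 2700, REF_MAX = 400 and REF_MIN = 48 this works
	 * out to r2 = 14 .. 112, for example.
	 */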
 964	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
 965	     r2 <= LC_FREQ * 2 / REF_MIN;
 966	     r2++) {
 967
 968		/*
 969		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
 970		 *
 971		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
 972		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
 973		 *   VCO_MAX * r2 > n2 * LC_FREQ and
 974		 *   VCO_MIN * r2 < n2 * LC_FREQ)
 975		 *
 976		 * Which means the desired boundaries for n2 are:
 977		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
 978		 */
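		/*
		 * E.g. for r2 = 14 this gives n2 = 13 .. 24
		 * (2400 * 14 / 2700 + 1 = 13, 4800 * 14 / 2700 = 24).
		 */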
 979		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
 980		     n2 <= VCO_MAX * r2 / LC_FREQ;
 981		     n2++) {
 982
 983			for (p = P_MIN; p <= P_MAX; p += P_INC)
 984				hsw_wrpll_update_rnp(freq2k, budget,
 985						     r2, n2, p, &best);
 986		}
 987	}
 988
 989	*n2_out = best.n2;
 990	*p_out = best.p;
 991	*r2_out = best.r2;
 992}
 993
 994static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
 995				  const struct intel_shared_dpll *pll,
 996				  const struct intel_dpll_hw_state *dpll_hw_state)
 997{
 998	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
 999	int refclk;
1000	int n, p, r;
1001	u32 wrpll = hw_state->wrpll;
1002
1003	switch (wrpll & WRPLL_REF_MASK) {
1004	case WRPLL_REF_SPECIAL_HSW:
1005		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1006		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1007			refclk = i915->display.dpll.ref_clks.nssc;
1008			break;
1009		}
1010		fallthrough;
1011	case WRPLL_REF_PCH_SSC:
1012		/*
1013		 * We could calculate spread here, but our checking
1014		 * code only cares about 5% accuracy, and spread is a max of
1015		 * 0.5% downspread.
1016		 */
1017		refclk = i915->display.dpll.ref_clks.ssc;
1018		break;
1019	case WRPLL_REF_LCPLL:
1020		refclk = 2700000;
1021		break;
1022	default:
1023		MISSING_CASE(wrpll);
1024		return 0;
1025	}
1026
1027	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1028	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1029	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1030
1031	/* Convert to KHz, p & r have a fixed point portion */
1032	return (refclk * n / 10) / (p * r) * 2;
1033}
1034
1035static int
1036hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1037			   struct intel_crtc *crtc)
1038{
1039	struct drm_i915_private *i915 = to_i915(state->base.dev);
1040	struct intel_crtc_state *crtc_state =
1041		intel_atomic_get_new_crtc_state(state, crtc);
1042	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1043	unsigned int p, n2, r2;
1044
1045	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1046
1047	hw_state->wrpll =
1048		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1049		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1050		WRPLL_DIVIDER_POST(p);
1051
1052	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1053							&crtc_state->dpll_hw_state);
1054
1055	return 0;
1056}
1057
1058static struct intel_shared_dpll *
1059hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1060		       struct intel_crtc *crtc)
1061{
1062	struct intel_crtc_state *crtc_state =
1063		intel_atomic_get_new_crtc_state(state, crtc);
1064
1065	return intel_find_shared_dpll(state, crtc,
1066				      &crtc_state->dpll_hw_state,
1067				      BIT(DPLL_ID_WRPLL2) |
1068				      BIT(DPLL_ID_WRPLL1));
1069}
1070
1071static int
1072hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1073{
1074	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1075	int clock = crtc_state->port_clock;
1076
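	/*
	 * The valid cases below correspond to DP link rates of 1.62, 2.7
	 * and 5.4 GT/s (port_clock 162000, 270000 and 540000 kHz), i.e. the
	 * rates served by the fixed LCPLL_810/1350/2700 PLLs.
	 */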
1077	switch (clock / 2) {
1078	case 81000:
1079	case 135000:
1080	case 270000:
1081		return 0;
1082	default:
1083		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1084			    clock);
1085		return -EINVAL;
1086	}
1087}
1088
1089static struct intel_shared_dpll *
1090hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1091{
1092	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1093	struct intel_shared_dpll *pll;
1094	enum intel_dpll_id pll_id;
1095	int clock = crtc_state->port_clock;
1096
1097	switch (clock / 2) {
1098	case 81000:
1099		pll_id = DPLL_ID_LCPLL_810;
1100		break;
1101	case 135000:
1102		pll_id = DPLL_ID_LCPLL_1350;
1103		break;
1104	case 270000:
1105		pll_id = DPLL_ID_LCPLL_2700;
1106		break;
1107	default:
1108		MISSING_CASE(clock / 2);
1109		return NULL;
1110	}
1111
1112	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1113
1114	if (!pll)
1115		return NULL;
1116
1117	return pll;
1118}
1119
1120static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1121				  const struct intel_shared_dpll *pll,
1122				  const struct intel_dpll_hw_state *dpll_hw_state)
1123{
1124	int link_clock = 0;
1125
1126	switch (pll->info->id) {
1127	case DPLL_ID_LCPLL_810:
1128		link_clock = 81000;
1129		break;
1130	case DPLL_ID_LCPLL_1350:
1131		link_clock = 135000;
1132		break;
1133	case DPLL_ID_LCPLL_2700:
1134		link_clock = 270000;
1135		break;
1136	default:
1137		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1138		break;
1139	}
1140
1141	return link_clock * 2;
1142}
1143
1144static int
1145hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1146			  struct intel_crtc *crtc)
1147{
1148	struct intel_crtc_state *crtc_state =
1149		intel_atomic_get_new_crtc_state(state, crtc);
1150	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1151
1152	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1153		return -EINVAL;
1154
1155	hw_state->spll =
1156		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1157
1158	return 0;
1159}
1160
1161static struct intel_shared_dpll *
1162hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1163		      struct intel_crtc *crtc)
1164{
1165	struct intel_crtc_state *crtc_state =
1166		intel_atomic_get_new_crtc_state(state, crtc);
1167
1168	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1169				      BIT(DPLL_ID_SPLL));
1170}
1171
1172static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1173				 const struct intel_shared_dpll *pll,
1174				 const struct intel_dpll_hw_state *dpll_hw_state)
1175{
1176	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1177	int link_clock = 0;
1178
1179	switch (hw_state->spll & SPLL_FREQ_MASK) {
1180	case SPLL_FREQ_810MHz:
1181		link_clock = 81000;
1182		break;
1183	case SPLL_FREQ_1350MHz:
1184		link_clock = 135000;
1185		break;
1186	case SPLL_FREQ_2700MHz:
1187		link_clock = 270000;
1188		break;
1189	default:
1190		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1191		break;
1192	}
1193
1194	return link_clock * 2;
1195}
1196
1197static int hsw_compute_dpll(struct intel_atomic_state *state,
1198			    struct intel_crtc *crtc,
1199			    struct intel_encoder *encoder)
1200{
1201	struct intel_crtc_state *crtc_state =
1202		intel_atomic_get_new_crtc_state(state, crtc);
1203
1204	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1205		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1206	else if (intel_crtc_has_dp_encoder(crtc_state))
1207		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1208	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1209		return hsw_ddi_spll_compute_dpll(state, crtc);
1210	else
1211		return -EINVAL;
1212}
1213
1214static int hsw_get_dpll(struct intel_atomic_state *state,
1215			struct intel_crtc *crtc,
1216			struct intel_encoder *encoder)
1217{
1218	struct intel_crtc_state *crtc_state =
1219		intel_atomic_get_new_crtc_state(state, crtc);
1220	struct intel_shared_dpll *pll = NULL;
1221
1222	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1223		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1224	else if (intel_crtc_has_dp_encoder(crtc_state))
1225		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1226	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1227		pll = hsw_ddi_spll_get_dpll(state, crtc);
1228
1229	if (!pll)
1230		return -EINVAL;
1231
1232	intel_reference_shared_dpll(state, crtc,
1233				    pll, &crtc_state->dpll_hw_state);
1234
1235	crtc_state->shared_dpll = pll;
1236
1237	return 0;
1238}
1239
1240static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1241{
1242	i915->display.dpll.ref_clks.ssc = 135000;
1243	/* Non-SSC is only used on non-ULT HSW. */
1244	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1245		i915->display.dpll.ref_clks.nssc = 24000;
1246	else
1247		i915->display.dpll.ref_clks.nssc = 135000;
1248}
1249
1250static void hsw_dump_hw_state(struct drm_printer *p,
1251			      const struct intel_dpll_hw_state *dpll_hw_state)
1252{
1253	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1254
1255	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1256		   hw_state->wrpll, hw_state->spll);
1257}
1258
1259static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1260				 const struct intel_dpll_hw_state *_b)
1261{
1262	const struct hsw_dpll_hw_state *a = &_a->hsw;
1263	const struct hsw_dpll_hw_state *b = &_b->hsw;
1264
1265	return a->wrpll == b->wrpll &&
1266		a->spll == b->spll;
1267}
1268
1269static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1270	.enable = hsw_ddi_wrpll_enable,
1271	.disable = hsw_ddi_wrpll_disable,
1272	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1273	.get_freq = hsw_ddi_wrpll_get_freq,
1274};
1275
1276static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1277	.enable = hsw_ddi_spll_enable,
1278	.disable = hsw_ddi_spll_disable,
1279	.get_hw_state = hsw_ddi_spll_get_hw_state,
1280	.get_freq = hsw_ddi_spll_get_freq,
1281};
1282
1283static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1284				 struct intel_shared_dpll *pll,
1285				 const struct intel_dpll_hw_state *hw_state)
1286{
1287}
1288
1289static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1290				  struct intel_shared_dpll *pll)
1291{
1292}
1293
1294static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1295				       struct intel_shared_dpll *pll,
1296				       struct intel_dpll_hw_state *dpll_hw_state)
1297{
1298	return true;
1299}
1300
1301static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1302	.enable = hsw_ddi_lcpll_enable,
1303	.disable = hsw_ddi_lcpll_disable,
1304	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1305	.get_freq = hsw_ddi_lcpll_get_freq,
1306};
1307
1308static const struct dpll_info hsw_plls[] = {
1309	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1310	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1311	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1312	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1313	  .always_on = true, },
1314	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1315	  .always_on = true, },
1316	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1317	  .always_on = true, },
1318	{}
1319};
1320
1321static const struct intel_dpll_mgr hsw_pll_mgr = {
1322	.dpll_info = hsw_plls,
1323	.compute_dplls = hsw_compute_dpll,
1324	.get_dplls = hsw_get_dpll,
1325	.put_dplls = intel_put_dpll,
1326	.update_ref_clks = hsw_update_dpll_ref_clks,
1327	.dump_hw_state = hsw_dump_hw_state,
1328	.compare_hw_state = hsw_compare_hw_state,
1329};
1330
1331struct skl_dpll_regs {
1332	i915_reg_t ctl, cfgcr1, cfgcr2;
1333};
1334
1335/* this array is indexed by the *shared* pll id */
1336static const struct skl_dpll_regs skl_dpll_regs[4] = {
1337	{
1338		/* DPLL 0 */
1339		.ctl = LCPLL1_CTL,
1340		/* DPLL 0 doesn't support HDMI mode */
1341	},
1342	{
1343		/* DPLL 1 */
1344		.ctl = LCPLL2_CTL,
1345		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1346		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1347	},
1348	{
1349		/* DPLL 2 */
1350		.ctl = WRPLL_CTL(0),
1351		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1352		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1353	},
1354	{
1355		/* DPLL 3 */
1356		.ctl = WRPLL_CTL(1),
1357		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1358		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1359	},
1360};
1361
1362static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1363				    struct intel_shared_dpll *pll,
1364				    const struct skl_dpll_hw_state *hw_state)
1365{
1366	const enum intel_dpll_id id = pll->info->id;
1367
1368	intel_de_rmw(i915, DPLL_CTRL1,
1369		     DPLL_CTRL1_HDMI_MODE(id) |
1370		     DPLL_CTRL1_SSC(id) |
1371		     DPLL_CTRL1_LINK_RATE_MASK(id),
1372		     hw_state->ctrl1 << (id * 6));
1373	intel_de_posting_read(i915, DPLL_CTRL1);
1374}
1375
1376static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1377			       struct intel_shared_dpll *pll,
1378			       const struct intel_dpll_hw_state *dpll_hw_state)
1379{
1380	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1381	const struct skl_dpll_regs *regs = skl_dpll_regs;
1382	const enum intel_dpll_id id = pll->info->id;
1383
1384	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1385
1386	intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1387	intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1388	intel_de_posting_read(i915, regs[id].cfgcr1);
1389	intel_de_posting_read(i915, regs[id].cfgcr2);
1390
1391	/* the enable bit is always bit 31 */
1392	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1393
1394	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1395		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1396}
1397
1398static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1399				 struct intel_shared_dpll *pll,
1400				 const struct intel_dpll_hw_state *dpll_hw_state)
1401{
1402	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1403
1404	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1405}
1406
1407static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1408				struct intel_shared_dpll *pll)
1409{
1410	const struct skl_dpll_regs *regs = skl_dpll_regs;
1411	const enum intel_dpll_id id = pll->info->id;
1412
1413	/* the enable bit is always bit 31 */
1414	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1415	intel_de_posting_read(i915, regs[id].ctl);
1416}
1417
1418static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1419				  struct intel_shared_dpll *pll)
1420{
1421}
1422
1423static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1424				     struct intel_shared_dpll *pll,
1425				     struct intel_dpll_hw_state *dpll_hw_state)
1426{
1427	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1428	const struct skl_dpll_regs *regs = skl_dpll_regs;
1429	const enum intel_dpll_id id = pll->info->id;
1430	intel_wakeref_t wakeref;
1431	bool ret;
1432	u32 val;
1433
1434	wakeref = intel_display_power_get_if_enabled(i915,
1435						     POWER_DOMAIN_DISPLAY_CORE);
1436	if (!wakeref)
1437		return false;
1438
1439	ret = false;
1440
1441	val = intel_de_read(i915, regs[id].ctl);
1442	if (!(val & LCPLL_PLL_ENABLE))
1443		goto out;
1444
1445	val = intel_de_read(i915, DPLL_CTRL1);
1446	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1447
1448	/* avoid reading back stale values if HDMI mode is not enabled */
1449	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1450		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1451		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1452	}
1453	ret = true;
1454
1455out:
1456	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1457
1458	return ret;
1459}
1460
1461static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1462				       struct intel_shared_dpll *pll,
1463				       struct intel_dpll_hw_state *dpll_hw_state)
1464{
1465	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1466	const struct skl_dpll_regs *regs = skl_dpll_regs;
1467	const enum intel_dpll_id id = pll->info->id;
1468	intel_wakeref_t wakeref;
1469	u32 val;
1470	bool ret;
1471
1472	wakeref = intel_display_power_get_if_enabled(i915,
1473						     POWER_DOMAIN_DISPLAY_CORE);
1474	if (!wakeref)
1475		return false;
1476
1477	ret = false;
1478
1479	/* DPLL0 is always enabled since it drives CDCLK */
1480	val = intel_de_read(i915, regs[id].ctl);
1481	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1482		goto out;
1483
1484	val = intel_de_read(i915, DPLL_CTRL1);
1485	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1486
1487	ret = true;
1488
1489out:
1490	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1491
1492	return ret;
1493}
1494
1495struct skl_wrpll_context {
1496	u64 min_deviation;		/* current minimal deviation */
1497	u64 central_freq;		/* chosen central freq */
1498	u64 dco_freq;			/* chosen dco freq */
1499	unsigned int p;			/* chosen divider */
1500};
1501
1502/* DCO freq must be within +1%/-6%  of the DCO central freq */
1503#define SKL_DCO_MAX_PDEVIATION	100
1504#define SKL_DCO_MAX_NDEVIATION	600
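/* (the deviation below is tracked in units of 0.01%: 100 == 1%, 600 == 6%) */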
1505
1506static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1507				  u64 central_freq,
1508				  u64 dco_freq,
1509				  unsigned int divider)
1510{
1511	u64 deviation;
1512
1513	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1514			      central_freq);
1515
1516	/* positive deviation */
1517	if (dco_freq >= central_freq) {
1518		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1519		    deviation < ctx->min_deviation) {
1520			ctx->min_deviation = deviation;
1521			ctx->central_freq = central_freq;
1522			ctx->dco_freq = dco_freq;
1523			ctx->p = divider;
1524		}
1525	/* negative deviation */
1526	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1527		   deviation < ctx->min_deviation) {
1528		ctx->min_deviation = deviation;
1529		ctx->central_freq = central_freq;
1530		ctx->dco_freq = dco_freq;
1531		ctx->p = divider;
1532	}
1533}
1534
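/*
 * Split the overall post divider p into the three hardware dividers
 * p0 (pdiv), p1 (qdiv) and p2 (kdiv) such that p = p0 * p1 * p2,
 * e.g. p = 20 -> p0 = 2, p1 = 5, p2 = 2 with the rules below.
 */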
1535static void skl_wrpll_get_multipliers(unsigned int p,
1536				      unsigned int *p0 /* out */,
1537				      unsigned int *p1 /* out */,
1538				      unsigned int *p2 /* out */)
1539{
1540	/* even dividers */
1541	if (p % 2 == 0) {
1542		unsigned int half = p / 2;
1543
1544		if (half == 1 || half == 2 || half == 3 || half == 5) {
1545			*p0 = 2;
1546			*p1 = 1;
1547			*p2 = half;
1548		} else if (half % 2 == 0) {
1549			*p0 = 2;
1550			*p1 = half / 2;
1551			*p2 = 2;
1552		} else if (half % 3 == 0) {
1553			*p0 = 3;
1554			*p1 = half / 3;
1555			*p2 = 2;
1556		} else if (half % 7 == 0) {
1557			*p0 = 7;
1558			*p1 = half / 7;
1559			*p2 = 2;
1560		}
1561	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1562		*p0 = 3;
1563		*p1 = 1;
1564		*p2 = p / 3;
1565	} else if (p == 5 || p == 7) {
1566		*p0 = p;
1567		*p1 = 1;
1568		*p2 = 1;
1569	} else if (p == 15) {
1570		*p0 = 3;
1571		*p1 = 1;
1572		*p2 = 5;
1573	} else if (p == 21) {
1574		*p0 = 7;
1575		*p1 = 1;
1576		*p2 = 3;
1577	} else if (p == 35) {
1578		*p0 = 7;
1579		*p1 = 1;
1580		*p2 = 5;
1581	}
1582}
1583
1584struct skl_wrpll_params {
1585	u32 dco_fraction;
1586	u32 dco_integer;
1587	u32 qdiv_ratio;
1588	u32 qdiv_mode;
1589	u32 kdiv;
1590	u32 pdiv;
1591	u32 central_freq;
1592};
1593
1594static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1595				      u64 afe_clock,
1596				      int ref_clock,
1597				      u64 central_freq,
1598				      u32 p0, u32 p1, u32 p2)
1599{
1600	u64 dco_freq;
1601
1602	switch (central_freq) {
1603	case 9600000000ULL:
1604		params->central_freq = 0;
1605		break;
1606	case 9000000000ULL:
1607		params->central_freq = 1;
1608		break;
1609	case 8400000000ULL:
1610		params->central_freq = 3;
1611	}
1612
1613	switch (p0) {
1614	case 1:
1615		params->pdiv = 0;
1616		break;
1617	case 2:
1618		params->pdiv = 1;
1619		break;
1620	case 3:
1621		params->pdiv = 2;
1622		break;
1623	case 7:
1624		params->pdiv = 4;
1625		break;
1626	default:
1627		WARN(1, "Incorrect PDiv\n");
1628	}
1629
1630	switch (p2) {
1631	case 5:
1632		params->kdiv = 0;
1633		break;
1634	case 2:
1635		params->kdiv = 1;
1636		break;
1637	case 3:
1638		params->kdiv = 2;
1639		break;
1640	case 1:
1641		params->kdiv = 3;
1642		break;
1643	default:
1644		WARN(1, "Incorrect KDiv\n");
1645	}
1646
1647	params->qdiv_ratio = p1;
1648	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1649
1650	dco_freq = p0 * p1 * p2 * afe_clock;
1651
1652	/*
1653	 * Intermediate values are in Hz.
 1654	 * Divide by MHz to match bspec
1655	 */
1656	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1657	params->dco_fraction =
1658		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1659			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
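	/*
	 * For example, a 24 MHz reference (ref_clock = 24000) and
	 * dco_freq = 8.1 GHz yield dco_integer = 337 and
	 * dco_fraction = 0x4000, i.e. a DCO ratio of 337.5.
	 */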
1660}
1661
1662static int
1663skl_ddi_calculate_wrpll(int clock,
1664			int ref_clock,
1665			struct skl_wrpll_params *wrpll_params)
1666{
1667	static const u64 dco_central_freq[3] = { 8400000000ULL,
1668						 9000000000ULL,
1669						 9600000000ULL };
1670	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1671					    24, 28, 30, 32, 36, 40, 42, 44,
1672					    48, 52, 54, 56, 60, 64, 66, 68,
1673					    70, 72, 76, 78, 80, 84, 88, 90,
1674					    92, 96, 98 };
1675	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1676	static const struct {
1677		const u8 *list;
1678		int n_dividers;
1679	} dividers[] = {
1680		{ even_dividers, ARRAY_SIZE(even_dividers) },
1681		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1682	};
1683	struct skl_wrpll_context ctx = {
1684		.min_deviation = U64_MAX,
1685	};
1686	unsigned int dco, d, i;
1687	unsigned int p0, p1, p2;
1688	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
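	/*
	 * E.g. a 300 MHz pixel clock gives afe_clock = 1.5 GHz; divider
	 * p = 6 then puts the DCO at exactly the 9 GHz central frequency.
	 */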
1689
1690	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1691		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1692			for (i = 0; i < dividers[d].n_dividers; i++) {
1693				unsigned int p = dividers[d].list[i];
1694				u64 dco_freq = p * afe_clock;
1695
1696				skl_wrpll_try_divider(&ctx,
1697						      dco_central_freq[dco],
1698						      dco_freq,
1699						      p);
1700				/*
1701				 * Skip the remaining dividers if we're sure to
1702				 * have found the definitive divider, we can't
1703				 * improve a 0 deviation.
1704				 */
1705				if (ctx.min_deviation == 0)
1706					goto skip_remaining_dividers;
1707			}
1708		}
1709
1710skip_remaining_dividers:
1711		/*
1712		 * If a solution is found with an even divider, prefer
1713		 * this one.
1714		 */
1715		if (d == 0 && ctx.p)
1716			break;
1717	}
1718
1719	if (!ctx.p)
1720		return -EINVAL;
1721
1722	/*
1723	 * gcc incorrectly analyses that these can be used without being
1724	 * initialized. To be fair, it's hard to guess.
1725	 */
1726	p0 = p1 = p2 = 0;
1727	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1728	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1729				  ctx.central_freq, p0, p1, p2);
1730
1731	return 0;
1732}
1733
1734static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1735				  const struct intel_shared_dpll *pll,
1736				  const struct intel_dpll_hw_state *dpll_hw_state)
1737{
1738	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1739	int ref_clock = i915->display.dpll.ref_clks.nssc;
1740	u32 p0, p1, p2, dco_freq;
1741
1742	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1743	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1744
1745	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1746		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1747	else
1748		p1 = 1;
1749
1750
1751	switch (p0) {
1752	case DPLL_CFGCR2_PDIV_1:
1753		p0 = 1;
1754		break;
1755	case DPLL_CFGCR2_PDIV_2:
1756		p0 = 2;
1757		break;
1758	case DPLL_CFGCR2_PDIV_3:
1759		p0 = 3;
1760		break;
1761	case DPLL_CFGCR2_PDIV_7_INVALID:
1762		/*
1763		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1764		 * handling it the same way as PDIV_7.
1765		 */
1766		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1767		fallthrough;
1768	case DPLL_CFGCR2_PDIV_7:
1769		p0 = 7;
1770		break;
1771	default:
1772		MISSING_CASE(p0);
1773		return 0;
1774	}
1775
1776	switch (p2) {
1777	case DPLL_CFGCR2_KDIV_5:
1778		p2 = 5;
1779		break;
1780	case DPLL_CFGCR2_KDIV_2:
1781		p2 = 2;
1782		break;
1783	case DPLL_CFGCR2_KDIV_3:
1784		p2 = 3;
1785		break;
1786	case DPLL_CFGCR2_KDIV_1:
1787		p2 = 1;
1788		break;
1789	default:
1790		MISSING_CASE(p2);
1791		return 0;
1792	}
1793
1794	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1795		   ref_clock;
1796
1797	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1798		    ref_clock / 0x8000;
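	/*
	 * E.g. dco_integer = 337 and dco_fraction = 0x4000 with a 24 MHz
	 * reference give dco_freq = 337 * 24000 + 24000 / 2 = 8100000 kHz,
	 * i.e. an 8.1 GHz DCO.
	 */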
1799
1800	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1801		return 0;
1802
1803	return dco_freq / (p0 * p1 * p2 * 5);
1804}
1805
1806static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1807{
1808	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1809	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1810	struct skl_wrpll_params wrpll_params = {};
1811	int ret;
1812
1813	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1814				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1815	if (ret)
1816		return ret;
1817
1818	/*
1819	 * See comment in intel_dpll_hw_state to understand why we always use 0
1820	 * as the DPLL id in this function.
1821	 */
1822	hw_state->ctrl1 =
1823		DPLL_CTRL1_OVERRIDE(0) |
1824		DPLL_CTRL1_HDMI_MODE(0);
1825
1826	hw_state->cfgcr1 =
1827		DPLL_CFGCR1_FREQ_ENABLE |
1828		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1829		wrpll_params.dco_integer;
1830
1831	hw_state->cfgcr2 =
1832		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1833		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1834		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1835		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1836		wrpll_params.central_freq;
1837
1838	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1839							&crtc_state->dpll_hw_state);
1840
1841	return 0;
1842}
1843
1844static int
1845skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1846{
1847	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1848	u32 ctrl1;
1849
1850	/*
1851	 * See comment in intel_dpll_hw_state to understand why we always use 0
1852	 * as the DPLL id in this function.
1853	 */
1854	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1855	switch (crtc_state->port_clock / 2) {
1856	case 81000:
1857		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1858		break;
1859	case 135000:
1860		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1861		break;
1862	case 270000:
1863		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1864		break;
1865		/* eDP 1.4 rates */
1866	case 162000:
1867		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1868		break;
1869	case 108000:
1870		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1871		break;
1872	case 216000:
1873		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1874		break;
1875	}
1876
1877	hw_state->ctrl1 = ctrl1;
1878
1879	return 0;
1880}
1881
1882static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1883				  const struct intel_shared_dpll *pll,
1884				  const struct intel_dpll_hw_state *dpll_hw_state)
1885{
1886	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1887	int link_clock = 0;
1888
1889	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1890		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1891	case DPLL_CTRL1_LINK_RATE_810:
1892		link_clock = 81000;
1893		break;
1894	case DPLL_CTRL1_LINK_RATE_1080:
1895		link_clock = 108000;
1896		break;
1897	case DPLL_CTRL1_LINK_RATE_1350:
1898		link_clock = 135000;
1899		break;
1900	case DPLL_CTRL1_LINK_RATE_1620:
1901		link_clock = 162000;
1902		break;
1903	case DPLL_CTRL1_LINK_RATE_2160:
1904		link_clock = 216000;
1905		break;
1906	case DPLL_CTRL1_LINK_RATE_2700:
1907		link_clock = 270000;
1908		break;
1909	default:
1910		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1911		break;
1912	}
1913
1914	return link_clock * 2;
1915}
1916
1917static int skl_compute_dpll(struct intel_atomic_state *state,
1918			    struct intel_crtc *crtc,
1919			    struct intel_encoder *encoder)
1920{
1921	struct intel_crtc_state *crtc_state =
1922		intel_atomic_get_new_crtc_state(state, crtc);
1923
1924	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1925		return skl_ddi_hdmi_pll_dividers(crtc_state);
1926	else if (intel_crtc_has_dp_encoder(crtc_state))
1927		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1928	else
1929		return -EINVAL;
1930}
1931
1932static int skl_get_dpll(struct intel_atomic_state *state,
1933			struct intel_crtc *crtc,
1934			struct intel_encoder *encoder)
1935{
1936	struct intel_crtc_state *crtc_state =
1937		intel_atomic_get_new_crtc_state(state, crtc);
1938	struct intel_shared_dpll *pll;
1939
1940	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1941		pll = intel_find_shared_dpll(state, crtc,
1942					     &crtc_state->dpll_hw_state,
1943					     BIT(DPLL_ID_SKL_DPLL0));
1944	else
1945		pll = intel_find_shared_dpll(state, crtc,
1946					     &crtc_state->dpll_hw_state,
1947					     BIT(DPLL_ID_SKL_DPLL3) |
1948					     BIT(DPLL_ID_SKL_DPLL2) |
1949					     BIT(DPLL_ID_SKL_DPLL1));
1950	if (!pll)
1951		return -EINVAL;
1952
1953	intel_reference_shared_dpll(state, crtc,
1954				    pll, &crtc_state->dpll_hw_state);
1955
1956	crtc_state->shared_dpll = pll;
1957
1958	return 0;
1959}
1960
1961static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1962				const struct intel_shared_dpll *pll,
1963				const struct intel_dpll_hw_state *dpll_hw_state)
1964{
1965	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1966
1967	/*
1968	 * ctrl1 register is already shifted for each pll, just use 0 to get
1969	 * the internal shift for each field
1970	 */
1971	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1972		return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1973	else
1974		return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1975}
1976
1977static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1978{
1979	/* No SSC ref */
1980	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1981}
1982
1983static void skl_dump_hw_state(struct drm_printer *p,
1984			      const struct intel_dpll_hw_state *dpll_hw_state)
1985{
1986	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1987
1988	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1989		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1990}
1991
1992static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1993				 const struct intel_dpll_hw_state *_b)
1994{
1995	const struct skl_dpll_hw_state *a = &_a->skl;
1996	const struct skl_dpll_hw_state *b = &_b->skl;
1997
1998	return a->ctrl1 == b->ctrl1 &&
1999		a->cfgcr1 == b->cfgcr1 &&
2000		a->cfgcr2 == b->cfgcr2;
2001}
2002
2003static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2004	.enable = skl_ddi_pll_enable,
2005	.disable = skl_ddi_pll_disable,
2006	.get_hw_state = skl_ddi_pll_get_hw_state,
2007	.get_freq = skl_ddi_pll_get_freq,
2008};
2009
2010static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2011	.enable = skl_ddi_dpll0_enable,
2012	.disable = skl_ddi_dpll0_disable,
2013	.get_hw_state = skl_ddi_dpll0_get_hw_state,
2014	.get_freq = skl_ddi_pll_get_freq,
2015};
2016
2017static const struct dpll_info skl_plls[] = {
2018	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2019	  .always_on = true, },
2020	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2021	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2022	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2023	{}
2024};
2025
2026static const struct intel_dpll_mgr skl_pll_mgr = {
2027	.dpll_info = skl_plls,
2028	.compute_dplls = skl_compute_dpll,
2029	.get_dplls = skl_get_dpll,
2030	.put_dplls = intel_put_dpll,
2031	.update_ref_clks = skl_update_dpll_ref_clks,
2032	.dump_hw_state = skl_dump_hw_state,
2033	.compare_hw_state = skl_compare_hw_state,
2034};
2035
2036static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2037			       struct intel_shared_dpll *pll,
2038			       const struct intel_dpll_hw_state *dpll_hw_state)
2039{
2040	struct intel_display *display = &i915->display;
2041	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2042	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2043	enum dpio_phy phy;
2044	enum dpio_channel ch;
2045	u32 temp;
2046
2047	bxt_port_to_phy_channel(display, port, &phy, &ch);
2048
2049	/* Non-SSC reference */
2050	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2051
2052	if (IS_GEMINILAKE(i915)) {
2053		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2054			     0, PORT_PLL_POWER_ENABLE);
2055
2056		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2057				 PORT_PLL_POWER_STATE), 200))
2058			drm_err(&i915->drm,
2059				"Power state not set for PLL:%d\n", port);
2060	}
2061
2062	/* Disable 10 bit clock */
2063	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2064		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2065
2066	/* Write P1 & P2 */
2067	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2068		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2069
2070	/* Write M2 integer */
2071	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2072		     PORT_PLL_M2_INT_MASK, hw_state->pll0);
2073
2074	/* Write N */
2075	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2076		     PORT_PLL_N_MASK, hw_state->pll1);
2077
2078	/* Write M2 fraction */
2079	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2080		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2081
2082	/* Write M2 fraction enable */
2083	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2084		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2085
2086	/* Write coeff */
2087	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2088	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2089	temp &= ~PORT_PLL_INT_COEFF_MASK;
2090	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2091	temp |= hw_state->pll6;
2092	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2093
2094	/* Write calibration val */
2095	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2096		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2097
2098	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2099		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2100
2101	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2102	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2103	temp &= ~PORT_PLL_DCO_AMP_MASK;
2104	temp |= hw_state->pll10;
2105	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2106
2107	/* Recalibrate with new settings */
2108	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2109	temp |= PORT_PLL_RECALIBRATE;
2110	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2111	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2112	temp |= hw_state->ebb4;
2113	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2114
2115	/* Enable PLL */
2116	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2117	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2118
2119	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2120			200))
2121		drm_err(&i915->drm, "PLL %d not locked\n", port);
2122
2123	if (IS_GEMINILAKE(i915)) {
2124		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2125		temp |= DCC_DELAY_RANGE_2;
2126		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2127	}
2128
2129	/*
2130	 * While we write to the group register to program all lanes at once we
2131	 * can read only lane registers and we pick lanes 0/1 for that.
2132	 */
2133	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2134	temp &= ~LANE_STAGGER_MASK;
2135	temp &= ~LANESTAGGER_STRAP_OVRD;
2136	temp |= hw_state->pcsdw12;
2137	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2138}
2139
2140static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2141				struct intel_shared_dpll *pll)
2142{
2143	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2144
2145	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2146	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2147
2148	if (IS_GEMINILAKE(i915)) {
2149		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2150			     PORT_PLL_POWER_ENABLE, 0);
2151
2152		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2153				  PORT_PLL_POWER_STATE), 200))
2154			drm_err(&i915->drm,
2155				"Power state not reset for PLL:%d\n", port);
2156	}
2157}
2158
2159static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2160				     struct intel_shared_dpll *pll,
2161				     struct intel_dpll_hw_state *dpll_hw_state)
2162{
2163	struct intel_display *display = &i915->display;
2164	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2165	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2166	intel_wakeref_t wakeref;
2167	enum dpio_phy phy;
2168	enum dpio_channel ch;
2169	u32 val;
2170	bool ret;
2171
2172	bxt_port_to_phy_channel(display, port, &phy, &ch);
2173
2174	wakeref = intel_display_power_get_if_enabled(i915,
2175						     POWER_DOMAIN_DISPLAY_CORE);
2176	if (!wakeref)
2177		return false;
2178
2179	ret = false;
2180
2181	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2182	if (!(val & PORT_PLL_ENABLE))
2183		goto out;
2184
2185	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2186	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2187
2188	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2189	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2190
2191	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2192	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2193
2194	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2195	hw_state->pll1 &= PORT_PLL_N_MASK;
2196
2197	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2198	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2199
2200	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2201	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2202
2203	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2204	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2205			  PORT_PLL_INT_COEFF_MASK |
2206			  PORT_PLL_GAIN_CTL_MASK;
2207
2208	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2209	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2210
2211	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2212	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2213
2214	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2215	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2216			   PORT_PLL_DCO_AMP_MASK;
2217
2218	/*
2219	 * While we write to the group register to program all lanes at once we
2220	 * can read only lane registers. We configure all lanes the same way, so
2221	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2222	 */
2223	hw_state->pcsdw12 = intel_de_read(i915,
2224					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2225	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2226		drm_dbg(&i915->drm,
2227			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2228			hw_state->pcsdw12,
2229			intel_de_read(i915,
2230				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2231	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2232
2233	ret = true;
2234
2235out:
2236	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2237
2238	return ret;
2239}
2240
2241/* pre-calculated values for DP linkrates */
2242static const struct dpll bxt_dp_clk_val[] = {
2243	/* m2 is .22 binary fixed point */
2244	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2245	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2246	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2247	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2248	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2249	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2250	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2251};
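/*
 * As a sanity check of the .22 fixed-point encoding above:
 * 0x6c00000 / (1 << 22) = 27.0 exactly, and 0x819999a / (1 << 22) ~= 32.4.
 */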
2252
2253static int
2254bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2255			  struct dpll *clk_div)
2256{
2257	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2258
2259	/* Calculate HDMI div */
2260	/*
2261	 * FIXME: tie the following calculation into
2262	 * i9xx_crtc_compute_clock
2263	 */
2264	if (!bxt_find_best_dpll(crtc_state, clk_div))
2265		return -EINVAL;
2266
2267	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2268
2269	return 0;
2270}
2271
2272static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2273				    struct dpll *clk_div)
2274{
2275	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2276	int i;
2277
2278	*clk_div = bxt_dp_clk_val[0];
2279	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2280		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2281			*clk_div = bxt_dp_clk_val[i];
2282			break;
2283		}
2284	}
2285
2286	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2287
2288	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2289		    clk_div->dot != crtc_state->port_clock);
2290}
2291
2292static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2293				     const struct dpll *clk_div)
2294{
2295	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2296	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2297	int clock = crtc_state->port_clock;
2298	int vco = clk_div->vco;
2299	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2300	u32 lanestagger;
2301
2302	if (vco >= 6200000 && vco <= 6700000) {
2303		prop_coef = 4;
2304		int_coef = 9;
2305		gain_ctl = 3;
2306		targ_cnt = 8;
2307	} else if ((vco > 5400000 && vco < 6200000) ||
2308			(vco >= 4800000 && vco < 5400000)) {
2309		prop_coef = 5;
2310		int_coef = 11;
2311		gain_ctl = 3;
2312		targ_cnt = 9;
2313	} else if (vco == 5400000) {
2314		prop_coef = 3;
2315		int_coef = 8;
2316		gain_ctl = 1;
2317		targ_cnt = 9;
2318	} else {
2319		drm_err(&i915->drm, "Invalid VCO\n");
2320		return -EINVAL;
2321	}
2322
2323	if (clock > 270000)
2324		lanestagger = 0x18;
2325	else if (clock > 135000)
2326		lanestagger = 0x0d;
2327	else if (clock > 67000)
2328		lanestagger = 0x07;
2329	else if (clock > 33000)
2330		lanestagger = 0x04;
2331	else
2332		lanestagger = 0x02;
2333
2334	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2335	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2336	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2337	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2338
2339	if (clk_div->m2 & 0x3fffff)
2340		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2341
2342	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2343		PORT_PLL_INT_COEFF(int_coef) |
2344		PORT_PLL_GAIN_CTL(gain_ctl);
2345
2346	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2347
2348	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2349
2350	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2351		PORT_PLL_DCO_AMP_OVR_EN_H;
2352
2353	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2354
2355	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2356
2357	return 0;
2358}
2359
2360static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2361				const struct intel_shared_dpll *pll,
2362				const struct intel_dpll_hw_state *dpll_hw_state)
2363{
2364	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2365	struct dpll clock;
2366
2367	clock.m1 = 2;
2368	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2369	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2370		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2371					  hw_state->pll2);
2372	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2373	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2374	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2375
2376	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2377}
2378
2379static int
2380bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2381{
2382	struct dpll clk_div = {};
2383
2384	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2385
2386	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2387}
2388
2389static int
2390bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2391{
2392	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2393	struct dpll clk_div = {};
2394	int ret;
2395
2396	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2397
2398	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2399	if (ret)
2400		return ret;
2401
2402	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2403						      &crtc_state->dpll_hw_state);
2404
2405	return 0;
2406}
2407
2408static int bxt_compute_dpll(struct intel_atomic_state *state,
2409			    struct intel_crtc *crtc,
2410			    struct intel_encoder *encoder)
2411{
2412	struct intel_crtc_state *crtc_state =
2413		intel_atomic_get_new_crtc_state(state, crtc);
2414
2415	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2416		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2417	else if (intel_crtc_has_dp_encoder(crtc_state))
2418		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2419	else
2420		return -EINVAL;
2421}
2422
2423static int bxt_get_dpll(struct intel_atomic_state *state,
2424			struct intel_crtc *crtc,
2425			struct intel_encoder *encoder)
2426{
2427	struct intel_crtc_state *crtc_state =
2428		intel_atomic_get_new_crtc_state(state, crtc);
2429	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2430	struct intel_shared_dpll *pll;
2431	enum intel_dpll_id id;
2432
2433	/* 1:1 mapping between ports and PLLs */
2434	id = (enum intel_dpll_id) encoder->port;
2435	pll = intel_get_shared_dpll_by_id(i915, id);
2436
2437	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2438		    crtc->base.base.id, crtc->base.name, pll->info->name);
2439
2440	intel_reference_shared_dpll(state, crtc,
2441				    pll, &crtc_state->dpll_hw_state);
2442
2443	crtc_state->shared_dpll = pll;
2444
2445	return 0;
2446}
2447
2448static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2449{
2450	i915->display.dpll.ref_clks.ssc = 100000;
2451	i915->display.dpll.ref_clks.nssc = 100000;
2452	/* DSI non-SSC ref 19.2MHz */
2453}
2454
2455static void bxt_dump_hw_state(struct drm_printer *p,
2456			      const struct intel_dpll_hw_state *dpll_hw_state)
2457{
2458	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2459
2460	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2461		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2462		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2463		   hw_state->ebb0, hw_state->ebb4,
2464		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2465		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2466		   hw_state->pcsdw12);
2467}
2468
2469static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2470				 const struct intel_dpll_hw_state *_b)
2471{
2472	const struct bxt_dpll_hw_state *a = &_a->bxt;
2473	const struct bxt_dpll_hw_state *b = &_b->bxt;
2474
2475	return a->ebb0 == b->ebb0 &&
2476		a->ebb4 == b->ebb4 &&
2477		a->pll0 == b->pll0 &&
2478		a->pll1 == b->pll1 &&
2479		a->pll2 == b->pll2 &&
2480		a->pll3 == b->pll3 &&
2481		a->pll6 == b->pll6 &&
2482		a->pll8 == b->pll8 &&
		a->pll9 == b->pll9 &&
2483		a->pll10 == b->pll10 &&
2484		a->pcsdw12 == b->pcsdw12;
2485}
2486
2487static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2488	.enable = bxt_ddi_pll_enable,
2489	.disable = bxt_ddi_pll_disable,
2490	.get_hw_state = bxt_ddi_pll_get_hw_state,
2491	.get_freq = bxt_ddi_pll_get_freq,
2492};
2493
2494static const struct dpll_info bxt_plls[] = {
2495	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2496	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2497	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2498	{}
2499};
2500
2501static const struct intel_dpll_mgr bxt_pll_mgr = {
2502	.dpll_info = bxt_plls,
2503	.compute_dplls = bxt_compute_dpll,
2504	.get_dplls = bxt_get_dpll,
2505	.put_dplls = intel_put_dpll,
2506	.update_ref_clks = bxt_update_dpll_ref_clks,
2507	.dump_hw_state = bxt_dump_hw_state,
2508	.compare_hw_state = bxt_compare_hw_state,
2509};
2510
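/*
 * Split the overall divider into the P, Q and K dividers the hardware
 * expects, such that bestdiv == pdiv * qdiv * kdiv with pdiv in
 * {2, 3, 5, 7} and kdiv in {1, 2, 3}; e.g. bestdiv 12 becomes 2 * 3 * 2.
 */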
2511static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2512				      int *qdiv, int *kdiv)
2513{
2514	/* even dividers */
2515	if (bestdiv % 2 == 0) {
2516		if (bestdiv == 2) {
2517			*pdiv = 2;
2518			*qdiv = 1;
2519			*kdiv = 1;
2520		} else if (bestdiv % 4 == 0) {
2521			*pdiv = 2;
2522			*qdiv = bestdiv / 4;
2523			*kdiv = 2;
2524		} else if (bestdiv % 6 == 0) {
2525			*pdiv = 3;
2526			*qdiv = bestdiv / 6;
2527			*kdiv = 2;
2528		} else if (bestdiv % 5 == 0) {
2529			*pdiv = 5;
2530			*qdiv = bestdiv / 10;
2531			*kdiv = 2;
2532		} else if (bestdiv % 14 == 0) {
2533			*pdiv = 7;
2534			*qdiv = bestdiv / 14;
2535			*kdiv = 2;
2536		}
2537	} else {
2538		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2539			*pdiv = bestdiv;
2540			*qdiv = 1;
2541			*kdiv = 1;
2542		} else { /* 9, 15, 21 */
2543			*pdiv = bestdiv / 3;
2544			*qdiv = 1;
2545			*kdiv = 3;
2546		}
2547	}
2548}
2549
2550static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2551				      u32 dco_freq, u32 ref_freq,
2552				      int pdiv, int qdiv, int kdiv)
2553{
2554	u32 dco;
2555
2556	switch (kdiv) {
2557	case 1:
2558		params->kdiv = 1;
2559		break;
2560	case 2:
2561		params->kdiv = 2;
2562		break;
2563	case 3:
2564		params->kdiv = 4;
2565		break;
2566	default:
2567		WARN(1, "Incorrect KDiv\n");
2568	}
2569
2570	switch (pdiv) {
2571	case 2:
2572		params->pdiv = 1;
2573		break;
2574	case 3:
2575		params->pdiv = 2;
2576		break;
2577	case 5:
2578		params->pdiv = 4;
2579		break;
2580	case 7:
2581		params->pdiv = 8;
2582		break;
2583	default:
2584		WARN(1, "Incorrect PDiv\n");
2585	}
2586
2587	WARN_ON(kdiv != 2 && qdiv != 1);
2588
2589	params->qdiv_ratio = qdiv;
2590	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2591
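	/*
	 * Express the DCO frequency as a multiple of the reference in 15 bit
	 * fractional fixed point; e.g. 8,910,000 kHz / 19,200 kHz = 464 +
	 * 2048/32768, i.e. dco_integer 0x1d0 and dco_fraction 0x800.
	 */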
2592	dco = div_u64((u64)dco_freq << 15, ref_freq);
2593
2594	params->dco_integer = dco >> 15;
2595	params->dco_fraction = dco & 0x7fff;
2596}
2597
2598/*
2599 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2600 * Program half of the nominal DCO divider fraction value.
2601 */
2602static bool
2603ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2604{
2605	return ((IS_ELKHARTLAKE(i915) &&
2606		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2607		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2608		 i915->display.dpll.ref_clks.nssc == 38400;
2609}
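/*
 * Note that when this workaround applies, icl_calc_dpll_state() programs
 * half of the nominal DCO fraction and icl_ddi_combo_pll_get_freq() doubles
 * the value read back, so the computed and read-out port clocks still agree.
 */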
2610
2611struct icl_combo_pll_params {
2612	int clock;
2613	struct skl_wrpll_params wrpll;
2614};
2615
2616/*
2617 * These values are already adjusted: they're the bits we write to the
2618 * registers, not the logical values.
2619 */
2620static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2621	{ 540000,
2622	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2623	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2624	{ 270000,
2625	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2626	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2627	{ 162000,
2628	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2629	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2630	{ 324000,
2631	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2632	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2633	{ 216000,
2634	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2635	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2636	{ 432000,
2637	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2638	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2639	{ 648000,
2640	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2641	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2642	{ 810000,
2643	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2644	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2645};
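/*
 * For example, with a 24 MHz reference the 540000 entry above decodes to
 * DCO = (0x151 + 0x4000/0x8000) * 24000 = 8,100,000 kHz, and with pdiv 3,
 * qdiv 1, kdiv 1 the link clock is 8,100,000 / (3 * 1 * 1 * 5) = 540,000 kHz,
 * matching icl_ddi_combo_pll_get_freq().
 */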
2646
2647
2648/* Also used for 38.4 MHz values. */
2649static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2650	{ 540000,
2651	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2652	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2653	{ 270000,
2654	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2655	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2656	{ 162000,
2657	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2658	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2659	{ 324000,
2660	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2661	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2662	{ 216000,
2663	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2664	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2665	{ 432000,
2666	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2667	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2668	{ 648000,
2669	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2670	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2671	{ 810000,
2672	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2673	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2674};
2675
2676static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2677	.dco_integer = 0x151, .dco_fraction = 0x4000,
2678	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2679};
2680
2681static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2682	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2683	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2684};
2685
2686static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2687	.dco_integer = 0x54, .dco_fraction = 0x3000,
2688	/* the following params are unused */
2689	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2690};
2691
2692static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2693	.dco_integer = 0x43, .dco_fraction = 0x4000,
2694	/* the following params are unused */
2695};
2696
2697static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2698				 struct skl_wrpll_params *pll_params)
2699{
2700	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2701	const struct icl_combo_pll_params *params =
2702		i915->display.dpll.ref_clks.nssc == 24000 ?
2703		icl_dp_combo_pll_24MHz_values :
2704		icl_dp_combo_pll_19_2MHz_values;
2705	int clock = crtc_state->port_clock;
2706	int i;
2707
2708	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2709		if (clock == params[i].clock) {
2710			*pll_params = params[i].wrpll;
2711			return 0;
2712		}
2713	}
2714
2715	MISSING_CASE(clock);
2716	return -EINVAL;
2717}
2718
2719static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2720			    struct skl_wrpll_params *pll_params)
2721{
2722	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2723
2724	if (DISPLAY_VER(i915) >= 12) {
2725		switch (i915->display.dpll.ref_clks.nssc) {
2726		default:
2727			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2728			fallthrough;
2729		case 19200:
2730		case 38400:
2731			*pll_params = tgl_tbt_pll_19_2MHz_values;
2732			break;
2733		case 24000:
2734			*pll_params = tgl_tbt_pll_24MHz_values;
2735			break;
2736		}
2737	} else {
2738		switch (i915->display.dpll.ref_clks.nssc) {
2739		default:
2740			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2741			fallthrough;
2742		case 19200:
2743		case 38400:
2744			*pll_params = icl_tbt_pll_19_2MHz_values;
2745			break;
2746		case 24000:
2747			*pll_params = icl_tbt_pll_24MHz_values;
2748			break;
2749		}
2750	}
2751
2752	return 0;
2753}
2754
2755static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2756				    const struct intel_shared_dpll *pll,
2757				    const struct intel_dpll_hw_state *dpll_hw_state)
2758{
2759	/*
2760	 * The PLL outputs multiple frequencies at the same time; the selection
2761	 * is made at the DDI clock mux level.
2762	 */
2763	drm_WARN_ON(&i915->drm, 1);
2764
2765	return 0;
2766}
2767
2768static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2769{
2770	int ref_clock = i915->display.dpll.ref_clks.nssc;
2771
2772	/*
2773	 * For ICL+, the spec states: if reference frequency is 38.4,
2774	 * use 19.2 because the DPLL automatically divides that by 2.
2775	 */
2776	if (ref_clock == 38400)
2777		ref_clock = 19200;
2778
2779	return ref_clock;
2780}
2781
2782static int
2783icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2784	       struct skl_wrpll_params *wrpll_params)
2785{
2786	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2787	int ref_clock = icl_wrpll_ref_clock(i915);
2788	u32 afe_clock = crtc_state->port_clock * 5;
2789	u32 dco_min = 7998000;
2790	u32 dco_max = 10000000;
2791	u32 dco_mid = (dco_min + dco_max) / 2;
2792	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2793					 18, 20, 24, 28, 30, 32,  36,  40,
2794					 42, 44, 48, 50, 52, 54,  56,  60,
2795					 64, 66, 68, 70, 72, 76,  78,  80,
2796					 84, 88, 90, 92, 96, 98, 100, 102,
2797					  3,  5,  7,  9, 15, 21 };
2798	u32 dco, best_dco = 0, dco_centrality = 0;
2799	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2800	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2801
2802	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2803		dco = afe_clock * dividers[d];
2804
2805		if (dco <= dco_max && dco >= dco_min) {
2806			dco_centrality = abs(dco - dco_mid);
2807
2808			if (dco_centrality < best_dco_centrality) {
2809				best_dco_centrality = dco_centrality;
2810				best_div = dividers[d];
2811				best_dco = dco;
2812			}
2813		}
2814	}
2815
2816	if (best_div == 0)
2817		return -EINVAL;
2818
2819	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2820	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2821				  pdiv, qdiv, kdiv);
2822
2823	return 0;
2824}
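/*
 * For example, an HDMI port clock of 148,500 kHz gives an AFE clock of
 * 742,500 kHz; divider 12 is the only candidate whose DCO lands in range
 * (742,500 * 12 = 8,910,000 kHz), and icl_wrpll_get_multipliers() then
 * splits it into pdiv 2, qdiv 3, kdiv 2.
 */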
2825
2826static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2827				      const struct intel_shared_dpll *pll,
2828				      const struct intel_dpll_hw_state *dpll_hw_state)
2829{
2830	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2831	int ref_clock = icl_wrpll_ref_clock(i915);
2832	u32 dco_fraction;
2833	u32 p0, p1, p2, dco_freq;
2834
2835	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2836	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2837
2838	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2839		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2840			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2841	else
2842		p1 = 1;
2843
2844	switch (p0) {
2845	case DPLL_CFGCR1_PDIV_2:
2846		p0 = 2;
2847		break;
2848	case DPLL_CFGCR1_PDIV_3:
2849		p0 = 3;
2850		break;
2851	case DPLL_CFGCR1_PDIV_5:
2852		p0 = 5;
2853		break;
2854	case DPLL_CFGCR1_PDIV_7:
2855		p0 = 7;
2856		break;
2857	}
2858
2859	switch (p2) {
2860	case DPLL_CFGCR1_KDIV_1:
2861		p2 = 1;
2862		break;
2863	case DPLL_CFGCR1_KDIV_2:
2864		p2 = 2;
2865		break;
2866	case DPLL_CFGCR1_KDIV_3:
2867		p2 = 3;
2868		break;
2869	}
2870
2871	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2872		   ref_clock;
2873
2874	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2875		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2876
2877	if (ehl_combo_pll_div_frac_wa_needed(i915))
2878		dco_fraction *= 2;
2879
2880	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2881
2882	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2883		return 0;
2884
2885	return dco_freq / (p0 * p1 * p2 * 5);
2886}
2887
2888static void icl_calc_dpll_state(struct drm_i915_private *i915,
2889				const struct skl_wrpll_params *pll_params,
2890				struct intel_dpll_hw_state *dpll_hw_state)
2891{
2892	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2893	u32 dco_fraction = pll_params->dco_fraction;
2894
2895	if (ehl_combo_pll_div_frac_wa_needed(i915))
2896		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2897
2898	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2899			    pll_params->dco_integer;
2900
2901	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2902			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2903			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2904			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2905
2906	if (DISPLAY_VER(i915) >= 12)
2907		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2908	else
2909		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2910
2911	if (i915->display.vbt.override_afc_startup)
2912		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2913}
2914
2915static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2916				    u32 *target_dco_khz,
2917				    struct icl_dpll_hw_state *hw_state,
2918				    bool is_dkl)
2919{
2920	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2921	u32 dco_min_freq, dco_max_freq;
2922	unsigned int i;
2923	int div2;
2924
2925	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2926	dco_max_freq = is_dp ? 8100000 : 10000000;
2927
2928	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2929		int div1 = div1_vals[i];
2930
2931		for (div2 = 10; div2 > 0; div2--) {
2932			int dco = div1 * div2 * clock_khz * 5;
2933			int a_divratio, tlinedrv, inputsel;
2934			u32 hsdiv;
2935
2936			if (dco < dco_min_freq || dco > dco_max_freq)
2937				continue;
2938
2939			if (div2 >= 2) {
2940				/*
2941				 * Note: a_divratio not matching TGL BSpec
2942				 * algorithm but matching hardcoded values and
2943				 * working on HW for DP alt-mode at least
2944				 */
2945				a_divratio = is_dp ? 10 : 5;
2946				tlinedrv = is_dkl ? 1 : 2;
2947			} else {
2948				a_divratio = 5;
2949				tlinedrv = 0;
2950			}
2951			inputsel = is_dp ? 0 : 1;
2952
2953			switch (div1) {
2954			default:
2955				MISSING_CASE(div1);
2956				fallthrough;
2957			case 2:
2958				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2959				break;
2960			case 3:
2961				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2962				break;
2963			case 5:
2964				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2965				break;
2966			case 7:
2967				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2968				break;
2969			}
2970
2971			*target_dco_khz = dco;
2972
2973			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2974
2975			hw_state->mg_clktop2_coreclkctl1 =
2976				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2977
2978			hw_state->mg_clktop2_hsclkctl =
2979				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2980				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2981				hsdiv |
2982				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2983
2984			return 0;
2985		}
2986	}
2987
2988	return -EINVAL;
2989}
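/*
 * For example, a 270,000 kHz DP link clock requires a DCO of exactly
 * 8,100,000 kHz, which the search above satisfies with div1 = 3 and
 * div2 = 2 (3 * 2 * 270,000 * 5 = 8,100,000), i.e. HSDIV ratio 3 and
 * DSDIV ratio 2.
 */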
2990
2991/*
2992 * The specification for this function uses real numbers, so the math had to be
2993 * adapted to an integer-only calculation; that's why it looks so different.
2994 */
2995static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2996				 struct intel_dpll_hw_state *dpll_hw_state)
2997{
2998	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2999	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3000	int refclk_khz = i915->display.dpll.ref_clks.nssc;
3001	int clock = crtc_state->port_clock;
3002	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3003	u32 iref_ndiv, iref_trim, iref_pulse_w;
3004	u32 prop_coeff, int_coeff;
3005	u32 tdc_targetcnt, feedfwgain;
3006	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3007	u64 tmp;
3008	bool use_ssc = false;
3009	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3010	bool is_dkl = DISPLAY_VER(i915) >= 12;
3011	int ret;
3012
3013	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3014				       hw_state, is_dkl);
3015	if (ret)
3016		return ret;
3017
3018	m1div = 2;
3019	m2div_int = dco_khz / (refclk_khz * m1div);
3020	if (m2div_int > 255) {
3021		if (!is_dkl) {
3022			m1div = 4;
3023			m2div_int = dco_khz / (refclk_khz * m1div);
3024		}
3025
3026		if (m2div_int > 255)
3027			return -EINVAL;
3028	}
3029	m2div_rem = dco_khz % (refclk_khz * m1div);
3030
3031	tmp = (u64)m2div_rem * (1 << 22);
3032	do_div(tmp, refclk_khz * m1div);
3033	m2div_frac = tmp;
3034
3035	switch (refclk_khz) {
3036	case 19200:
3037		iref_ndiv = 1;
3038		iref_trim = 28;
3039		iref_pulse_w = 1;
3040		break;
3041	case 24000:
3042		iref_ndiv = 1;
3043		iref_trim = 25;
3044		iref_pulse_w = 2;
3045		break;
3046	case 38400:
3047		iref_ndiv = 2;
3048		iref_trim = 28;
3049		iref_pulse_w = 1;
3050		break;
3051	default:
3052		MISSING_CASE(refclk_khz);
3053		return -EINVAL;
3054	}
3055
3056	/*
3057	 * tdc_res = 0.000003
3058	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3059	 *
3060	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3061	 * was supposed to be a division, but we rearranged the operations of
3062	 * the formula to avoid early divisions so we don't multiply the
3063	 * rounding errors.
3064	 *
3065	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3066	 * we also rearrange to work with integers.
3067	 *
3068	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3069	 * last division by 10.
3070	 */
3071	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
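	/* e.g. for refclk 38400 kHz: (2000000000 / 5068800 + 5) / 10 = (394 + 5) / 10 = 39 */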
3072
3073	/*
3074	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3075	 * 32 bits. That's not a problem since we round the division down
3076	 * anyway.
3077	 */
3078	feedfwgain = (use_ssc || m2div_rem > 0) ?
3079		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3080
3081	if (dco_khz >= 9000000) {
3082		prop_coeff = 5;
3083		int_coeff = 10;
3084	} else {
3085		prop_coeff = 4;
3086		int_coeff = 8;
3087	}
3088
3089	if (use_ssc) {
3090		tmp = mul_u32_u32(dco_khz, 47 * 32);
3091		do_div(tmp, refclk_khz * m1div * 10000);
3092		ssc_stepsize = tmp;
3093
3094		tmp = mul_u32_u32(dco_khz, 1000);
3095		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3096	} else {
3097		ssc_stepsize = 0;
3098		ssc_steplen = 0;
3099	}
3100	ssc_steplog = 4;
3101
3102	/* write pll_state calculations */
3103	if (is_dkl) {
3104		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3105					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3106					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3107					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3108		if (i915->display.vbt.override_afc_startup) {
3109			u8 val = i915->display.vbt.override_afc_startup_val;
3110
3111			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3112		}
3113
3114		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3115					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3116
3117		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3118					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3119					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3120					(use_ssc ? DKL_PLL_SSC_EN : 0);
3121
3122		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3123					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3124
3125		hw_state->mg_pll_tdc_coldst_bias =
3126				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3127				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3128
3129	} else {
3130		hw_state->mg_pll_div0 =
3131			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3132			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3133			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3134
3135		hw_state->mg_pll_div1 =
3136			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3137			MG_PLL_DIV1_DITHER_DIV_2 |
3138			MG_PLL_DIV1_NDIVRATIO(1) |
3139			MG_PLL_DIV1_FBPREDIV(m1div);
3140
3141		hw_state->mg_pll_lf =
3142			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3143			MG_PLL_LF_AFCCNTSEL_512 |
3144			MG_PLL_LF_GAINCTRL(1) |
3145			MG_PLL_LF_INT_COEFF(int_coeff) |
3146			MG_PLL_LF_PROP_COEFF(prop_coeff);
3147
3148		hw_state->mg_pll_frac_lock =
3149			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3150			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3151			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3152			MG_PLL_FRAC_LOCK_DCODITHEREN |
3153			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3154		if (use_ssc || m2div_rem > 0)
3155			hw_state->mg_pll_frac_lock |=
3156				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3157
3158		hw_state->mg_pll_ssc =
3159			(use_ssc ? MG_PLL_SSC_EN : 0) |
3160			MG_PLL_SSC_TYPE(2) |
3161			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3162			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3163			MG_PLL_SSC_FLLEN |
3164			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3165
3166		hw_state->mg_pll_tdc_coldst_bias =
3167			MG_PLL_TDC_COLDST_COLDSTART |
3168			MG_PLL_TDC_COLDST_IREFINT_EN |
3169			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3170			MG_PLL_TDC_TDCOVCCORR_EN |
3171			MG_PLL_TDC_TDCSEL(3);
3172
3173		hw_state->mg_pll_bias =
3174			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3175			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3176			MG_PLL_BIAS_BIAS_BONUS(10) |
3177			MG_PLL_BIAS_BIASCAL_EN |
3178			MG_PLL_BIAS_CTRIM(12) |
3179			MG_PLL_BIAS_VREF_RDAC(4) |
3180			MG_PLL_BIAS_IREFTRIM(iref_trim);
3181
3182		if (refclk_khz == 38400) {
3183			hw_state->mg_pll_tdc_coldst_bias_mask =
3184				MG_PLL_TDC_COLDST_COLDSTART;
3185			hw_state->mg_pll_bias_mask = 0;
3186		} else {
3187			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3188			hw_state->mg_pll_bias_mask = -1U;
3189		}
3190
3191		hw_state->mg_pll_tdc_coldst_bias &=
3192			hw_state->mg_pll_tdc_coldst_bias_mask;
3193		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3194	}
3195
3196	return 0;
3197}
3198
3199static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3200				   const struct intel_shared_dpll *pll,
3201				   const struct intel_dpll_hw_state *dpll_hw_state)
3202{
3203	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3204	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3205	u64 tmp;
3206
3207	ref_clock = i915->display.dpll.ref_clks.nssc;
3208
3209	if (DISPLAY_VER(i915) >= 12) {
3210		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3211		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3212		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3213
3214		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3215			m2_frac = hw_state->mg_pll_bias &
3216				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3217			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3218		} else {
3219			m2_frac = 0;
3220		}
3221	} else {
3222		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3223		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3224
3225		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3226			m2_frac = hw_state->mg_pll_div0 &
3227				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3228			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3229		} else {
3230			m2_frac = 0;
3231		}
3232	}
3233
3234	switch (hw_state->mg_clktop2_hsclkctl &
3235		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3236	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3237		div1 = 2;
3238		break;
3239	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3240		div1 = 3;
3241		break;
3242	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3243		div1 = 5;
3244		break;
3245	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3246		div1 = 7;
3247		break;
3248	default:
3249		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3250		return 0;
3251	}
3252
3253	div2 = (hw_state->mg_clktop2_hsclkctl &
3254		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3255		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3256
3257	/* A div2 value of 0 is the same as 1, i.e. no division */
3258	if (div2 == 0)
3259		div2 = 1;
3260
3261	/*
3262	 * Adjust the original formula to delay the division by 2^22 in order to
3263	 * minimize possible rounding errors.
3264	 */
3265	tmp = (u64)m1 * m2_int * ref_clock +
3266	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3267	tmp = div_u64(tmp, 5 * div1 * div2);
3268
3269	return tmp;
3270}
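/*
 * Continuing the 270,000 kHz example above with a 38,400 kHz reference:
 * m1 = 2 and m2 = 105 + 36,000/76,800, so the DCO reads back as
 * 2 * 105.46875 * 38,400 = 8,100,000 kHz and the link clock as
 * 8,100,000 / (5 * 3 * 2) = 270,000 kHz.
 */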
3271
3272/**
3273 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3274 * @crtc_state: state for the CRTC to select the DPLL for
3275 * @port_dpll_id: the active @port_dpll_id to select
3276 *
3277 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3278 * CRTC.
3279 */
3280void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3281			      enum icl_port_dpll_id port_dpll_id)
3282{
3283	struct icl_port_dpll *port_dpll =
3284		&crtc_state->icl_port_dplls[port_dpll_id];
3285
3286	crtc_state->shared_dpll = port_dpll->pll;
3287	crtc_state->dpll_hw_state = port_dpll->hw_state;
3288}
3289
3290static void icl_update_active_dpll(struct intel_atomic_state *state,
3291				   struct intel_crtc *crtc,
3292				   struct intel_encoder *encoder)
3293{
3294	struct intel_crtc_state *crtc_state =
3295		intel_atomic_get_new_crtc_state(state, crtc);
3296	struct intel_digital_port *primary_port;
3297	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3298
3299	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3300		enc_to_mst(encoder)->primary :
3301		enc_to_dig_port(encoder);
3302
3303	if (primary_port &&
3304	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3305	     intel_tc_port_in_legacy_mode(primary_port)))
3306		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3307
3308	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3309}
3310
3311static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3312				      struct intel_crtc *crtc)
3313{
3314	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3315	struct intel_crtc_state *crtc_state =
3316		intel_atomic_get_new_crtc_state(state, crtc);
3317	struct icl_port_dpll *port_dpll =
3318		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3319	struct skl_wrpll_params pll_params = {};
3320	int ret;
3321
3322	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3323	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3324		ret = icl_calc_wrpll(crtc_state, &pll_params);
3325	else
3326		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3327
3328	if (ret)
3329		return ret;
3330
3331	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3332
3333	/* this is mainly for the fastset check */
3334	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3335
3336	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3337							    &port_dpll->hw_state);
3338
3339	return 0;
3340}
3341
3342static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3343				  struct intel_crtc *crtc,
3344				  struct intel_encoder *encoder)
3345{
3346	struct intel_display *display = to_intel_display(crtc);
3347	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3348	struct intel_crtc_state *crtc_state =
3349		intel_atomic_get_new_crtc_state(state, crtc);
3350	struct icl_port_dpll *port_dpll =
3351		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3352	enum port port = encoder->port;
3353	unsigned long dpll_mask;
3354
3355	if (IS_ALDERLAKE_S(i915)) {
3356		dpll_mask =
3357			BIT(DPLL_ID_DG1_DPLL3) |
3358			BIT(DPLL_ID_DG1_DPLL2) |
3359			BIT(DPLL_ID_ICL_DPLL1) |
3360			BIT(DPLL_ID_ICL_DPLL0);
3361	} else if (IS_DG1(i915)) {
3362		if (port == PORT_D || port == PORT_E) {
3363			dpll_mask =
3364				BIT(DPLL_ID_DG1_DPLL2) |
3365				BIT(DPLL_ID_DG1_DPLL3);
3366		} else {
3367			dpll_mask =
3368				BIT(DPLL_ID_DG1_DPLL0) |
3369				BIT(DPLL_ID_DG1_DPLL1);
3370		}
3371	} else if (IS_ROCKETLAKE(i915)) {
3372		dpll_mask =
3373			BIT(DPLL_ID_EHL_DPLL4) |
3374			BIT(DPLL_ID_ICL_DPLL1) |
3375			BIT(DPLL_ID_ICL_DPLL0);
3376	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3377		   port != PORT_A) {
3378		dpll_mask =
3379			BIT(DPLL_ID_EHL_DPLL4) |
3380			BIT(DPLL_ID_ICL_DPLL1) |
3381			BIT(DPLL_ID_ICL_DPLL0);
3382	} else {
3383		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3384	}
3385
3386	/* Eliminate DPLLs from consideration if reserved by HTI */
3387	dpll_mask &= ~intel_hti_dpll_mask(display);
3388
3389	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3390						&port_dpll->hw_state,
3391						dpll_mask);
3392	if (!port_dpll->pll)
3393		return -EINVAL;
3394
3395	intel_reference_shared_dpll(state, crtc,
3396				    port_dpll->pll, &port_dpll->hw_state);
3397
3398	icl_update_active_dpll(state, crtc, encoder);
3399
3400	return 0;
3401}
3402
3403static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3404				    struct intel_crtc *crtc)
3405{
3406	struct drm_i915_private *i915 = to_i915(state->base.dev);
3407	struct intel_crtc_state *crtc_state =
3408		intel_atomic_get_new_crtc_state(state, crtc);
3409	const struct intel_crtc_state *old_crtc_state =
3410		intel_atomic_get_old_crtc_state(state, crtc);
3411	struct icl_port_dpll *port_dpll =
3412		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3413	struct skl_wrpll_params pll_params = {};
3414	int ret;
3415
3416	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3417	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3418	if (ret)
3419		return ret;
3420
3421	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3422
3423	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3424	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3425	if (ret)
3426		return ret;
3427
3428	/* this is mainly for the fastset check */
3429	if (old_crtc_state->shared_dpll &&
3430	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3431		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3432	else
3433		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3434
3435	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3436							 &port_dpll->hw_state);
3437
3438	return 0;
3439}
3440
3441static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3442				struct intel_crtc *crtc,
3443				struct intel_encoder *encoder)
3444{
3445	struct intel_crtc_state *crtc_state =
3446		intel_atomic_get_new_crtc_state(state, crtc);
3447	struct icl_port_dpll *port_dpll =
3448		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3449	enum intel_dpll_id dpll_id;
3450	int ret;
3451
3452	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3453	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3454						&port_dpll->hw_state,
3455						BIT(DPLL_ID_ICL_TBTPLL));
3456	if (!port_dpll->pll)
3457		return -EINVAL;
3458	intel_reference_shared_dpll(state, crtc,
3459				    port_dpll->pll, &port_dpll->hw_state);
3460
3461
3462	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3463	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3464	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3465						&port_dpll->hw_state,
3466						BIT(dpll_id));
3467	if (!port_dpll->pll) {
3468		ret = -EINVAL;
3469		goto err_unreference_tbt_pll;
3470	}
3471	intel_reference_shared_dpll(state, crtc,
3472				    port_dpll->pll, &port_dpll->hw_state);
3473
3474	icl_update_active_dpll(state, crtc, encoder);
3475
3476	return 0;
3477
3478err_unreference_tbt_pll:
3479	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3480	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3481
3482	return ret;
3483}
3484
3485static int icl_compute_dplls(struct intel_atomic_state *state,
3486			     struct intel_crtc *crtc,
3487			     struct intel_encoder *encoder)
3488{
3489	if (intel_encoder_is_combo(encoder))
3490		return icl_compute_combo_phy_dpll(state, crtc);
3491	else if (intel_encoder_is_tc(encoder))
3492		return icl_compute_tc_phy_dplls(state, crtc);
3493
3494	MISSING_CASE(encoder->port);
3495
3496	return 0;
3497}
3498
3499static int icl_get_dplls(struct intel_atomic_state *state,
3500			 struct intel_crtc *crtc,
3501			 struct intel_encoder *encoder)
3502{
3503	if (intel_encoder_is_combo(encoder))
3504		return icl_get_combo_phy_dpll(state, crtc, encoder);
3505	else if (intel_encoder_is_tc(encoder))
3506		return icl_get_tc_phy_dplls(state, crtc, encoder);
3507
3508	MISSING_CASE(encoder->port);
3509
3510	return -EINVAL;
3511}
3512
3513static void icl_put_dplls(struct intel_atomic_state *state,
3514			  struct intel_crtc *crtc)
3515{
3516	const struct intel_crtc_state *old_crtc_state =
3517		intel_atomic_get_old_crtc_state(state, crtc);
3518	struct intel_crtc_state *new_crtc_state =
3519		intel_atomic_get_new_crtc_state(state, crtc);
3520	enum icl_port_dpll_id id;
3521
3522	new_crtc_state->shared_dpll = NULL;
3523
3524	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3525		const struct icl_port_dpll *old_port_dpll =
3526			&old_crtc_state->icl_port_dplls[id];
3527		struct icl_port_dpll *new_port_dpll =
3528			&new_crtc_state->icl_port_dplls[id];
3529
3530		new_port_dpll->pll = NULL;
3531
3532		if (!old_port_dpll->pll)
3533			continue;
3534
3535		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3536	}
3537}
3538
3539static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3540				struct intel_shared_dpll *pll,
3541				struct intel_dpll_hw_state *dpll_hw_state)
3542{
3543	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3544	const enum intel_dpll_id id = pll->info->id;
3545	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3546	intel_wakeref_t wakeref;
3547	bool ret = false;
3548	u32 val;
3549
3550	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3551
3552	wakeref = intel_display_power_get_if_enabled(i915,
3553						     POWER_DOMAIN_DISPLAY_CORE);
3554	if (!wakeref)
3555		return false;
3556
3557	val = intel_de_read(i915, enable_reg);
3558	if (!(val & PLL_ENABLE))
3559		goto out;
3560
3561	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3562						  MG_REFCLKIN_CTL(tc_port));
3563	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3564
3565	hw_state->mg_clktop2_coreclkctl1 =
3566		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3567	hw_state->mg_clktop2_coreclkctl1 &=
3568		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3569
3570	hw_state->mg_clktop2_hsclkctl =
3571		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3572	hw_state->mg_clktop2_hsclkctl &=
3573		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3574		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3575		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3576		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3577
3578	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3579	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3580	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3581	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3582						   MG_PLL_FRAC_LOCK(tc_port));
3583	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3584
3585	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3586	hw_state->mg_pll_tdc_coldst_bias =
3587		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3588
3589	if (i915->display.dpll.ref_clks.nssc == 38400) {
3590		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3591		hw_state->mg_pll_bias_mask = 0;
3592	} else {
3593		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3594		hw_state->mg_pll_bias_mask = -1U;
3595	}
3596
3597	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3598	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3599
3600	ret = true;
3601out:
3602	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3603	return ret;
3604}
3605
3606static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3607				 struct intel_shared_dpll *pll,
3608				 struct intel_dpll_hw_state *dpll_hw_state)
3609{
3610	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3611	const enum intel_dpll_id id = pll->info->id;
3612	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3613	intel_wakeref_t wakeref;
3614	bool ret = false;
3615	u32 val;
3616
3617	wakeref = intel_display_power_get_if_enabled(i915,
3618						     POWER_DOMAIN_DISPLAY_CORE);
3619	if (!wakeref)
3620		return false;
3621
3622	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3623	if (!(val & PLL_ENABLE))
3624		goto out;
3625
3626	/*
3627	 * All registers read here have the same HIP_INDEX_REG even though
3628	 * they are on different building blocks
3629	 */
3630	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3631						       DKL_REFCLKIN_CTL(tc_port));
3632	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3633
3634	hw_state->mg_clktop2_hsclkctl =
3635		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3636	hw_state->mg_clktop2_hsclkctl &=
3637		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3638		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3639		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3640		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3641
3642	hw_state->mg_clktop2_coreclkctl1 =
3643		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3644	hw_state->mg_clktop2_coreclkctl1 &=
3645		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3646
3647	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3648	val = DKL_PLL_DIV0_MASK;
3649	if (i915->display.vbt.override_afc_startup)
3650		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3651	hw_state->mg_pll_div0 &= val;
3652
3653	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3654	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3655				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3656
3657	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3658	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3659				 DKL_PLL_SSC_STEP_LEN_MASK |
3660				 DKL_PLL_SSC_STEP_NUM_MASK |
3661				 DKL_PLL_SSC_EN);
3662
3663	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3664	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3665				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3666
3667	hw_state->mg_pll_tdc_coldst_bias =
3668		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3669	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3670					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3671
3672	ret = true;
3673out:
3674	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3675	return ret;
3676}
3677
3678static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3679				 struct intel_shared_dpll *pll,
3680				 struct intel_dpll_hw_state *dpll_hw_state,
3681				 i915_reg_t enable_reg)
3682{
3683	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3684	const enum intel_dpll_id id = pll->info->id;
3685	intel_wakeref_t wakeref;
3686	bool ret = false;
3687	u32 val;
3688
3689	wakeref = intel_display_power_get_if_enabled(i915,
3690						     POWER_DOMAIN_DISPLAY_CORE);
3691	if (!wakeref)
3692		return false;
3693
3694	val = intel_de_read(i915, enable_reg);
3695	if (!(val & PLL_ENABLE))
3696		goto out;
3697
3698	if (IS_ALDERLAKE_S(i915)) {
3699		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3700		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3701	} else if (IS_DG1(i915)) {
3702		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3703		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3704	} else if (IS_ROCKETLAKE(i915)) {
3705		hw_state->cfgcr0 = intel_de_read(i915,
3706						 RKL_DPLL_CFGCR0(id));
3707		hw_state->cfgcr1 = intel_de_read(i915,
3708						 RKL_DPLL_CFGCR1(id));
3709	} else if (DISPLAY_VER(i915) >= 12) {
3710		hw_state->cfgcr0 = intel_de_read(i915,
3711						 TGL_DPLL_CFGCR0(id));
3712		hw_state->cfgcr1 = intel_de_read(i915,
3713						 TGL_DPLL_CFGCR1(id));
3714		if (i915->display.vbt.override_afc_startup) {
3715			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3716			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3717		}
3718	} else {
3719		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3720		    id == DPLL_ID_EHL_DPLL4) {
3721			hw_state->cfgcr0 = intel_de_read(i915,
3722							 ICL_DPLL_CFGCR0(4));
3723			hw_state->cfgcr1 = intel_de_read(i915,
3724							 ICL_DPLL_CFGCR1(4));
3725		} else {
3726			hw_state->cfgcr0 = intel_de_read(i915,
3727							 ICL_DPLL_CFGCR0(id));
3728			hw_state->cfgcr1 = intel_de_read(i915,
3729							 ICL_DPLL_CFGCR1(id));
3730		}
3731	}
3732
3733	ret = true;
3734out:
3735	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3736	return ret;
3737}
3738
3739static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3740				   struct intel_shared_dpll *pll,
3741				   struct intel_dpll_hw_state *dpll_hw_state)
3742{
3743	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3744
3745	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3746}
3747
3748static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3749				 struct intel_shared_dpll *pll,
3750				 struct intel_dpll_hw_state *dpll_hw_state)
3751{
3752	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3753}
3754
3755static void icl_dpll_write(struct drm_i915_private *i915,
3756			   struct intel_shared_dpll *pll,
3757			   const struct icl_dpll_hw_state *hw_state)
3758{
3759	const enum intel_dpll_id id = pll->info->id;
3760	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3761
3762	if (IS_ALDERLAKE_S(i915)) {
3763		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3764		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3765	} else if (IS_DG1(i915)) {
3766		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3767		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3768	} else if (IS_ROCKETLAKE(i915)) {
3769		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3770		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3771	} else if (DISPLAY_VER(i915) >= 12) {
3772		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3773		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3774		div0_reg = TGL_DPLL0_DIV0(id);
3775	} else {
3776		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3777		    id == DPLL_ID_EHL_DPLL4) {
3778			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3779			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3780		} else {
3781			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3782			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3783		}
3784	}
3785
3786	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3787	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3788	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3789			 !i915_mmio_reg_valid(div0_reg));
3790	if (i915->display.vbt.override_afc_startup &&
3791	    i915_mmio_reg_valid(div0_reg))
3792		intel_de_rmw(i915, div0_reg,
3793			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3794	intel_de_posting_read(i915, cfgcr1_reg);
3795}
3796
3797static void icl_mg_pll_write(struct drm_i915_private *i915,
3798			     struct intel_shared_dpll *pll,
3799			     const struct icl_dpll_hw_state *hw_state)
3800{
3801	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3802
3803	/*
3804	 * Some of the following registers have reserved fields, so program
3805	 * these with RMW based on a mask. The mask can be fixed or generated
3806	 * during the calc/readout phase if the mask depends on some other HW
3807	 * state like refclk, see icl_calc_mg_pll_state().
3808	 */
3809	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3810		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3811
3812	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3813		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3814		     hw_state->mg_clktop2_coreclkctl1);
3815
3816	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3817		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3818		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3819		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3820		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3821		     hw_state->mg_clktop2_hsclkctl);
3822
3823	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3824	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3825	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3826	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3827		       hw_state->mg_pll_frac_lock);
3828	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3829
3830	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3831		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3832
3833	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3834		     hw_state->mg_pll_tdc_coldst_bias_mask,
3835		     hw_state->mg_pll_tdc_coldst_bias);
3836
3837	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3838}
3839
3840static void dkl_pll_write(struct drm_i915_private *i915,
3841			  struct intel_shared_dpll *pll,
3842			  const struct icl_dpll_hw_state *hw_state)
3843{
3844	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3845	u32 val;
3846
3847	/*
3848	 * All registers programmed here have the same HIP_INDEX_REG even
3849	 * though they are on different building blocks
3850	 */
3851	/* All the registers are RMW */
3852	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3853	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3854	val |= hw_state->mg_refclkin_ctl;
3855	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3856
3857	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3858	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3859	val |= hw_state->mg_clktop2_coreclkctl1;
3860	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3861
3862	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3863	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3864		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3865		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3866		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3867	val |= hw_state->mg_clktop2_hsclkctl;
3868	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3869
3870	val = DKL_PLL_DIV0_MASK;
3871	if (i915->display.vbt.override_afc_startup)
3872		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3873	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3874			  hw_state->mg_pll_div0);
3875
3876	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3877	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3878		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3879	val |= hw_state->mg_pll_div1;
3880	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3881
3882	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3883	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3884		 DKL_PLL_SSC_STEP_LEN_MASK |
3885		 DKL_PLL_SSC_STEP_NUM_MASK |
3886		 DKL_PLL_SSC_EN);
3887	val |= hw_state->mg_pll_ssc;
3888	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3889
3890	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3891	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3892		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3893	val |= hw_state->mg_pll_bias;
3894	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3895
3896	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3897	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3898		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3899	val |= hw_state->mg_pll_tdc_coldst_bias;
3900	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3901
3902	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3903}
3904
3905static void icl_pll_power_enable(struct drm_i915_private *i915,
3906				 struct intel_shared_dpll *pll,
3907				 i915_reg_t enable_reg)
3908{
3909	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3910
3911	/*
3912	 * The spec says we need to "wait" but it also says it should be
3913	 * immediate.
3914	 */
3915	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3916		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3917			pll->info->id);
3918}
3919
3920static void icl_pll_enable(struct drm_i915_private *i915,
3921			   struct intel_shared_dpll *pll,
3922			   i915_reg_t enable_reg)
3923{
3924	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3925
3926	/* Timeout is actually 600us. */
3927	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3928		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3929}
3930
3931static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3932{
3933	u32 val;
3934
3935	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3936	    pll->info->id != DPLL_ID_ICL_DPLL0)
3937		return;
3938	/*
3939	 * Wa_16011069516:adl-p[a0]
3940	 *
3941	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3942	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3943	 * sanity check this assumption with a double read, which presumably
3944	 * returns the correct value even with clock gating on.
3945	 *
3946	 * Instead of the usual place for workarounds we apply this one here,
3947	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3948	 */
3949	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3950	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3951	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3952		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3953}
3954
3955static void combo_pll_enable(struct drm_i915_private *i915,
3956			     struct intel_shared_dpll *pll,
3957			     const struct intel_dpll_hw_state *dpll_hw_state)
3958{
3959	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3960	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3961
3962	icl_pll_power_enable(i915, pll, enable_reg);
3963
3964	icl_dpll_write(i915, pll, hw_state);
3965
3966	/*
3967	 * DVFS pre sequence would be here, but in our driver the cdclk code
3968	 * paths should already be setting the appropriate voltage, hence we do
3969	 * nothing here.
3970	 */
3971
3972	icl_pll_enable(i915, pll, enable_reg);
3973
3974	adlp_cmtg_clock_gating_wa(i915, pll);
3975
3976	/* DVFS post sequence would be here. See the comment above. */
3977}
3978
3979static void tbt_pll_enable(struct drm_i915_private *i915,
3980			   struct intel_shared_dpll *pll,
3981			   const struct intel_dpll_hw_state *dpll_hw_state)
3982{
3983	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3984
3985	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3986
3987	icl_dpll_write(i915, pll, hw_state);
3988
3989	/*
3990	 * DVFS pre sequence would be here, but in our driver the cdclk code
3991	 * paths should already be setting the appropriate voltage, hence we do
3992	 * nothing here.
3993	 */
3994
3995	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3996
3997	/* DVFS post sequence would be here. See the comment above. */
3998}
3999
4000static void mg_pll_enable(struct drm_i915_private *i915,
4001			  struct intel_shared_dpll *pll,
4002			  const struct intel_dpll_hw_state *dpll_hw_state)
4003{
4004	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4005	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4006
4007	icl_pll_power_enable(i915, pll, enable_reg);
4008
4009	if (DISPLAY_VER(i915) >= 12)
4010		dkl_pll_write(i915, pll, hw_state);
4011	else
4012		icl_mg_pll_write(i915, pll, hw_state);
4013
4014	/*
4015	 * DVFS pre sequence would be here, but in our driver the cdclk code
4016	 * paths should already be setting the appropriate voltage, hence we do
4017	 * nothing here.
4018	 */
4019
4020	icl_pll_enable(i915, pll, enable_reg);
4021
4022	/* DVFS post sequence would be here. See the comment above. */
4023}
4024
4025static void icl_pll_disable(struct drm_i915_private *i915,
4026			    struct intel_shared_dpll *pll,
4027			    i915_reg_t enable_reg)
4028{
4029	/* The first steps are done by intel_ddi_post_disable(). */
4030
4031	/*
4032	 * DVFS pre sequence would be here, but in our driver the cdclk code
4033	 * paths should already be setting the appropriate voltage, hence we do
4034	 * nothing here.
4035	 */
4036
4037	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4038
4039	/* Timeout is actually 1us. */
4040	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4041		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4042
4043	/* DVFS post sequence would be here. See the comment above. */
4044
4045	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4046
4047	/*
4048	 * The spec says we need to "wait" but it also says it should be
4049	 * immediate.
4050	 */
4051	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4052		drm_err(&i915->drm, "PLL %d Power not disabled\n",
4053			pll->info->id);
4054}
4055
4056static void combo_pll_disable(struct drm_i915_private *i915,
4057			      struct intel_shared_dpll *pll)
4058{
4059	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4060
4061	icl_pll_disable(i915, pll, enable_reg);
4062}
4063
4064static void tbt_pll_disable(struct drm_i915_private *i915,
4065			    struct intel_shared_dpll *pll)
4066{
4067	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4068}
4069
4070static void mg_pll_disable(struct drm_i915_private *i915,
4071			   struct intel_shared_dpll *pll)
4072{
4073	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4074
4075	icl_pll_disable(i915, pll, enable_reg);
4076}
4077
4078static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4079{
4080	/* No SSC ref */
4081	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4082}
4083
4084static void icl_dump_hw_state(struct drm_printer *p,
4085			      const struct intel_dpll_hw_state *dpll_hw_state)
4086{
4087	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4088
4089	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4090		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4091		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4092		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4093		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4094		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4095		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4096		   hw_state->mg_refclkin_ctl,
4097		   hw_state->mg_clktop2_coreclkctl1,
4098		   hw_state->mg_clktop2_hsclkctl,
4099		   hw_state->mg_pll_div0,
4100		   hw_state->mg_pll_div1,
4101		   hw_state->mg_pll_lf,
4102		   hw_state->mg_pll_frac_lock,
4103		   hw_state->mg_pll_ssc,
4104		   hw_state->mg_pll_bias,
4105		   hw_state->mg_pll_tdc_coldst_bias);
4106}
4107
4108static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4109				 const struct intel_dpll_hw_state *_b)
4110{
4111	const struct icl_dpll_hw_state *a = &_a->icl;
4112	const struct icl_dpll_hw_state *b = &_b->icl;
4113
4114	/* FIXME split combo vs. mg more thoroughly */
4115	return a->cfgcr0 == b->cfgcr0 &&
4116		a->cfgcr1 == b->cfgcr1 &&
4117		a->div0 == b->div0 &&
4118		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4119		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4120		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4121		a->mg_pll_div0 == b->mg_pll_div0 &&
4122		a->mg_pll_div1 == b->mg_pll_div1 &&
4123		a->mg_pll_lf == b->mg_pll_lf &&
4124		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4125		a->mg_pll_ssc == b->mg_pll_ssc &&
4126		a->mg_pll_bias == b->mg_pll_bias &&
4127		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4128}
4129
4130static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4131	.enable = combo_pll_enable,
4132	.disable = combo_pll_disable,
4133	.get_hw_state = combo_pll_get_hw_state,
4134	.get_freq = icl_ddi_combo_pll_get_freq,
4135};
4136
4137static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4138	.enable = tbt_pll_enable,
4139	.disable = tbt_pll_disable,
4140	.get_hw_state = tbt_pll_get_hw_state,
4141	.get_freq = icl_ddi_tbt_pll_get_freq,
4142};
4143
4144static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4145	.enable = mg_pll_enable,
4146	.disable = mg_pll_disable,
4147	.get_hw_state = mg_pll_get_hw_state,
4148	.get_freq = icl_ddi_mg_pll_get_freq,
4149};
4150
4151static const struct dpll_info icl_plls[] = {
4152	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4153	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4154	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4155	  .is_alt_port_dpll = true, },
4156	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4157	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4158	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4159	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4160	{}
4161};
4162
4163static const struct intel_dpll_mgr icl_pll_mgr = {
4164	.dpll_info = icl_plls,
4165	.compute_dplls = icl_compute_dplls,
4166	.get_dplls = icl_get_dplls,
4167	.put_dplls = icl_put_dplls,
4168	.update_active_dpll = icl_update_active_dpll,
4169	.update_ref_clks = icl_update_dpll_ref_clks,
4170	.dump_hw_state = icl_dump_hw_state,
4171	.compare_hw_state = icl_compare_hw_state,
4172};
4173
4174static const struct dpll_info ehl_plls[] = {
4175	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4176	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4177	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4178	  .power_domain = POWER_DOMAIN_DC_OFF, },
4179	{}
4180};
4181
4182static const struct intel_dpll_mgr ehl_pll_mgr = {
4183	.dpll_info = ehl_plls,
4184	.compute_dplls = icl_compute_dplls,
4185	.get_dplls = icl_get_dplls,
4186	.put_dplls = icl_put_dplls,
4187	.update_ref_clks = icl_update_dpll_ref_clks,
4188	.dump_hw_state = icl_dump_hw_state,
4189	.compare_hw_state = icl_compare_hw_state,
4190};
4191
4192static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4193	.enable = mg_pll_enable,
4194	.disable = mg_pll_disable,
4195	.get_hw_state = dkl_pll_get_hw_state,
4196	.get_freq = icl_ddi_mg_pll_get_freq,
4197};
4198
4199static const struct dpll_info tgl_plls[] = {
4200	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4201	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4202	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4203	  .is_alt_port_dpll = true, },
4204	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4205	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4206	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4207	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4208	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4209	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4210	{}
4211};
4212
4213static const struct intel_dpll_mgr tgl_pll_mgr = {
4214	.dpll_info = tgl_plls,
4215	.compute_dplls = icl_compute_dplls,
4216	.get_dplls = icl_get_dplls,
4217	.put_dplls = icl_put_dplls,
4218	.update_active_dpll = icl_update_active_dpll,
4219	.update_ref_clks = icl_update_dpll_ref_clks,
4220	.dump_hw_state = icl_dump_hw_state,
4221	.compare_hw_state = icl_compare_hw_state,
4222};
4223
4224static const struct dpll_info rkl_plls[] = {
4225	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4226	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4227	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4228	{}
4229};
4230
4231static const struct intel_dpll_mgr rkl_pll_mgr = {
4232	.dpll_info = rkl_plls,
4233	.compute_dplls = icl_compute_dplls,
4234	.get_dplls = icl_get_dplls,
4235	.put_dplls = icl_put_dplls,
4236	.update_ref_clks = icl_update_dpll_ref_clks,
4237	.dump_hw_state = icl_dump_hw_state,
4238	.compare_hw_state = icl_compare_hw_state,
4239};
4240
4241static const struct dpll_info dg1_plls[] = {
4242	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4243	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4244	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4245	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4246	{}
4247};
4248
4249static const struct intel_dpll_mgr dg1_pll_mgr = {
4250	.dpll_info = dg1_plls,
4251	.compute_dplls = icl_compute_dplls,
4252	.get_dplls = icl_get_dplls,
4253	.put_dplls = icl_put_dplls,
4254	.update_ref_clks = icl_update_dpll_ref_clks,
4255	.dump_hw_state = icl_dump_hw_state,
4256	.compare_hw_state = icl_compare_hw_state,
4257};
4258
4259static const struct dpll_info adls_plls[] = {
4260	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4261	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4262	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4263	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4264	{}
4265};
4266
4267static const struct intel_dpll_mgr adls_pll_mgr = {
4268	.dpll_info = adls_plls,
4269	.compute_dplls = icl_compute_dplls,
4270	.get_dplls = icl_get_dplls,
4271	.put_dplls = icl_put_dplls,
4272	.update_ref_clks = icl_update_dpll_ref_clks,
4273	.dump_hw_state = icl_dump_hw_state,
4274	.compare_hw_state = icl_compare_hw_state,
4275};
4276
4277static const struct dpll_info adlp_plls[] = {
4278	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4279	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4280	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4281	  .is_alt_port_dpll = true, },
4282	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4283	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4284	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4285	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4286	{}
4287};
4288
4289static const struct intel_dpll_mgr adlp_pll_mgr = {
4290	.dpll_info = adlp_plls,
4291	.compute_dplls = icl_compute_dplls,
4292	.get_dplls = icl_get_dplls,
4293	.put_dplls = icl_put_dplls,
4294	.update_active_dpll = icl_update_active_dpll,
4295	.update_ref_clks = icl_update_dpll_ref_clks,
4296	.dump_hw_state = icl_dump_hw_state,
4297	.compare_hw_state = icl_compare_hw_state,
4298};
4299
4300/**
4301 * intel_shared_dpll_init - Initialize shared DPLLs
4302 * @i915: i915 device
4303 *
4304 * Initialize shared DPLLs for @i915.
4305 */
4306void intel_shared_dpll_init(struct drm_i915_private *i915)
4307{
4308	const struct intel_dpll_mgr *dpll_mgr = NULL;
4309	const struct dpll_info *dpll_info;
4310	int i;
4311
4312	mutex_init(&i915->display.dpll.lock);
4313
4314	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4315		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4316		dpll_mgr = NULL;
4317	else if (IS_ALDERLAKE_P(i915))
4318		dpll_mgr = &adlp_pll_mgr;
4319	else if (IS_ALDERLAKE_S(i915))
4320		dpll_mgr = &adls_pll_mgr;
4321	else if (IS_DG1(i915))
4322		dpll_mgr = &dg1_pll_mgr;
4323	else if (IS_ROCKETLAKE(i915))
4324		dpll_mgr = &rkl_pll_mgr;
4325	else if (DISPLAY_VER(i915) >= 12)
4326		dpll_mgr = &tgl_pll_mgr;
4327	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4328		dpll_mgr = &ehl_pll_mgr;
4329	else if (DISPLAY_VER(i915) >= 11)
4330		dpll_mgr = &icl_pll_mgr;
4331	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4332		dpll_mgr = &bxt_pll_mgr;
4333	else if (DISPLAY_VER(i915) == 9)
4334		dpll_mgr = &skl_pll_mgr;
4335	else if (HAS_DDI(i915))
4336		dpll_mgr = &hsw_pll_mgr;
4337	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4338		dpll_mgr = &pch_pll_mgr;
4339
4340	if (!dpll_mgr)
4341		return;
4342
4343	dpll_info = dpll_mgr->dpll_info;
4344
4345	for (i = 0; dpll_info[i].name; i++) {
4346		if (drm_WARN_ON(&i915->drm,
4347				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4348			break;
4349
4350		/* must fit into unsigned long bitmask on 32bit */
4351		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4352			break;
4353
4354		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4355		i915->display.dpll.shared_dplls[i].index = i;
4356	}
4357
4358	i915->display.dpll.mgr = dpll_mgr;
4359	i915->display.dpll.num_shared_dpll = i;
4360}
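/*
 * Editor's note (illustrative, not part of the driver sources): on platforms
 * that get no PLL manager here (DISPLAY_VER(i915) >= 14 or DG2 per the check
 * above, or anything not matched by the chain), the function returns early,
 * so num_shared_dpll keeps its zero-initialised default. The
 * for_each_shared_dpll() loops used by the readout/sanitize/verify code
 * below then iterate over nothing, and the drm_WARN_ON(!dpll_mgr) checks in
 * intel_compute_shared_dplls()/intel_reserve_shared_dplls() catch any caller
 * that still expects a shared DPLL on such platforms.
 */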
4361
4362/**
4363 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4364 * @state: atomic state
4365 * @crtc: CRTC to compute DPLLs for
4366 * @encoder: encoder
4367 *
4368 * This function computes the DPLL state for the given CRTC and encoder.
4369 *
4370 * The new configuration in the atomic commit @state is made effective by
4371 * calling intel_shared_dpll_swap_state().
4372 *
4373 * Returns:
4374 * 0 on success, negative error code on failure.
4375 */
4376int intel_compute_shared_dplls(struct intel_atomic_state *state,
4377			       struct intel_crtc *crtc,
4378			       struct intel_encoder *encoder)
4379{
4380	struct drm_i915_private *i915 = to_i915(state->base.dev);
4381	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4382
4383	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4384		return -EINVAL;
4385
4386	return dpll_mgr->compute_dplls(state, crtc, encoder);
4387}
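/*
 * Editor's sketch of the check-phase ordering (hedged; the actual call sites
 * live elsewhere in the driver): a CRTC/encoder that needs a shared DPLL
 * would first compute and then reserve it, e.g.
 *
 *	ret = intel_compute_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 * Nothing touches the hardware at this point; the staged PLL state only
 * becomes effective once intel_shared_dpll_swap_state() runs during the
 * commit phase, as noted in the kernel-doc above.
 */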
4388
4389/**
4390 * intel_reserve_shared_dplls - reserve DPLLs for a CRTC and encoder combination
4391 * @state: atomic state
4392 * @crtc: CRTC to reserve DPLLs for
4393 * @encoder: encoder
4394 *
4395 * This function reserves all required DPLLs for the given CRTC and encoder
4396 * combination in the current atomic commit @state and the new @crtc atomic
4397 * state.
4398 *
4399 * The new configuration in the atomic commit @state is made effective by
4400 * calling intel_shared_dpll_swap_state().
4401 *
4402 * The reserved DPLLs should be released by calling
4403 * intel_release_shared_dplls().
4404 *
4405 * Returns:
4406 * 0 if all required DPLLs were successfully reserved,
4407 * negative error code otherwise.
4408 */
4409int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4410			       struct intel_crtc *crtc,
4411			       struct intel_encoder *encoder)
4412{
4413	struct drm_i915_private *i915 = to_i915(state->base.dev);
4414	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4415
4416	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4417		return -EINVAL;
4418
4419	return dpll_mgr->get_dplls(state, crtc, encoder);
4420}
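/*
 * Editor's note (hedged): the reservation above is staged purely in the
 * atomic @state. If the atomic check fails afterwards, dropping the state
 * also drops the reservation; only a committed state, swapped in via
 * intel_shared_dpll_swap_state(), updates the tracked PLL users, and those
 * users are eventually dropped again through intel_release_shared_dplls().
 */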
4421
4422/**
4423 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4424 * @state: atomic state
4425 * @crtc: crtc from which the DPLLs are to be released
4426 *
4427 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4428 * from the current atomic commit @state and the old @crtc atomic state.
4429 *
4430 * The new configuration in the atomic commit @state is made effective by
4431 * calling intel_shared_dpll_swap_state().
4432 */
4433void intel_release_shared_dplls(struct intel_atomic_state *state,
4434				struct intel_crtc *crtc)
4435{
4436	struct drm_i915_private *i915 = to_i915(state->base.dev);
4437	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4438
4439	/*
4440	 * FIXME: this function is called for every platform having a
4441	 * compute_clock hook, even though the platform doesn't yet support
4442	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4443	 * called on those.
4444	 */
4445	if (!dpll_mgr)
4446		return;
4447
4448	dpll_mgr->put_dplls(state, crtc);
4449}
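/*
 * Editor's note (hedged): as with reserving, releasing only updates the PLL
 * user tracking staged in @state; the actual PLL disable happens later in
 * the commit path, once the committed state shows the PLL has no remaining
 * active pipes.
 */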
4450
4451/**
4452 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4453 * @state: atomic state
4454 * @crtc: the CRTC for which to update the active DPLL
4455 * @encoder: encoder determining the type of port DPLL
4456 *
4457 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4458 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4459 * DPLL selected will be based on the current mode of the encoder's port.
4460 */
4461void intel_update_active_dpll(struct intel_atomic_state *state,
4462			      struct intel_crtc *crtc,
4463			      struct intel_encoder *encoder)
4464{
4465	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4466	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4467
4468	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4469		return;
4470
4471	dpll_mgr->update_active_dpll(state, crtc, encoder);
4472}
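/*
 * Editor's note (hedged): this hook is only provided by the managers with
 * TypeC ports above (icl_pll_mgr, tgl_pll_mgr, adlp_pll_mgr). On such ports
 * both the MG/TC and the TBT PLL may be reserved at the same time (see the
 * is_alt_port_dpll handling in intel_shared_dpll_state_verify() below), and
 * this call selects which of the reserved port DPLLs is the active one,
 * based on the port's current TypeC mode.
 */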
4473
4474/**
4475 * intel_dpll_get_freq - calculate the DPLL's output frequency
4476 * @i915: i915 device
4477 * @pll: DPLL for which to calculate the output frequency
4478 * @dpll_hw_state: DPLL state from which to calculate the output frequency
4479 *
4480 * Return the output frequency corresponding to @pll's passed-in @dpll_hw_state.
4481 */
4482int intel_dpll_get_freq(struct drm_i915_private *i915,
4483			const struct intel_shared_dpll *pll,
4484			const struct intel_dpll_hw_state *dpll_hw_state)
4485{
4486	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4487		return 0;
4488
4489	return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
4490}
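/*
 * Illustrative usage (editor's sketch, not a call site in this file):
 * combining this helper with intel_dpll_get_hw_state() below to derive a
 * port clock from the current hardware state:
 *
 *	struct intel_dpll_hw_state hw_state = {};
 *
 *	if (intel_dpll_get_hw_state(i915, pll, &hw_state))
 *		port_clock = intel_dpll_get_freq(i915, pll, &hw_state);
 *
 * A PLL whose ->get_freq() hook is not implemented makes this warn and
 * return 0.
 */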
4491
4492/**
4493 * intel_dpll_get_hw_state - read out the DPLL's hardware state
4494 * @i915: i915 device
4495 * @pll: DPLL for which to read out the hardware state
4496 * @dpll_hw_state: DPLL's hardware state
4497 *
4498 * Read out @pll's hardware state into @dpll_hw_state.
4499 */
4500bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4501			     struct intel_shared_dpll *pll,
4502			     struct intel_dpll_hw_state *dpll_hw_state)
4503{
4504	return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
4505}
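/*
 * Editor's note (hedged): the boolean returned here reports whether the PLL
 * is currently enabled; readout_dpll_hw_state() below uses it directly to
 * initialise pll->on during hardware state takeover.
 */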
4506
4507static void readout_dpll_hw_state(struct drm_i915_private *i915,
4508				  struct intel_shared_dpll *pll)
4509{
4510	struct intel_crtc *crtc;
4511
4512	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4513
4514	if (pll->on && pll->info->power_domain)
4515		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4516
4517	pll->state.pipe_mask = 0;
4518	for_each_intel_crtc(&i915->drm, crtc) {
4519		struct intel_crtc_state *crtc_state =
4520			to_intel_crtc_state(crtc->base.state);
4521
4522		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4523			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4524	}
4525	pll->active_mask = pll->state.pipe_mask;
4526
4527	drm_dbg_kms(&i915->drm,
4528		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4529		    pll->info->name, pll->state.pipe_mask, pll->on);
4530}
4531
4532void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4533{
4534	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4535		i915->display.dpll.mgr->update_ref_clks(i915);
4536}
4537
4538void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4539{
4540	struct intel_shared_dpll *pll;
4541	int i;
4542
4543	for_each_shared_dpll(i915, pll, i)
4544		readout_dpll_hw_state(i915, pll);
4545}
4546
4547static void sanitize_dpll_state(struct drm_i915_private *i915,
4548				struct intel_shared_dpll *pll)
4549{
4550	if (!pll->on)
4551		return;
4552
4553	adlp_cmtg_clock_gating_wa(i915, pll);
4554
4555	if (pll->active_mask)
4556		return;
4557
4558	drm_dbg_kms(&i915->drm,
4559		    "%s enabled but not in use, disabling\n",
4560		    pll->info->name);
4561
4562	_intel_disable_shared_dpll(i915, pll);
4563}
4564
4565void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4566{
4567	struct intel_shared_dpll *pll;
4568	int i;
4569
4570	for_each_shared_dpll(i915, pll, i)
4571		sanitize_dpll_state(i915, pll);
4572}
4573
4574/**
4575 * intel_dpll_dump_hw_state - dump hw_state
4576 * @i915: i915 drm device
4577 * @p: where to print the state to
4578 * @dpll_hw_state: hw state to be dumped
4579 *
4580 * Dump out the relevant values in @dpll_hw_state.
4581 */
4582void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4583			      struct drm_printer *p,
4584			      const struct intel_dpll_hw_state *dpll_hw_state)
4585{
4586	if (i915->display.dpll.mgr) {
4587		i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
4588	} else {
4589		/* fallback for platforms that don't use the shared dpll
4590		 * infrastructure
4591		 */
4592		ibx_dump_hw_state(p, dpll_hw_state);
4593	}
4594}
4595
4596/**
4597 * intel_dpll_compare_hw_state - compare the two states
4598 * @i915: i915 drm device
4599 * @a: first DPLL hw state
4600 * @b: second DPLL hw state
4601 *
4602 * Compare DPLL hw states @a and @b.
4603 *
4604 * Returns: true if the states are equal, false if they differ
4605 */
4606bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4607				 const struct intel_dpll_hw_state *a,
4608				 const struct intel_dpll_hw_state *b)
4609{
4610	if (i915->display.dpll.mgr) {
4611		return i915->display.dpll.mgr->compare_hw_state(a, b);
4612	} else {
4613		/* fallback for platforms that don't use the shared dpll
4614		 * infrastructure
4615		 */
4616		return ibx_compare_hw_state(a, b);
4617	}
4618}
4619
4620static void
4621verify_single_dpll_state(struct drm_i915_private *i915,
4622			 struct intel_shared_dpll *pll,
4623			 struct intel_crtc *crtc,
4624			 const struct intel_crtc_state *new_crtc_state)
4625{
4626	struct intel_display *display = &i915->display;
4627	struct intel_dpll_hw_state dpll_hw_state = {};
4628	u8 pipe_mask;
4629	bool active;
4630
4631	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4632
4633	if (!pll->info->always_on) {
4634		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
4635					 "%s: pll in active use but not on in sw tracking\n",
4636					 pll->info->name);
4637		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
4638					 "%s: pll is on but not used by any active pipe\n",
4639					 pll->info->name);
4640		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
4641					 "%s: pll on state mismatch (expected %i, found %i)\n",
4642					 pll->info->name, pll->on, active);
4643	}
4644
4645	if (!crtc) {
4646		INTEL_DISPLAY_STATE_WARN(display,
4647					 pll->active_mask & ~pll->state.pipe_mask,
4648					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
4649					 pll->info->name, pll->active_mask, pll->state.pipe_mask);
4650
4651		return;
4652	}
4653
4654	pipe_mask = BIT(crtc->pipe);
4655
4656	if (new_crtc_state->hw.active)
4657		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
4658					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4659					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4660	else
4661		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4662					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4663					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4664
4665	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
4666				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4667				 pll->info->name, pipe_mask, pll->state.pipe_mask);
4668
4669	INTEL_DISPLAY_STATE_WARN(display,
4670				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4671						   sizeof(dpll_hw_state)),
4672				 "%s: pll hw state mismatch\n",
4673				 pll->info->name);
4674}
4675
4676static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4677			      const struct intel_shared_dpll *new_pll)
4678{
4679	return old_pll && new_pll && old_pll != new_pll &&
4680		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4681}
4682
4683void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4684				    struct intel_crtc *crtc)
4685{
4686	struct intel_display *display = to_intel_display(state);
4687	struct drm_i915_private *i915 = to_i915(state->base.dev);
4688	const struct intel_crtc_state *old_crtc_state =
4689		intel_atomic_get_old_crtc_state(state, crtc);
4690	const struct intel_crtc_state *new_crtc_state =
4691		intel_atomic_get_new_crtc_state(state, crtc);
4692
4693	if (new_crtc_state->shared_dpll)
4694		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4695					 crtc, new_crtc_state);
4696
4697	if (old_crtc_state->shared_dpll &&
4698	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4699		u8 pipe_mask = BIT(crtc->pipe);
4700		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4701
4702		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4703					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4704					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4705
4706		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4707		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4708								     new_crtc_state->shared_dpll) &&
4709					 pll->state.pipe_mask & pipe_mask,
4710					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4711					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4712	}
4713}
4714
4715void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4716{
4717	struct drm_i915_private *i915 = to_i915(state->base.dev);
4718	struct intel_shared_dpll *pll;
4719	int i;
4720
4721	for_each_shared_dpll(i915, pll, i)
4722		verify_single_dpll_state(i915, pll, NULL, NULL);
4723}