   1/*
   2 * Copyright © 2006-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/math.h>
  25#include <linux/string_helpers.h>
  26
  27#include "i915_reg.h"
  28#include "intel_de.h"
  29#include "intel_display_types.h"
  30#include "intel_dkl_phy.h"
  31#include "intel_dkl_phy_regs.h"
  32#include "intel_dpio_phy.h"
  33#include "intel_dpll.h"
  34#include "intel_dpll_mgr.h"
  35#include "intel_hti.h"
  36#include "intel_mg_phy_regs.h"
  37#include "intel_pch_refclk.h"
  38#include "intel_tc.h"
  39
  40/**
  41 * DOC: Display PLLs
  42 *
  43 * Display PLLs used for driving outputs vary by platform. While some have
  44 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  45 * from a pool. In the latter scenario, it is possible that multiple pipes
  46 * share a PLL if their configurations match.
  47 *
  48 * This file provides an abstraction over display PLLs. The function
  49 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  50 * users of a PLL are tracked and that tracking is integrated with the atomic
   51 * modeset interface. During an atomic operation, required PLLs can be reserved
  52 * for a given CRTC and encoder configuration by calling
  53 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
  54 * with intel_release_shared_dplls().
  55 * Changes to the users are first staged in the atomic state, and then made
  56 * effective by calling intel_shared_dpll_swap_state() during the atomic
  57 * commit phase.
  58 */
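/*
 * Illustrative sketch of the call flow described above (simplified; error
 * handling and the surrounding atomic plumbing are omitted):
 *
 *	intel_shared_dpll_init(i915);				(driver init)
 *
 *	intel_reserve_shared_dplls(state, crtc, encoder);	(atomic check)
 *	...
 *	intel_shared_dpll_swap_state(state);			(atomic commit)
 *	intel_disable_shared_dpll(old_crtc_state);
 *	intel_enable_shared_dpll(new_crtc_state);
 *	...
 *	intel_release_shared_dplls(state, crtc);		(atomic check, teardown)
 */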
  59
  60/* platform specific hooks for managing DPLLs */
  61struct intel_shared_dpll_funcs {
  62	/*
  63	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
  64	 * the pll is not already enabled.
  65	 */
  66	void (*enable)(struct drm_i915_private *i915,
  67		       struct intel_shared_dpll *pll);
  68
  69	/*
  70	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
  71	 * only when it is safe to disable the pll, i.e., there are no more
  72	 * tracked users for it.
  73	 */
  74	void (*disable)(struct drm_i915_private *i915,
  75			struct intel_shared_dpll *pll);
  76
  77	/*
  78	 * Hook for reading the values currently programmed to the DPLL
  79	 * registers. This is used for initial hw state readout and state
  80	 * verification after a mode set.
  81	 */
  82	bool (*get_hw_state)(struct drm_i915_private *i915,
  83			     struct intel_shared_dpll *pll,
  84			     struct intel_dpll_hw_state *hw_state);
  85
  86	/*
  87	 * Hook for calculating the pll's output frequency based on its passed
  88	 * in state.
  89	 */
  90	int (*get_freq)(struct drm_i915_private *i915,
  91			const struct intel_shared_dpll *pll,
  92			const struct intel_dpll_hw_state *pll_state);
  93};
  94
  95struct intel_dpll_mgr {
  96	const struct dpll_info *dpll_info;
  97
  98	int (*compute_dplls)(struct intel_atomic_state *state,
  99			     struct intel_crtc *crtc,
 100			     struct intel_encoder *encoder);
 101	int (*get_dplls)(struct intel_atomic_state *state,
 102			 struct intel_crtc *crtc,
 103			 struct intel_encoder *encoder);
 104	void (*put_dplls)(struct intel_atomic_state *state,
 105			  struct intel_crtc *crtc);
 106	void (*update_active_dpll)(struct intel_atomic_state *state,
 107				   struct intel_crtc *crtc,
 108				   struct intel_encoder *encoder);
 109	void (*update_ref_clks)(struct drm_i915_private *i915);
 110	void (*dump_hw_state)(struct drm_i915_private *i915,
 111			      const struct intel_dpll_hw_state *hw_state);
 112	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
 113				 const struct intel_dpll_hw_state *b);
 114};
 115
 116static void
 117intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
 118				  struct intel_shared_dpll_state *shared_dpll)
 119{
 120	struct intel_shared_dpll *pll;
 121	int i;
 122
 123	/* Copy shared dpll state */
 124	for_each_shared_dpll(i915, pll, i)
 125		shared_dpll[pll->index] = pll->state;
 126}
 127
 128static struct intel_shared_dpll_state *
 129intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
 130{
 131	struct intel_atomic_state *state = to_intel_atomic_state(s);
 132
 133	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
 134
 135	if (!state->dpll_set) {
 136		state->dpll_set = true;
 137
 138		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
 139						  state->shared_dpll);
 140	}
 141
 142	return state->shared_dpll;
 143}
 144
 145/**
 146 * intel_get_shared_dpll_by_id - get a DPLL given its id
 147 * @i915: i915 device instance
 148 * @id: pll id
 149 *
 150 * Returns:
 151 * A pointer to the DPLL with @id
 152 */
 153struct intel_shared_dpll *
 154intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
 155			    enum intel_dpll_id id)
 156{
 157	struct intel_shared_dpll *pll;
 158	int i;
 159
 160	for_each_shared_dpll(i915, pll, i) {
 161		if (pll->info->id == id)
 162			return pll;
 163	}
 164
 165	MISSING_CASE(id);
 166	return NULL;
 167}
 168
 169/* For ILK+ */
 170void assert_shared_dpll(struct drm_i915_private *i915,
 171			struct intel_shared_dpll *pll,
 172			bool state)
 173{
 174	bool cur_state;
 175	struct intel_dpll_hw_state hw_state;
 176
 177	if (drm_WARN(&i915->drm, !pll,
 178		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
 179		return;
 180
 181	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
 182	I915_STATE_WARN(i915, cur_state != state,
 183			"%s assertion failure (expected %s, current %s)\n",
 184			pll->info->name, str_on_off(state),
 185			str_on_off(cur_state));
 186}
 187
 188static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
 189{
 190	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
 191}
 192
 193enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
 194{
 195	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
 196}
 197
 198static i915_reg_t
 199intel_combo_pll_enable_reg(struct drm_i915_private *i915,
 200			   struct intel_shared_dpll *pll)
 201{
 202	if (IS_DG1(i915))
 203		return DG1_DPLL_ENABLE(pll->info->id);
 204	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
 205		 (pll->info->id == DPLL_ID_EHL_DPLL4))
 206		return MG_PLL_ENABLE(0);
 207
 208	return ICL_DPLL_ENABLE(pll->info->id);
 209}
 210
 211static i915_reg_t
 212intel_tc_pll_enable_reg(struct drm_i915_private *i915,
 213			struct intel_shared_dpll *pll)
 214{
 215	const enum intel_dpll_id id = pll->info->id;
 216	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
 217
 218	if (IS_ALDERLAKE_P(i915))
 219		return ADLP_PORTTC_PLL_ENABLE(tc_port);
 220
 221	return MG_PLL_ENABLE(tc_port);
 222}
 223
 224static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
 225				      struct intel_shared_dpll *pll)
 226{
 227	if (pll->info->power_domain)
 228		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
 229
 230	pll->info->funcs->enable(i915, pll);
 231	pll->on = true;
 232}
 233
 234static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
 235				       struct intel_shared_dpll *pll)
 236{
 237	pll->info->funcs->disable(i915, pll);
 238	pll->on = false;
 239
 240	if (pll->info->power_domain)
 241		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
 242}
 243
 244/**
 245 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 246 * @crtc_state: CRTC, and its state, which has a shared DPLL
 247 *
 248 * Enable the shared DPLL used by @crtc.
 249 */
 250void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 251{
 252	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 253	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 254	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 255	unsigned int pipe_mask = BIT(crtc->pipe);
 256	unsigned int old_mask;
 257
 258	if (drm_WARN_ON(&i915->drm, pll == NULL))
 259		return;
 260
 261	mutex_lock(&i915->display.dpll.lock);
 262	old_mask = pll->active_mask;
 263
 264	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
 265	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
 266		goto out;
 267
 268	pll->active_mask |= pipe_mask;
 269
 270	drm_dbg_kms(&i915->drm,
 271		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 272		    pll->info->name, pll->active_mask, pll->on,
 273		    crtc->base.base.id, crtc->base.name);
 274
 275	if (old_mask) {
 276		drm_WARN_ON(&i915->drm, !pll->on);
 277		assert_shared_dpll_enabled(i915, pll);
 278		goto out;
 279	}
 280	drm_WARN_ON(&i915->drm, pll->on);
 281
 282	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
 283
 284	_intel_enable_shared_dpll(i915, pll);
 285
 286out:
 287	mutex_unlock(&i915->display.dpll.lock);
 288}
 289
 290/**
 291 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 292 * @crtc_state: CRTC, and its state, which has a shared DPLL
 293 *
 294 * Disable the shared DPLL used by @crtc.
 295 */
 296void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 297{
 298	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 299	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 300	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 301	unsigned int pipe_mask = BIT(crtc->pipe);
 302
 303	/* PCH only available on ILK+ */
 304	if (DISPLAY_VER(i915) < 5)
 305		return;
 306
 307	if (pll == NULL)
 308		return;
 309
 310	mutex_lock(&i915->display.dpll.lock);
 311	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
 312		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
 313		     crtc->base.base.id, crtc->base.name))
 314		goto out;
 315
 316	drm_dbg_kms(&i915->drm,
 317		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
 318		    pll->info->name, pll->active_mask, pll->on,
 319		    crtc->base.base.id, crtc->base.name);
 320
 321	assert_shared_dpll_enabled(i915, pll);
 322	drm_WARN_ON(&i915->drm, !pll->on);
 323
 324	pll->active_mask &= ~pipe_mask;
 325	if (pll->active_mask)
 326		goto out;
 327
 328	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
 329
 330	_intel_disable_shared_dpll(i915, pll);
 331
 332out:
 333	mutex_unlock(&i915->display.dpll.lock);
 334}
 335
 336static unsigned long
 337intel_dpll_mask_all(struct drm_i915_private *i915)
 338{
 339	struct intel_shared_dpll *pll;
 340	unsigned long dpll_mask = 0;
 341	int i;
 342
 343	for_each_shared_dpll(i915, pll, i) {
 344		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
 345
 346		dpll_mask |= BIT(pll->info->id);
 347	}
 348
 349	return dpll_mask;
 350}
 351
 352static struct intel_shared_dpll *
 353intel_find_shared_dpll(struct intel_atomic_state *state,
 354		       const struct intel_crtc *crtc,
 355		       const struct intel_dpll_hw_state *pll_state,
 356		       unsigned long dpll_mask)
 357{
 358	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 359	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
 360	struct intel_shared_dpll_state *shared_dpll;
 361	struct intel_shared_dpll *unused_pll = NULL;
 362	enum intel_dpll_id id;
 363
 364	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 365
 366	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
 367
 368	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
 369		struct intel_shared_dpll *pll;
 370
 371		pll = intel_get_shared_dpll_by_id(i915, id);
 372		if (!pll)
 373			continue;
 374
 375		/* Only want to check enabled timings first */
 376		if (shared_dpll[pll->index].pipe_mask == 0) {
 377			if (!unused_pll)
 378				unused_pll = pll;
 379			continue;
 380		}
 381
 382		if (memcmp(pll_state,
 383			   &shared_dpll[pll->index].hw_state,
 384			   sizeof(*pll_state)) == 0) {
 385			drm_dbg_kms(&i915->drm,
 386				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
 387				    crtc->base.base.id, crtc->base.name,
 388				    pll->info->name,
 389				    shared_dpll[pll->index].pipe_mask,
 390				    pll->active_mask);
 391			return pll;
 392		}
 393	}
 394
 395	/* Ok no matching timings, maybe there's a free one? */
 396	if (unused_pll) {
 397		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
 398			    crtc->base.base.id, crtc->base.name,
 399			    unused_pll->info->name);
 400		return unused_pll;
 401	}
 402
 403	return NULL;
 404}
 405
 406/**
 407 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
  408 * @crtc: CRTC on whose behalf the reference is taken
 409 * @pll: DPLL for which the reference is taken
 410 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 411 *
 412 * Take a reference for @pll tracking the use of it by @crtc.
 413 */
 414static void
 415intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
 416				 const struct intel_shared_dpll *pll,
 417				 struct intel_shared_dpll_state *shared_dpll_state)
 418{
 419	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 420
 421	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
 422
 423	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
 424
 425	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
 426		    crtc->base.base.id, crtc->base.name, pll->info->name);
 427}
 428
 429static void
 430intel_reference_shared_dpll(struct intel_atomic_state *state,
 431			    const struct intel_crtc *crtc,
 432			    const struct intel_shared_dpll *pll,
 433			    const struct intel_dpll_hw_state *pll_state)
 434{
 435	struct intel_shared_dpll_state *shared_dpll;
 436
 437	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 438
 439	if (shared_dpll[pll->index].pipe_mask == 0)
 440		shared_dpll[pll->index].hw_state = *pll_state;
 441
 442	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 443}
 444
 445/**
 446 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
  447 * @crtc: CRTC on whose behalf the reference is dropped
 448 * @pll: DPLL for which the reference is dropped
 449 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 450 *
 451 * Drop a reference for @pll tracking the end of use of it by @crtc.
 452 */
 453void
 454intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
 455				   const struct intel_shared_dpll *pll,
 456				   struct intel_shared_dpll_state *shared_dpll_state)
 457{
 458	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 459
 460	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
 461
 462	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
 463
 464	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
 465		    crtc->base.base.id, crtc->base.name, pll->info->name);
 466}
 467
 468static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
 469					  const struct intel_crtc *crtc,
 470					  const struct intel_shared_dpll *pll)
 471{
 472	struct intel_shared_dpll_state *shared_dpll;
 473
 474	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 475
 476	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
 477}
 478
 479static void intel_put_dpll(struct intel_atomic_state *state,
 480			   struct intel_crtc *crtc)
 481{
 482	const struct intel_crtc_state *old_crtc_state =
 483		intel_atomic_get_old_crtc_state(state, crtc);
 484	struct intel_crtc_state *new_crtc_state =
 485		intel_atomic_get_new_crtc_state(state, crtc);
 486
 487	new_crtc_state->shared_dpll = NULL;
 488
 489	if (!old_crtc_state->shared_dpll)
 490		return;
 491
 492	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
 493}
 494
 495/**
 496 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 497 * @state: atomic state
 498 *
 499 * This is the dpll version of drm_atomic_helper_swap_state() since the
 500 * helper does not handle driver-specific global state.
 501 *
 502 * For consistency with atomic helpers this function does a complete swap,
 503 * i.e. it also puts the current state into @state, even though there is no
 504 * need for that at this moment.
 505 */
 506void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 507{
 508	struct drm_i915_private *i915 = to_i915(state->base.dev);
 509	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
 510	struct intel_shared_dpll *pll;
 511	int i;
 512
 513	if (!state->dpll_set)
 514		return;
 515
 516	for_each_shared_dpll(i915, pll, i)
 517		swap(pll->state, shared_dpll[pll->index]);
 518}
 519
 520static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
 521				      struct intel_shared_dpll *pll,
 522				      struct intel_dpll_hw_state *hw_state)
 523{
 524	const enum intel_dpll_id id = pll->info->id;
 525	intel_wakeref_t wakeref;
 526	u32 val;
 527
 528	wakeref = intel_display_power_get_if_enabled(i915,
 529						     POWER_DOMAIN_DISPLAY_CORE);
 530	if (!wakeref)
 531		return false;
 532
 533	val = intel_de_read(i915, PCH_DPLL(id));
 534	hw_state->dpll = val;
 535	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
 536	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
 537
 538	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 539
 540	return val & DPLL_VCO_ENABLE;
 541}
 542
 543static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
 544{
 545	u32 val;
 546	bool enabled;
 547
 548	val = intel_de_read(i915, PCH_DREF_CONTROL);
 549	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 550			    DREF_SUPERSPREAD_SOURCE_MASK));
 551	I915_STATE_WARN(i915, !enabled,
 552			"PCH refclk assertion failure, should be active but is disabled\n");
 553}
 554
 555static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
 556				struct intel_shared_dpll *pll)
 557{
 558	const enum intel_dpll_id id = pll->info->id;
 559
 560	/* PCH refclock must be enabled first */
 561	ibx_assert_pch_refclk_enabled(i915);
 562
 563	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
 564	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
 565
 566	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
 567
 568	/* Wait for the clocks to stabilize. */
 569	intel_de_posting_read(i915, PCH_DPLL(id));
 570	udelay(150);
 571
 572	/* The pixel multiplier can only be updated once the
 573	 * DPLL is enabled and the clocks are stable.
 574	 *
 575	 * So write it again.
 576	 */
 577	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
 578	intel_de_posting_read(i915, PCH_DPLL(id));
 579	udelay(200);
 580}
 581
 582static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
 583				 struct intel_shared_dpll *pll)
 584{
 585	const enum intel_dpll_id id = pll->info->id;
 586
 587	intel_de_write(i915, PCH_DPLL(id), 0);
 588	intel_de_posting_read(i915, PCH_DPLL(id));
 589	udelay(200);
 590}
 591
 592static int ibx_compute_dpll(struct intel_atomic_state *state,
 593			    struct intel_crtc *crtc,
 594			    struct intel_encoder *encoder)
 595{
 596	return 0;
 597}
 598
 599static int ibx_get_dpll(struct intel_atomic_state *state,
 600			struct intel_crtc *crtc,
 601			struct intel_encoder *encoder)
 602{
 603	struct intel_crtc_state *crtc_state =
 604		intel_atomic_get_new_crtc_state(state, crtc);
 605	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 606	struct intel_shared_dpll *pll;
 607	enum intel_dpll_id id;
 608
 609	if (HAS_PCH_IBX(i915)) {
 610		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 611		id = (enum intel_dpll_id) crtc->pipe;
 612		pll = intel_get_shared_dpll_by_id(i915, id);
 613
 614		drm_dbg_kms(&i915->drm,
 615			    "[CRTC:%d:%s] using pre-allocated %s\n",
 616			    crtc->base.base.id, crtc->base.name,
 617			    pll->info->name);
 618	} else {
 619		pll = intel_find_shared_dpll(state, crtc,
 620					     &crtc_state->dpll_hw_state,
 621					     BIT(DPLL_ID_PCH_PLL_B) |
 622					     BIT(DPLL_ID_PCH_PLL_A));
 623	}
 624
 625	if (!pll)
 626		return -EINVAL;
 627
 628	/* reference the pll */
 629	intel_reference_shared_dpll(state, crtc,
 630				    pll, &crtc_state->dpll_hw_state);
 631
 632	crtc_state->shared_dpll = pll;
 633
 634	return 0;
 635}
 636
 637static void ibx_dump_hw_state(struct drm_i915_private *i915,
 638			      const struct intel_dpll_hw_state *hw_state)
 639{
 640	drm_dbg_kms(&i915->drm,
 641		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 642		    "fp0: 0x%x, fp1: 0x%x\n",
 643		    hw_state->dpll,
 644		    hw_state->dpll_md,
 645		    hw_state->fp0,
 646		    hw_state->fp1);
 647}
 648
 649static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
 650				 const struct intel_dpll_hw_state *b)
 651{
 652	return a->dpll == b->dpll &&
 653		a->dpll_md == b->dpll_md &&
 654		a->fp0 == b->fp0 &&
 655		a->fp1 == b->fp1;
 656}
 657
 658static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 659	.enable = ibx_pch_dpll_enable,
 660	.disable = ibx_pch_dpll_disable,
 661	.get_hw_state = ibx_pch_dpll_get_hw_state,
 662};
 663
 664static const struct dpll_info pch_plls[] = {
 665	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
 666	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
 667	{}
 668};
 669
 670static const struct intel_dpll_mgr pch_pll_mgr = {
 671	.dpll_info = pch_plls,
 672	.compute_dplls = ibx_compute_dpll,
 673	.get_dplls = ibx_get_dpll,
 674	.put_dplls = intel_put_dpll,
 675	.dump_hw_state = ibx_dump_hw_state,
 676	.compare_hw_state = ibx_compare_hw_state,
 677};
 678
 679static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
 680				 struct intel_shared_dpll *pll)
 681{
 682	const enum intel_dpll_id id = pll->info->id;
 683
 684	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
 685	intel_de_posting_read(i915, WRPLL_CTL(id));
 686	udelay(20);
 687}
 688
 689static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
 690				struct intel_shared_dpll *pll)
 691{
 692	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
 693	intel_de_posting_read(i915, SPLL_CTL);
 694	udelay(20);
 695}
 696
 697static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
 698				  struct intel_shared_dpll *pll)
 699{
 700	const enum intel_dpll_id id = pll->info->id;
 701
 702	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
 703	intel_de_posting_read(i915, WRPLL_CTL(id));
 704
 705	/*
 706	 * Try to set up the PCH reference clock once all DPLLs
 707	 * that depend on it have been shut down.
 708	 */
 709	if (i915->display.dpll.pch_ssc_use & BIT(id))
 710		intel_init_pch_refclk(i915);
 711}
 712
 713static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
 714				 struct intel_shared_dpll *pll)
 715{
 716	enum intel_dpll_id id = pll->info->id;
 717
 718	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
 719	intel_de_posting_read(i915, SPLL_CTL);
 720
 721	/*
 722	 * Try to set up the PCH reference clock once all DPLLs
 723	 * that depend on it have been shut down.
 724	 */
 725	if (i915->display.dpll.pch_ssc_use & BIT(id))
 726		intel_init_pch_refclk(i915);
 727}
 728
 729static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
 730				       struct intel_shared_dpll *pll,
 731				       struct intel_dpll_hw_state *hw_state)
 732{
 733	const enum intel_dpll_id id = pll->info->id;
 734	intel_wakeref_t wakeref;
 735	u32 val;
 736
 737	wakeref = intel_display_power_get_if_enabled(i915,
 738						     POWER_DOMAIN_DISPLAY_CORE);
 739	if (!wakeref)
 740		return false;
 741
 742	val = intel_de_read(i915, WRPLL_CTL(id));
 743	hw_state->wrpll = val;
 744
 745	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 746
 747	return val & WRPLL_PLL_ENABLE;
 748}
 749
 750static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
 751				      struct intel_shared_dpll *pll,
 752				      struct intel_dpll_hw_state *hw_state)
 753{
 754	intel_wakeref_t wakeref;
 755	u32 val;
 756
 757	wakeref = intel_display_power_get_if_enabled(i915,
 758						     POWER_DOMAIN_DISPLAY_CORE);
 759	if (!wakeref)
 760		return false;
 761
 762	val = intel_de_read(i915, SPLL_CTL);
 763	hw_state->spll = val;
 764
 765	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 766
 767	return val & SPLL_PLL_ENABLE;
 768}
 769
 770#define LC_FREQ 2700
 771#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 772
 773#define P_MIN 2
 774#define P_MAX 64
 775#define P_INC 2
 776
 777/* Constraints for PLL good behavior */
 778#define REF_MIN 48
 779#define REF_MAX 400
 780#define VCO_MIN 2400
 781#define VCO_MAX 4800
 782
 783struct hsw_wrpll_rnp {
 784	unsigned p, n2, r2;
 785};
 786
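/*
 * Allowed frequency error budget for the given pixel clock (in Hz), in the
 * same units as the delta computed in hsw_wrpll_update_rnp() (i.e. ppm).
 */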
 787static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 788{
 789	switch (clock) {
 790	case 25175000:
 791	case 25200000:
 792	case 27000000:
 793	case 27027000:
 794	case 37762500:
 795	case 37800000:
 796	case 40500000:
 797	case 40541000:
 798	case 54000000:
 799	case 54054000:
 800	case 59341000:
 801	case 59400000:
 802	case 72000000:
 803	case 74176000:
 804	case 74250000:
 805	case 81000000:
 806	case 81081000:
 807	case 89012000:
 808	case 89100000:
 809	case 108000000:
 810	case 108108000:
 811	case 111264000:
 812	case 111375000:
 813	case 148352000:
 814	case 148500000:
 815	case 162000000:
 816	case 162162000:
 817	case 222525000:
 818	case 222750000:
 819	case 296703000:
 820	case 297000000:
 821		return 0;
 822	case 233500000:
 823	case 245250000:
 824	case 247750000:
 825	case 253250000:
 826	case 298000000:
 827		return 1500;
 828	case 169128000:
 829	case 169500000:
 830	case 179500000:
 831	case 202000000:
 832		return 2000;
 833	case 256250000:
 834	case 262500000:
 835	case 270000000:
 836	case 272500000:
 837	case 273750000:
 838	case 280750000:
 839	case 281250000:
 840	case 286000000:
 841	case 291750000:
 842		return 4000;
 843	case 267250000:
 844	case 268500000:
 845		return 5000;
 846	default:
 847		return 1000;
 848	}
 849}
 850
 851static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
 852				 unsigned int r2, unsigned int n2,
 853				 unsigned int p,
 854				 struct hsw_wrpll_rnp *best)
 855{
 856	u64 a, b, c, d, diff, diff_best;
 857
 858	/* No best (r,n,p) yet */
 859	if (best->p == 0) {
 860		best->p = p;
 861		best->n2 = n2;
 862		best->r2 = r2;
 863		return;
 864	}
 865
 866	/*
 867	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
 868	 * freq2k.
 869	 *
 870	 * delta = 1e6 *
 871	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
 872	 *	   freq2k;
 873	 *
 874	 * and we would like delta <= budget.
 875	 *
 876	 * If the discrepancy is above the PPM-based budget, always prefer to
 877	 * improve upon the previous solution.  However, if you're within the
 878	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
 879	 */
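	/*
	 * The comparisons below avoid the division: multiplying
	 * delta <= budget through by freq2k * p * r2 gives
	 *
	 *   1e6 * abs(freq2k * p * r2 - LC_FREQ_2K * n2) <= budget * freq2k * p * r2
	 *
	 * i.e. c <= a (and likewise d <= b for the current best), so
	 * "a >= c" below means the candidate is within the budget.
	 */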
 880	a = freq2k * budget * p * r2;
 881	b = freq2k * budget * best->p * best->r2;
 882	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
 883	diff_best = abs_diff(freq2k * best->p * best->r2,
 884			     LC_FREQ_2K * best->n2);
 885	c = 1000000 * diff;
 886	d = 1000000 * diff_best;
 887
 888	if (a < c && b < d) {
 889		/* If both are above the budget, pick the closer */
 890		if (best->p * best->r2 * diff < p * r2 * diff_best) {
 891			best->p = p;
 892			best->n2 = n2;
 893			best->r2 = r2;
 894		}
 895	} else if (a >= c && b < d) {
  896		/* The new (r2, n2, p) is within the budget but the best is not: update. */
 897		best->p = p;
 898		best->n2 = n2;
 899		best->r2 = r2;
 900	} else if (a >= c && b >= d) {
 901		/* Both are below the limit, so pick the higher n2/(r2*r2) */
 902		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
 903			best->p = p;
 904			best->n2 = n2;
 905			best->r2 = r2;
 906		}
 907	}
 908	/* Otherwise a < c && b >= d, do nothing */
 909}
 910
 911static void
 912hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 913			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 914{
 915	u64 freq2k;
 916	unsigned p, n2, r2;
 917	struct hsw_wrpll_rnp best = {};
 918	unsigned budget;
 919
 920	freq2k = clock / 100;
 921
 922	budget = hsw_wrpll_get_budget_for_freq(clock);
 923
  924	/* Special case handling for the 540 MHz pixel clock: bypass WR PLL
  925	 * entirely and directly pass the LC PLL to it. */
 926	if (freq2k == 5400000) {
 927		*n2_out = 2;
 928		*p_out = 1;
 929		*r2_out = 2;
 930		return;
 931	}
 932
 933	/*
 934	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
 935	 * the WR PLL.
 936	 *
 937	 * We want R so that REF_MIN <= Ref <= REF_MAX.
 938	 * Injecting R2 = 2 * R gives:
 939	 *   REF_MAX * r2 > LC_FREQ * 2 and
 940	 *   REF_MIN * r2 < LC_FREQ * 2
 941	 *
 942	 * Which means the desired boundaries for r2 are:
 943	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
 944	 *
 945	 */
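	/*
	 * With LC_FREQ = 2700 and REF_MIN/REF_MAX = 48/400 this sweeps
	 * r2 over [2 * 2700 / 400 + 1, 2 * 2700 / 48] = [14, 112].
	 */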
 946	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
 947	     r2 <= LC_FREQ * 2 / REF_MIN;
 948	     r2++) {
 949
 950		/*
 951		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
 952		 *
 953		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
 954		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
 955		 *   VCO_MAX * r2 > n2 * LC_FREQ and
 956		 *   VCO_MIN * r2 < n2 * LC_FREQ)
 957		 *
 958		 * Which means the desired boundaries for n2 are:
 959		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
 960		 */
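		/*
		 * E.g. for r2 = 14 this gives n2 in
		 * [2400 * 14 / 2700 + 1, 4800 * 14 / 2700] = [13, 24].
		 */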
 961		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
 962		     n2 <= VCO_MAX * r2 / LC_FREQ;
 963		     n2++) {
 964
 965			for (p = P_MIN; p <= P_MAX; p += P_INC)
 966				hsw_wrpll_update_rnp(freq2k, budget,
 967						     r2, n2, p, &best);
 968		}
 969	}
 970
 971	*n2_out = best.n2;
 972	*p_out = best.p;
 973	*r2_out = best.r2;
 974}
 975
 976static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
 977				  const struct intel_shared_dpll *pll,
 978				  const struct intel_dpll_hw_state *pll_state)
 979{
 980	int refclk;
 981	int n, p, r;
 982	u32 wrpll = pll_state->wrpll;
 983
 984	switch (wrpll & WRPLL_REF_MASK) {
 985	case WRPLL_REF_SPECIAL_HSW:
 986		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
 987		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
 988			refclk = i915->display.dpll.ref_clks.nssc;
 989			break;
 990		}
 991		fallthrough;
 992	case WRPLL_REF_PCH_SSC:
 993		/*
 994		 * We could calculate spread here, but our checking
 995		 * code only cares about 5% accuracy, and spread is a max of
 996		 * 0.5% downspread.
 997		 */
 998		refclk = i915->display.dpll.ref_clks.ssc;
 999		break;
1000	case WRPLL_REF_LCPLL:
1001		refclk = 2700000;
1002		break;
1003	default:
1004		MISSING_CASE(wrpll);
1005		return 0;
1006	}
1007
1008	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1009	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1010	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1011
1012	/* Convert to KHz, p & r have a fixed point portion */
1013	return (refclk * n / 10) / (p * r) * 2;
1014}
1015
1016static int
1017hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1018			   struct intel_crtc *crtc)
1019{
1020	struct drm_i915_private *i915 = to_i915(state->base.dev);
1021	struct intel_crtc_state *crtc_state =
1022		intel_atomic_get_new_crtc_state(state, crtc);
1023	unsigned int p, n2, r2;
1024
1025	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1026
1027	crtc_state->dpll_hw_state.wrpll =
1028		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1029		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1030		WRPLL_DIVIDER_POST(p);
1031
1032	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1033							&crtc_state->dpll_hw_state);
1034
1035	return 0;
1036}
1037
1038static struct intel_shared_dpll *
1039hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1040		       struct intel_crtc *crtc)
1041{
1042	struct intel_crtc_state *crtc_state =
1043		intel_atomic_get_new_crtc_state(state, crtc);
1044
1045	return intel_find_shared_dpll(state, crtc,
1046				      &crtc_state->dpll_hw_state,
1047				      BIT(DPLL_ID_WRPLL2) |
1048				      BIT(DPLL_ID_WRPLL1));
1049}
1050
1051static int
1052hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1053{
1054	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1055	int clock = crtc_state->port_clock;
1056
1057	switch (clock / 2) {
1058	case 81000:
1059	case 135000:
1060	case 270000:
1061		return 0;
1062	default:
1063		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1064			    clock);
1065		return -EINVAL;
1066	}
1067}
1068
1069static struct intel_shared_dpll *
1070hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1071{
1072	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1073	struct intel_shared_dpll *pll;
1074	enum intel_dpll_id pll_id;
1075	int clock = crtc_state->port_clock;
1076
1077	switch (clock / 2) {
1078	case 81000:
1079		pll_id = DPLL_ID_LCPLL_810;
1080		break;
1081	case 135000:
1082		pll_id = DPLL_ID_LCPLL_1350;
1083		break;
1084	case 270000:
1085		pll_id = DPLL_ID_LCPLL_2700;
1086		break;
1087	default:
1088		MISSING_CASE(clock / 2);
1089		return NULL;
1090	}
1091
1092	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1093
1094	if (!pll)
1095		return NULL;
1096
1097	return pll;
1098}
1099
1100static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1101				  const struct intel_shared_dpll *pll,
1102				  const struct intel_dpll_hw_state *pll_state)
1103{
1104	int link_clock = 0;
1105
1106	switch (pll->info->id) {
1107	case DPLL_ID_LCPLL_810:
1108		link_clock = 81000;
1109		break;
1110	case DPLL_ID_LCPLL_1350:
1111		link_clock = 135000;
1112		break;
1113	case DPLL_ID_LCPLL_2700:
1114		link_clock = 270000;
1115		break;
1116	default:
1117		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1118		break;
1119	}
1120
1121	return link_clock * 2;
1122}
1123
1124static int
1125hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1126			  struct intel_crtc *crtc)
1127{
1128	struct intel_crtc_state *crtc_state =
1129		intel_atomic_get_new_crtc_state(state, crtc);
1130
1131	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1132		return -EINVAL;
1133
1134	crtc_state->dpll_hw_state.spll =
1135		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1136
1137	return 0;
1138}
1139
1140static struct intel_shared_dpll *
1141hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1142		      struct intel_crtc *crtc)
1143{
1144	struct intel_crtc_state *crtc_state =
1145		intel_atomic_get_new_crtc_state(state, crtc);
1146
1147	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1148				      BIT(DPLL_ID_SPLL));
1149}
1150
1151static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1152				 const struct intel_shared_dpll *pll,
1153				 const struct intel_dpll_hw_state *pll_state)
1154{
1155	int link_clock = 0;
1156
1157	switch (pll_state->spll & SPLL_FREQ_MASK) {
1158	case SPLL_FREQ_810MHz:
1159		link_clock = 81000;
1160		break;
1161	case SPLL_FREQ_1350MHz:
1162		link_clock = 135000;
1163		break;
1164	case SPLL_FREQ_2700MHz:
1165		link_clock = 270000;
1166		break;
1167	default:
1168		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1169		break;
1170	}
1171
1172	return link_clock * 2;
1173}
1174
1175static int hsw_compute_dpll(struct intel_atomic_state *state,
1176			    struct intel_crtc *crtc,
1177			    struct intel_encoder *encoder)
1178{
1179	struct intel_crtc_state *crtc_state =
1180		intel_atomic_get_new_crtc_state(state, crtc);
1181
1182	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1183		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1184	else if (intel_crtc_has_dp_encoder(crtc_state))
1185		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1186	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1187		return hsw_ddi_spll_compute_dpll(state, crtc);
1188	else
1189		return -EINVAL;
1190}
1191
1192static int hsw_get_dpll(struct intel_atomic_state *state,
1193			struct intel_crtc *crtc,
1194			struct intel_encoder *encoder)
1195{
1196	struct intel_crtc_state *crtc_state =
1197		intel_atomic_get_new_crtc_state(state, crtc);
1198	struct intel_shared_dpll *pll = NULL;
1199
1200	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1201		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1202	else if (intel_crtc_has_dp_encoder(crtc_state))
1203		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1204	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1205		pll = hsw_ddi_spll_get_dpll(state, crtc);
1206
1207	if (!pll)
1208		return -EINVAL;
1209
1210	intel_reference_shared_dpll(state, crtc,
1211				    pll, &crtc_state->dpll_hw_state);
1212
1213	crtc_state->shared_dpll = pll;
1214
1215	return 0;
1216}
1217
1218static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1219{
1220	i915->display.dpll.ref_clks.ssc = 135000;
1221	/* Non-SSC is only used on non-ULT HSW. */
1222	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1223		i915->display.dpll.ref_clks.nssc = 24000;
1224	else
1225		i915->display.dpll.ref_clks.nssc = 135000;
1226}
1227
1228static void hsw_dump_hw_state(struct drm_i915_private *i915,
1229			      const struct intel_dpll_hw_state *hw_state)
1230{
1231	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1232		    hw_state->wrpll, hw_state->spll);
1233}
1234
1235static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
1236				 const struct intel_dpll_hw_state *b)
1237{
1238	return a->wrpll == b->wrpll &&
1239		a->spll == b->spll;
1240}
1241
1242static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1243	.enable = hsw_ddi_wrpll_enable,
1244	.disable = hsw_ddi_wrpll_disable,
1245	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1246	.get_freq = hsw_ddi_wrpll_get_freq,
1247};
1248
1249static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1250	.enable = hsw_ddi_spll_enable,
1251	.disable = hsw_ddi_spll_disable,
1252	.get_hw_state = hsw_ddi_spll_get_hw_state,
1253	.get_freq = hsw_ddi_spll_get_freq,
1254};
1255
1256static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1257				 struct intel_shared_dpll *pll)
1258{
1259}
1260
1261static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1262				  struct intel_shared_dpll *pll)
1263{
1264}
1265
1266static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1267				       struct intel_shared_dpll *pll,
1268				       struct intel_dpll_hw_state *hw_state)
1269{
1270	return true;
1271}
1272
1273static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1274	.enable = hsw_ddi_lcpll_enable,
1275	.disable = hsw_ddi_lcpll_disable,
1276	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1277	.get_freq = hsw_ddi_lcpll_get_freq,
1278};
1279
1280static const struct dpll_info hsw_plls[] = {
1281	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1282	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1283	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1284	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1285	  .always_on = true, },
1286	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1287	  .always_on = true, },
1288	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1289	  .always_on = true, },
1290	{}
1291};
1292
1293static const struct intel_dpll_mgr hsw_pll_mgr = {
1294	.dpll_info = hsw_plls,
1295	.compute_dplls = hsw_compute_dpll,
1296	.get_dplls = hsw_get_dpll,
1297	.put_dplls = intel_put_dpll,
1298	.update_ref_clks = hsw_update_dpll_ref_clks,
1299	.dump_hw_state = hsw_dump_hw_state,
1300	.compare_hw_state = hsw_compare_hw_state,
1301};
1302
1303struct skl_dpll_regs {
1304	i915_reg_t ctl, cfgcr1, cfgcr2;
1305};
1306
1307/* this array is indexed by the *shared* pll id */
1308static const struct skl_dpll_regs skl_dpll_regs[4] = {
1309	{
1310		/* DPLL 0 */
1311		.ctl = LCPLL1_CTL,
1312		/* DPLL 0 doesn't support HDMI mode */
1313	},
1314	{
1315		/* DPLL 1 */
1316		.ctl = LCPLL2_CTL,
1317		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1318		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1319	},
1320	{
1321		/* DPLL 2 */
1322		.ctl = WRPLL_CTL(0),
1323		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1324		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1325	},
1326	{
1327		/* DPLL 3 */
1328		.ctl = WRPLL_CTL(1),
1329		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1330		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1331	},
1332};
1333
1334static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1335				    struct intel_shared_dpll *pll)
1336{
1337	const enum intel_dpll_id id = pll->info->id;
1338
1339	intel_de_rmw(i915, DPLL_CTRL1,
1340		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1341		     pll->state.hw_state.ctrl1 << (id * 6));
1342	intel_de_posting_read(i915, DPLL_CTRL1);
1343}
1344
1345static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1346			       struct intel_shared_dpll *pll)
1347{
1348	const struct skl_dpll_regs *regs = skl_dpll_regs;
1349	const enum intel_dpll_id id = pll->info->id;
1350
1351	skl_ddi_pll_write_ctrl1(i915, pll);
1352
1353	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1354	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1355	intel_de_posting_read(i915, regs[id].cfgcr1);
1356	intel_de_posting_read(i915, regs[id].cfgcr2);
1357
1358	/* the enable bit is always bit 31 */
1359	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1360
1361	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1362		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1363}
1364
1365static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1366				 struct intel_shared_dpll *pll)
1367{
1368	skl_ddi_pll_write_ctrl1(i915, pll);
1369}
1370
1371static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1372				struct intel_shared_dpll *pll)
1373{
1374	const struct skl_dpll_regs *regs = skl_dpll_regs;
1375	const enum intel_dpll_id id = pll->info->id;
1376
1377	/* the enable bit is always bit 31 */
1378	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1379	intel_de_posting_read(i915, regs[id].ctl);
1380}
1381
1382static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1383				  struct intel_shared_dpll *pll)
1384{
1385}
1386
1387static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1388				     struct intel_shared_dpll *pll,
1389				     struct intel_dpll_hw_state *hw_state)
1390{
1391	u32 val;
1392	const struct skl_dpll_regs *regs = skl_dpll_regs;
1393	const enum intel_dpll_id id = pll->info->id;
1394	intel_wakeref_t wakeref;
1395	bool ret;
1396
1397	wakeref = intel_display_power_get_if_enabled(i915,
1398						     POWER_DOMAIN_DISPLAY_CORE);
1399	if (!wakeref)
1400		return false;
1401
1402	ret = false;
1403
1404	val = intel_de_read(i915, regs[id].ctl);
1405	if (!(val & LCPLL_PLL_ENABLE))
1406		goto out;
1407
1408	val = intel_de_read(i915, DPLL_CTRL1);
1409	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1410
1411	/* avoid reading back stale values if HDMI mode is not enabled */
1412	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1413		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1414		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1415	}
1416	ret = true;
1417
1418out:
1419	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1420
1421	return ret;
1422}
1423
1424static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1425				       struct intel_shared_dpll *pll,
1426				       struct intel_dpll_hw_state *hw_state)
1427{
1428	const struct skl_dpll_regs *regs = skl_dpll_regs;
1429	const enum intel_dpll_id id = pll->info->id;
1430	intel_wakeref_t wakeref;
1431	u32 val;
1432	bool ret;
1433
1434	wakeref = intel_display_power_get_if_enabled(i915,
1435						     POWER_DOMAIN_DISPLAY_CORE);
1436	if (!wakeref)
1437		return false;
1438
1439	ret = false;
1440
1441	/* DPLL0 is always enabled since it drives CDCLK */
1442	val = intel_de_read(i915, regs[id].ctl);
1443	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1444		goto out;
1445
1446	val = intel_de_read(i915, DPLL_CTRL1);
1447	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1448
1449	ret = true;
1450
1451out:
1452	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1453
1454	return ret;
1455}
1456
1457struct skl_wrpll_context {
1458	u64 min_deviation;		/* current minimal deviation */
1459	u64 central_freq;		/* chosen central freq */
1460	u64 dco_freq;			/* chosen dco freq */
1461	unsigned int p;			/* chosen divider */
1462};
1463
1464/* DCO freq must be within +1%/-6%  of the DCO central freq */
1465#define SKL_DCO_MAX_PDEVIATION	100
1466#define SKL_DCO_MAX_NDEVIATION	600
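/*
 * skl_wrpll_try_divider() below computes the deviation as
 * 10000 * abs_diff(dco_freq, central_freq) / central_freq, i.e. in units of
 * 0.01%, so 100 and 600 correspond to the +1% and -6% limits above.
 */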
1467
1468static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1469				  u64 central_freq,
1470				  u64 dco_freq,
1471				  unsigned int divider)
1472{
1473	u64 deviation;
1474
1475	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1476			      central_freq);
1477
1478	/* positive deviation */
1479	if (dco_freq >= central_freq) {
1480		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1481		    deviation < ctx->min_deviation) {
1482			ctx->min_deviation = deviation;
1483			ctx->central_freq = central_freq;
1484			ctx->dco_freq = dco_freq;
1485			ctx->p = divider;
1486		}
1487	/* negative deviation */
1488	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1489		   deviation < ctx->min_deviation) {
1490		ctx->min_deviation = deviation;
1491		ctx->central_freq = central_freq;
1492		ctx->dco_freq = dco_freq;
1493		ctx->p = divider;
1494	}
1495}
1496
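/*
 * Split the overall divider p into the three hardware divider stages,
 * p = p0 * p1 * p2 (mapped to pdiv, qdiv and kdiv respectively in
 * skl_wrpll_params_populate()). E.g. p = 12 -> half = 6 -> p0 = 2,
 * p1 = 3, p2 = 2.
 */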
1497static void skl_wrpll_get_multipliers(unsigned int p,
1498				      unsigned int *p0 /* out */,
1499				      unsigned int *p1 /* out */,
1500				      unsigned int *p2 /* out */)
1501{
1502	/* even dividers */
1503	if (p % 2 == 0) {
1504		unsigned int half = p / 2;
1505
1506		if (half == 1 || half == 2 || half == 3 || half == 5) {
1507			*p0 = 2;
1508			*p1 = 1;
1509			*p2 = half;
1510		} else if (half % 2 == 0) {
1511			*p0 = 2;
1512			*p1 = half / 2;
1513			*p2 = 2;
1514		} else if (half % 3 == 0) {
1515			*p0 = 3;
1516			*p1 = half / 3;
1517			*p2 = 2;
1518		} else if (half % 7 == 0) {
1519			*p0 = 7;
1520			*p1 = half / 7;
1521			*p2 = 2;
1522		}
1523	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1524		*p0 = 3;
1525		*p1 = 1;
1526		*p2 = p / 3;
1527	} else if (p == 5 || p == 7) {
1528		*p0 = p;
1529		*p1 = 1;
1530		*p2 = 1;
1531	} else if (p == 15) {
1532		*p0 = 3;
1533		*p1 = 1;
1534		*p2 = 5;
1535	} else if (p == 21) {
1536		*p0 = 7;
1537		*p1 = 1;
1538		*p2 = 3;
1539	} else if (p == 35) {
1540		*p0 = 7;
1541		*p1 = 1;
1542		*p2 = 5;
1543	}
1544}
1545
1546struct skl_wrpll_params {
1547	u32 dco_fraction;
1548	u32 dco_integer;
1549	u32 qdiv_ratio;
1550	u32 qdiv_mode;
1551	u32 kdiv;
1552	u32 pdiv;
1553	u32 central_freq;
1554};
1555
1556static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1557				      u64 afe_clock,
1558				      int ref_clock,
1559				      u64 central_freq,
1560				      u32 p0, u32 p1, u32 p2)
1561{
1562	u64 dco_freq;
1563
1564	switch (central_freq) {
1565	case 9600000000ULL:
1566		params->central_freq = 0;
1567		break;
1568	case 9000000000ULL:
1569		params->central_freq = 1;
1570		break;
1571	case 8400000000ULL:
1572		params->central_freq = 3;
1573	}
1574
1575	switch (p0) {
1576	case 1:
1577		params->pdiv = 0;
1578		break;
1579	case 2:
1580		params->pdiv = 1;
1581		break;
1582	case 3:
1583		params->pdiv = 2;
1584		break;
1585	case 7:
1586		params->pdiv = 4;
1587		break;
1588	default:
1589		WARN(1, "Incorrect PDiv\n");
1590	}
1591
1592	switch (p2) {
1593	case 5:
1594		params->kdiv = 0;
1595		break;
1596	case 2:
1597		params->kdiv = 1;
1598		break;
1599	case 3:
1600		params->kdiv = 2;
1601		break;
1602	case 1:
1603		params->kdiv = 3;
1604		break;
1605	default:
1606		WARN(1, "Incorrect KDiv\n");
1607	}
1608
1609	params->qdiv_ratio = p1;
1610	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1611
1612	dco_freq = p0 * p1 * p2 * afe_clock;
1613
1614	/*
1615	 * Intermediate values are in Hz.
1616	 * Divide by MHz to match bspec
1617	 */
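	/*
	 * Worked example (assuming a 24 MHz reference, ref_clock = 24000):
	 * a 594 MHz pixel clock gives afe_clock = 2970 MHz and, with p = 3,
	 * dco_freq = 8910 MHz, so dco_integer = 8910 / 24 = 371 and
	 * dco_fraction = 0.25 * 0x8000 = 0x2000.
	 */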
1618	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1619	params->dco_fraction =
1620		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1621			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1622}
1623
1624static int
1625skl_ddi_calculate_wrpll(int clock /* in Hz */,
1626			int ref_clock,
1627			struct skl_wrpll_params *wrpll_params)
1628{
1629	static const u64 dco_central_freq[3] = { 8400000000ULL,
1630						 9000000000ULL,
1631						 9600000000ULL };
1632	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1633					    24, 28, 30, 32, 36, 40, 42, 44,
1634					    48, 52, 54, 56, 60, 64, 66, 68,
1635					    70, 72, 76, 78, 80, 84, 88, 90,
1636					    92, 96, 98 };
1637	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1638	static const struct {
1639		const u8 *list;
1640		int n_dividers;
1641	} dividers[] = {
1642		{ even_dividers, ARRAY_SIZE(even_dividers) },
1643		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1644	};
1645	struct skl_wrpll_context ctx = {
1646		.min_deviation = U64_MAX,
1647	};
1648	unsigned int dco, d, i;
1649	unsigned int p0, p1, p2;
1650	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1651
1652	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1653		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1654			for (i = 0; i < dividers[d].n_dividers; i++) {
1655				unsigned int p = dividers[d].list[i];
1656				u64 dco_freq = p * afe_clock;
1657
1658				skl_wrpll_try_divider(&ctx,
1659						      dco_central_freq[dco],
1660						      dco_freq,
1661						      p);
1662				/*
1663				 * Skip the remaining dividers if we're sure to
1664				 * have found the definitive divider; we can't
1665				 * improve on a 0 deviation.
1666				 */
1667				if (ctx.min_deviation == 0)
1668					goto skip_remaining_dividers;
1669			}
1670		}
1671
1672skip_remaining_dividers:
1673		/*
1674		 * If a solution is found with an even divider, prefer
1675		 * this one.
1676		 */
1677		if (d == 0 && ctx.p)
1678			break;
1679	}
1680
1681	if (!ctx.p)
1682		return -EINVAL;
1683
1684	/*
1685	 * gcc incorrectly analyses that these can be used without being
1686	 * initialized. To be fair, it's hard to guess.
1687	 */
1688	p0 = p1 = p2 = 0;
1689	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1690	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1691				  ctx.central_freq, p0, p1, p2);
1692
1693	return 0;
1694}
1695
1696static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1697				  const struct intel_shared_dpll *pll,
1698				  const struct intel_dpll_hw_state *pll_state)
1699{
1700	int ref_clock = i915->display.dpll.ref_clks.nssc;
1701	u32 p0, p1, p2, dco_freq;
1702
1703	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1704	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1705
1706	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1707		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1708	else
1709		p1 = 1;
1710
1711
1712	switch (p0) {
1713	case DPLL_CFGCR2_PDIV_1:
1714		p0 = 1;
1715		break;
1716	case DPLL_CFGCR2_PDIV_2:
1717		p0 = 2;
1718		break;
1719	case DPLL_CFGCR2_PDIV_3:
1720		p0 = 3;
1721		break;
1722	case DPLL_CFGCR2_PDIV_7_INVALID:
1723		/*
1724		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1725		 * handling it the same way as PDIV_7.
1726		 */
1727		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1728		fallthrough;
1729	case DPLL_CFGCR2_PDIV_7:
1730		p0 = 7;
1731		break;
1732	default:
1733		MISSING_CASE(p0);
1734		return 0;
1735	}
1736
1737	switch (p2) {
1738	case DPLL_CFGCR2_KDIV_5:
1739		p2 = 5;
1740		break;
1741	case DPLL_CFGCR2_KDIV_2:
1742		p2 = 2;
1743		break;
1744	case DPLL_CFGCR2_KDIV_3:
1745		p2 = 3;
1746		break;
1747	case DPLL_CFGCR2_KDIV_1:
1748		p2 = 1;
1749		break;
1750	default:
1751		MISSING_CASE(p2);
1752		return 0;
1753	}
1754
1755	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1756		   ref_clock;
1757
1758	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1759		    ref_clock / 0x8000;
1760
1761	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1762		return 0;
1763
1764	return dco_freq / (p0 * p1 * p2 * 5);
1765}
1766
1767static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1768{
1769	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1770	struct skl_wrpll_params wrpll_params = {};
1771	u32 ctrl1, cfgcr1, cfgcr2;
1772	int ret;
1773
1774	/*
1775	 * See comment in intel_dpll_hw_state to understand why we always use 0
1776	 * as the DPLL id in this function.
1777	 */
1778	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1779
1780	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1781
1782	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1783				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1784	if (ret)
1785		return ret;
1786
1787	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1788		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1789		wrpll_params.dco_integer;
1790
1791	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1792		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1793		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1794		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1795		wrpll_params.central_freq;
1796
1797	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1798	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1799	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1800
1801	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1802							&crtc_state->dpll_hw_state);
1803
1804	return 0;
1805}
1806
1807static int
1808skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1809{
1810	u32 ctrl1;
1811
1812	/*
1813	 * See comment in intel_dpll_hw_state to understand why we always use 0
1814	 * as the DPLL id in this function.
1815	 */
1816	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1817	switch (crtc_state->port_clock / 2) {
1818	case 81000:
1819		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1820		break;
1821	case 135000:
1822		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1823		break;
1824	case 270000:
1825		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1826		break;
1827		/* eDP 1.4 rates */
1828	case 162000:
1829		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1830		break;
1831	case 108000:
1832		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1833		break;
1834	case 216000:
1835		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1836		break;
1837	}
1838
1839	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1840
1841	return 0;
1842}
1843
1844static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1845				  const struct intel_shared_dpll *pll,
1846				  const struct intel_dpll_hw_state *pll_state)
1847{
1848	int link_clock = 0;
1849
1850	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1851		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1852	case DPLL_CTRL1_LINK_RATE_810:
1853		link_clock = 81000;
1854		break;
1855	case DPLL_CTRL1_LINK_RATE_1080:
1856		link_clock = 108000;
1857		break;
1858	case DPLL_CTRL1_LINK_RATE_1350:
1859		link_clock = 135000;
1860		break;
1861	case DPLL_CTRL1_LINK_RATE_1620:
1862		link_clock = 162000;
1863		break;
1864	case DPLL_CTRL1_LINK_RATE_2160:
1865		link_clock = 216000;
1866		break;
1867	case DPLL_CTRL1_LINK_RATE_2700:
1868		link_clock = 270000;
1869		break;
1870	default:
1871		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1872		break;
1873	}
1874
1875	return link_clock * 2;
1876}
1877
1878static int skl_compute_dpll(struct intel_atomic_state *state,
1879			    struct intel_crtc *crtc,
1880			    struct intel_encoder *encoder)
1881{
1882	struct intel_crtc_state *crtc_state =
1883		intel_atomic_get_new_crtc_state(state, crtc);
1884
1885	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1886		return skl_ddi_hdmi_pll_dividers(crtc_state);
1887	else if (intel_crtc_has_dp_encoder(crtc_state))
1888		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1889	else
1890		return -EINVAL;
1891}
1892
1893static int skl_get_dpll(struct intel_atomic_state *state,
1894			struct intel_crtc *crtc,
1895			struct intel_encoder *encoder)
1896{
1897	struct intel_crtc_state *crtc_state =
1898		intel_atomic_get_new_crtc_state(state, crtc);
1899	struct intel_shared_dpll *pll;
1900
1901	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1902		pll = intel_find_shared_dpll(state, crtc,
1903					     &crtc_state->dpll_hw_state,
1904					     BIT(DPLL_ID_SKL_DPLL0));
1905	else
1906		pll = intel_find_shared_dpll(state, crtc,
1907					     &crtc_state->dpll_hw_state,
1908					     BIT(DPLL_ID_SKL_DPLL3) |
1909					     BIT(DPLL_ID_SKL_DPLL2) |
1910					     BIT(DPLL_ID_SKL_DPLL1));
1911	if (!pll)
1912		return -EINVAL;
1913
1914	intel_reference_shared_dpll(state, crtc,
1915				    pll, &crtc_state->dpll_hw_state);
1916
1917	crtc_state->shared_dpll = pll;
1918
1919	return 0;
1920}
1921
1922static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1923				const struct intel_shared_dpll *pll,
1924				const struct intel_dpll_hw_state *pll_state)
1925{
1926	/*
1927	 * The ctrl1 register value is already shifted for each PLL; just use 0
1928	 * to get the internal shift for each field.
1929	 */
1930	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1931		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1932	else
1933		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1934}
1935
1936static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1937{
1938	/* No SSC ref */
1939	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1940}
1941
1942static void skl_dump_hw_state(struct drm_i915_private *i915,
1943			      const struct intel_dpll_hw_state *hw_state)
1944{
1945	drm_dbg_kms(&i915->drm, "dpll_hw_state: "
1946		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1947		      hw_state->ctrl1,
1948		      hw_state->cfgcr1,
1949		      hw_state->cfgcr2);
1950}
1951
1952static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
1953				 const struct intel_dpll_hw_state *b)
1954{
1955	return a->ctrl1 == b->ctrl1 &&
1956		a->cfgcr1 == b->cfgcr1 &&
1957		a->cfgcr2 == b->cfgcr2;
1958}
1959
1960static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1961	.enable = skl_ddi_pll_enable,
1962	.disable = skl_ddi_pll_disable,
1963	.get_hw_state = skl_ddi_pll_get_hw_state,
1964	.get_freq = skl_ddi_pll_get_freq,
1965};
1966
1967static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1968	.enable = skl_ddi_dpll0_enable,
1969	.disable = skl_ddi_dpll0_disable,
1970	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1971	.get_freq = skl_ddi_pll_get_freq,
1972};
1973
1974static const struct dpll_info skl_plls[] = {
1975	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
1976	  .always_on = true, },
1977	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
1978	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
1979	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
1980	{}
1981};
1982
1983static const struct intel_dpll_mgr skl_pll_mgr = {
1984	.dpll_info = skl_plls,
1985	.compute_dplls = skl_compute_dpll,
1986	.get_dplls = skl_get_dpll,
1987	.put_dplls = intel_put_dpll,
1988	.update_ref_clks = skl_update_dpll_ref_clks,
1989	.dump_hw_state = skl_dump_hw_state,
1990	.compare_hw_state = skl_compare_hw_state,
1991};
1992
1993static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1994			       struct intel_shared_dpll *pll)
1995{
1996	u32 temp;
1997	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1998	enum dpio_phy phy;
1999	enum dpio_channel ch;
2000
2001	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2002
2003	/* Non-SSC reference */
2004	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2005
2006	if (IS_GEMINILAKE(i915)) {
2007		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2008			     0, PORT_PLL_POWER_ENABLE);
2009
2010		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2011				 PORT_PLL_POWER_STATE), 200))
2012			drm_err(&i915->drm,
2013				"Power state not set for PLL:%d\n", port);
2014	}
2015
2016	/* Disable 10 bit clock */
2017	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2018		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2019
2020	/* Write P1 & P2 */
2021	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2022		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
2023
2024	/* Write M2 integer */
2025	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2026		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
2027
2028	/* Write N */
2029	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2030		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);
2031
2032	/* Write M2 fraction */
2033	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2034		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
2035
2036	/* Write M2 fraction enable */
2037	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2038		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
2039
2040	/* Write coeff */
2041	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2042	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2043	temp &= ~PORT_PLL_INT_COEFF_MASK;
2044	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2045	temp |= pll->state.hw_state.pll6;
2046	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2047
2048	/* Write calibration val */
2049	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2050		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
2051
2052	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2053		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
2054
2055	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2056	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2057	temp &= ~PORT_PLL_DCO_AMP_MASK;
2058	temp |= pll->state.hw_state.pll10;
2059	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2060
2061	/* Recalibrate with new settings */
2062	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2063	temp |= PORT_PLL_RECALIBRATE;
2064	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2065	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2066	temp |= pll->state.hw_state.ebb4;
2067	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2068
2069	/* Enable PLL */
2070	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2071	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2072
2073	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2074			200))
2075		drm_err(&i915->drm, "PLL %d not locked\n", port);
2076
2077	if (IS_GEMINILAKE(i915)) {
2078		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2079		temp |= DCC_DELAY_RANGE_2;
2080		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2081	}
2082
2083	/*
2084	 * While we write to the group register to program all lanes at once,
2085	 * we can only read back individual lane registers; we pick lanes 0/1 for that.
2086	 */
2087	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2088	temp &= ~LANE_STAGGER_MASK;
2089	temp &= ~LANESTAGGER_STRAP_OVRD;
2090	temp |= pll->state.hw_state.pcsdw12;
2091	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2092}
2093
2094static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2095				struct intel_shared_dpll *pll)
2096{
2097	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2098
2099	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2100	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2101
2102	if (IS_GEMINILAKE(i915)) {
2103		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2104			     PORT_PLL_POWER_ENABLE, 0);
2105
2106		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2107				  PORT_PLL_POWER_STATE), 200))
2108			drm_err(&i915->drm,
2109				"Power state not reset for PLL:%d\n", port);
2110	}
2111}
2112
2113static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2114				     struct intel_shared_dpll *pll,
2115				     struct intel_dpll_hw_state *hw_state)
2116{
2117	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2118	intel_wakeref_t wakeref;
2119	enum dpio_phy phy;
2120	enum dpio_channel ch;
2121	u32 val;
2122	bool ret;
2123
2124	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2125
2126	wakeref = intel_display_power_get_if_enabled(i915,
2127						     POWER_DOMAIN_DISPLAY_CORE);
2128	if (!wakeref)
2129		return false;
2130
2131	ret = false;
2132
2133	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2134	if (!(val & PORT_PLL_ENABLE))
2135		goto out;
2136
2137	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2138	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2139
2140	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2141	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2142
2143	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2144	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2145
2146	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2147	hw_state->pll1 &= PORT_PLL_N_MASK;
2148
2149	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2150	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2151
2152	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2153	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2154
2155	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2156	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2157			  PORT_PLL_INT_COEFF_MASK |
2158			  PORT_PLL_GAIN_CTL_MASK;
2159
2160	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2161	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2162
2163	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2164	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2165
2166	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2167	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2168			   PORT_PLL_DCO_AMP_MASK;
2169
2170	/*
2171	 * While we write to the group register to program all lanes at once,
2172	 * we can only read individual lane registers. We configure all lanes the
2173	 * same way, so just read out lanes 0/1 here and log a note if lanes 2/3 differ.
2174	 */
2175	hw_state->pcsdw12 = intel_de_read(i915,
2176					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2177	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2178		drm_dbg(&i915->drm,
2179			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2180			hw_state->pcsdw12,
2181			intel_de_read(i915,
2182				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2183	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2184
2185	ret = true;
2186
2187out:
2188	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2189
2190	return ret;
2191}
2192
2193/* pre-calculated values for DP linkrates */
2194static const struct dpll bxt_dp_clk_val[] = {
2195	/* m2 is .22 binary fixed point */
2196	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2197	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2198	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2199	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2200	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2201	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2202	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2203};
2204
2205static int
2206bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2207			  struct dpll *clk_div)
2208{
2209	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2210
2211	/* Calculate HDMI div */
2212	/*
2213	 * FIXME: tie the following calculation into
2214	 * i9xx_crtc_compute_clock
2215	 */
2216	if (!bxt_find_best_dpll(crtc_state, clk_div))
2217		return -EINVAL;
2218
2219	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2220
2221	return 0;
2222}
2223
2224static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2225				    struct dpll *clk_div)
2226{
2227	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2228	int i;
2229
2230	*clk_div = bxt_dp_clk_val[0];
2231	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2232		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2233			*clk_div = bxt_dp_clk_val[i];
2234			break;
2235		}
2236	}
2237
2238	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2239
2240	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2241		    clk_div->dot != crtc_state->port_clock);
2242}
2243
2244static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2245				     const struct dpll *clk_div)
2246{
2247	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2248	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2249	int clock = crtc_state->port_clock;
2250	int vco = clk_div->vco;
2251	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2252	u32 lanestagger;
2253
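	/* Loop filter coefficients and the target count depend on the VCO range. */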
2254	if (vco >= 6200000 && vco <= 6700000) {
2255		prop_coef = 4;
2256		int_coef = 9;
2257		gain_ctl = 3;
2258		targ_cnt = 8;
2259	} else if ((vco > 5400000 && vco < 6200000) ||
2260			(vco >= 4800000 && vco < 5400000)) {
2261		prop_coef = 5;
2262		int_coef = 11;
2263		gain_ctl = 3;
2264		targ_cnt = 9;
2265	} else if (vco == 5400000) {
2266		prop_coef = 3;
2267		int_coef = 8;
2268		gain_ctl = 1;
2269		targ_cnt = 9;
2270	} else {
2271		drm_err(&i915->drm, "Invalid VCO\n");
2272		return -EINVAL;
2273	}
2274
2275	if (clock > 270000)
2276		lanestagger = 0x18;
2277	else if (clock > 135000)
2278		lanestagger = 0x0d;
2279	else if (clock > 67000)
2280		lanestagger = 0x07;
2281	else if (clock > 33000)
2282		lanestagger = 0x04;
2283	else
2284		lanestagger = 0x02;
2285
2286	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2287	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2288	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2289	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2290
2291	if (clk_div->m2 & 0x3fffff)
2292		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2293
2294	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2295		PORT_PLL_INT_COEFF(int_coef) |
2296		PORT_PLL_GAIN_CTL(gain_ctl);
2297
2298	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2299
2300	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2301
2302	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2303		PORT_PLL_DCO_AMP_OVR_EN_H;
2304
2305	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2306
2307	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2308
2309	return 0;
2310}
2311
2312static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2313				const struct intel_shared_dpll *pll,
2314				const struct intel_dpll_hw_state *pll_state)
2315{
2316	struct dpll clock;
2317
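	/* Reassemble the dividers; m2 is in .22 binary fixed point (cf. bxt_dp_clk_val). */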
2318	clock.m1 = 2;
2319	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2320	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2321		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2322	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2323	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2324	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2325
2326	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2327}
2328
2329static int
2330bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2331{
2332	struct dpll clk_div = {};
2333
2334	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2335
2336	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2337}
2338
2339static int
2340bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2341{
2342	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2343	struct dpll clk_div = {};
2344	int ret;
2345
2346	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2347
2348	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2349	if (ret)
2350		return ret;
2351
2352	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2353						      &crtc_state->dpll_hw_state);
2354
2355	return 0;
2356}
2357
2358static int bxt_compute_dpll(struct intel_atomic_state *state,
2359			    struct intel_crtc *crtc,
2360			    struct intel_encoder *encoder)
2361{
2362	struct intel_crtc_state *crtc_state =
2363		intel_atomic_get_new_crtc_state(state, crtc);
2364
2365	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2366		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2367	else if (intel_crtc_has_dp_encoder(crtc_state))
2368		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2369	else
2370		return -EINVAL;
2371}
2372
2373static int bxt_get_dpll(struct intel_atomic_state *state,
2374			struct intel_crtc *crtc,
2375			struct intel_encoder *encoder)
2376{
2377	struct intel_crtc_state *crtc_state =
2378		intel_atomic_get_new_crtc_state(state, crtc);
2379	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2380	struct intel_shared_dpll *pll;
2381	enum intel_dpll_id id;
2382
2383	/* 1:1 mapping between ports and PLLs */
2384	id = (enum intel_dpll_id) encoder->port;
2385	pll = intel_get_shared_dpll_by_id(i915, id);
2386
2387	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2388		    crtc->base.base.id, crtc->base.name, pll->info->name);
2389
2390	intel_reference_shared_dpll(state, crtc,
2391				    pll, &crtc_state->dpll_hw_state);
2392
2393	crtc_state->shared_dpll = pll;
2394
2395	return 0;
2396}
2397
2398static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2399{
2400	i915->display.dpll.ref_clks.ssc = 100000;
2401	i915->display.dpll.ref_clks.nssc = 100000;
2402	/* DSI non-SSC ref 19.2MHz */
2403}
2404
2405static void bxt_dump_hw_state(struct drm_i915_private *i915,
2406			      const struct intel_dpll_hw_state *hw_state)
2407{
2408	drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2409		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2410		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2411		    hw_state->ebb0,
2412		    hw_state->ebb4,
2413		    hw_state->pll0,
2414		    hw_state->pll1,
2415		    hw_state->pll2,
2416		    hw_state->pll3,
2417		    hw_state->pll6,
2418		    hw_state->pll8,
2419		    hw_state->pll9,
2420		    hw_state->pll10,
2421		    hw_state->pcsdw12);
2422}
2423
2424static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
2425				 const struct intel_dpll_hw_state *b)
2426{
2427	return a->ebb0 == b->ebb0 &&
2428		a->ebb4 == b->ebb4 &&
2429		a->pll0 == b->pll0 &&
2430		a->pll1 == b->pll1 &&
2431		a->pll2 == b->pll2 &&
2432		a->pll3 == b->pll3 &&
2433		a->pll6 == b->pll6 &&
2434		a->pll8 == b->pll8 &&
2435		a->pll10 == b->pll10 &&
2436		a->pcsdw12 == b->pcsdw12;
2437}
2438
2439static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2440	.enable = bxt_ddi_pll_enable,
2441	.disable = bxt_ddi_pll_disable,
2442	.get_hw_state = bxt_ddi_pll_get_hw_state,
2443	.get_freq = bxt_ddi_pll_get_freq,
2444};
2445
2446static const struct dpll_info bxt_plls[] = {
2447	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2448	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2449	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2450	{}
2451};
2452
2453static const struct intel_dpll_mgr bxt_pll_mgr = {
2454	.dpll_info = bxt_plls,
2455	.compute_dplls = bxt_compute_dpll,
2456	.get_dplls = bxt_get_dpll,
2457	.put_dplls = intel_put_dpll,
2458	.update_ref_clks = bxt_update_dpll_ref_clks,
2459	.dump_hw_state = bxt_dump_hw_state,
2460	.compare_hw_state = bxt_compare_hw_state,
2461};
2462
2463static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2464				      int *qdiv, int *kdiv)
2465{
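	/*
	 * Decompose the total divider into its P, Q and K components such
	 * that bestdiv == *pdiv * *qdiv * *kdiv.
	 */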
2466	/* even dividers */
2467	if (bestdiv % 2 == 0) {
2468		if (bestdiv == 2) {
2469			*pdiv = 2;
2470			*qdiv = 1;
2471			*kdiv = 1;
2472		} else if (bestdiv % 4 == 0) {
2473			*pdiv = 2;
2474			*qdiv = bestdiv / 4;
2475			*kdiv = 2;
2476		} else if (bestdiv % 6 == 0) {
2477			*pdiv = 3;
2478			*qdiv = bestdiv / 6;
2479			*kdiv = 2;
2480		} else if (bestdiv % 5 == 0) {
2481			*pdiv = 5;
2482			*qdiv = bestdiv / 10;
2483			*kdiv = 2;
2484		} else if (bestdiv % 14 == 0) {
2485			*pdiv = 7;
2486			*qdiv = bestdiv / 14;
2487			*kdiv = 2;
2488		}
2489	} else {
2490		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2491			*pdiv = bestdiv;
2492			*qdiv = 1;
2493			*kdiv = 1;
2494		} else { /* 9, 15, 21 */
2495			*pdiv = bestdiv / 3;
2496			*qdiv = 1;
2497			*kdiv = 3;
2498		}
2499	}
2500}
2501
2502static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2503				      u32 dco_freq, u32 ref_freq,
2504				      int pdiv, int qdiv, int kdiv)
2505{
2506	u32 dco;
2507
2508	switch (kdiv) {
2509	case 1:
2510		params->kdiv = 1;
2511		break;
2512	case 2:
2513		params->kdiv = 2;
2514		break;
2515	case 3:
2516		params->kdiv = 4;
2517		break;
2518	default:
2519		WARN(1, "Incorrect KDiv\n");
2520	}
2521
2522	switch (pdiv) {
2523	case 2:
2524		params->pdiv = 1;
2525		break;
2526	case 3:
2527		params->pdiv = 2;
2528		break;
2529	case 5:
2530		params->pdiv = 4;
2531		break;
2532	case 7:
2533		params->pdiv = 8;
2534		break;
2535	default:
2536		WARN(1, "Incorrect PDiv\n");
2537	}
2538
2539	WARN_ON(kdiv != 2 && qdiv != 1);
2540
2541	params->qdiv_ratio = qdiv;
2542	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2543
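	/*
	 * Express the DCO frequency as a fixed-point multiple of the reference
	 * clock with 15 fractional bits: the integer part goes to dco_integer,
	 * the low 15 bits to dco_fraction.
	 */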
2544	dco = div_u64((u64)dco_freq << 15, ref_freq);
2545
2546	params->dco_integer = dco >> 15;
2547	params->dco_fraction = dco & 0x7fff;
2548}
2549
2550/*
2551 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2552 * Program half of the nominal DCO divider fraction value.
2553 */
2554static bool
2555ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2556{
2557	return ((IS_ELKHARTLAKE(i915) &&
2558		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2559		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2560		 i915->display.dpll.ref_clks.nssc == 38400;
2561}
2562
2563struct icl_combo_pll_params {
2564	int clock;
2565	struct skl_wrpll_params wrpll;
2566};
2567
2568/*
2569 * These values are already adjusted: they're the bits we write to the
2570 * registers, not the logical values.
2571 */
2572static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2573	{ 540000,
2574	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2575	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2576	{ 270000,
2577	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2578	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2579	{ 162000,
2580	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2581	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2582	{ 324000,
2583	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2584	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2585	{ 216000,
2586	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2587	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2588	{ 432000,
2589	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2590	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2591	{ 648000,
2592	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2593	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2594	{ 810000,
2595	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2596	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2597};
2598
2599
2600/* Also used for 38.4 MHz values. */
2601static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2602	{ 540000,
2603	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2604	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2605	{ 270000,
2606	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2607	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2608	{ 162000,
2609	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2610	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2611	{ 324000,
2612	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2613	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2614	{ 216000,
2615	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2616	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2617	{ 432000,
2618	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2619	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2620	{ 648000,
2621	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2622	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2623	{ 810000,
2624	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2625	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2626};
2627
2628static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2629	.dco_integer = 0x151, .dco_fraction = 0x4000,
2630	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2631};
2632
2633static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2634	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2635	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2636};
2637
2638static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2639	.dco_integer = 0x54, .dco_fraction = 0x3000,
2640	/* the following params are unused */
2641	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2642};
2643
2644static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2645	.dco_integer = 0x43, .dco_fraction = 0x4000,
2646	/* the following params are unused */
2647};
2648
2649static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2650				 struct skl_wrpll_params *pll_params)
2651{
2652	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2653	const struct icl_combo_pll_params *params =
2654		i915->display.dpll.ref_clks.nssc == 24000 ?
2655		icl_dp_combo_pll_24MHz_values :
2656		icl_dp_combo_pll_19_2MHz_values;
2657	int clock = crtc_state->port_clock;
2658	int i;
2659
2660	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2661		if (clock == params[i].clock) {
2662			*pll_params = params[i].wrpll;
2663			return 0;
2664		}
2665	}
2666
2667	MISSING_CASE(clock);
2668	return -EINVAL;
2669}
2670
2671static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2672			    struct skl_wrpll_params *pll_params)
2673{
2674	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2675
2676	if (DISPLAY_VER(i915) >= 12) {
2677		switch (i915->display.dpll.ref_clks.nssc) {
2678		default:
2679			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2680			fallthrough;
2681		case 19200:
2682		case 38400:
2683			*pll_params = tgl_tbt_pll_19_2MHz_values;
2684			break;
2685		case 24000:
2686			*pll_params = tgl_tbt_pll_24MHz_values;
2687			break;
2688		}
2689	} else {
2690		switch (i915->display.dpll.ref_clks.nssc) {
2691		default:
2692			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2693			fallthrough;
2694		case 19200:
2695		case 38400:
2696			*pll_params = icl_tbt_pll_19_2MHz_values;
2697			break;
2698		case 24000:
2699			*pll_params = icl_tbt_pll_24MHz_values;
2700			break;
2701		}
2702	}
2703
2704	return 0;
2705}
2706
2707static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2708				    const struct intel_shared_dpll *pll,
2709				    const struct intel_dpll_hw_state *pll_state)
2710{
2711	/*
2712	 * The PLL outputs multiple frequencies at the same time; the selection
2713	 * is made at the DDI clock mux level.
2714	 */
2715	drm_WARN_ON(&i915->drm, 1);
2716
2717	return 0;
2718}
2719
2720static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2721{
2722	int ref_clock = i915->display.dpll.ref_clks.nssc;
2723
2724	/*
2725	 * For ICL+, the spec states: if reference frequency is 38.4,
2726	 * use 19.2 because the DPLL automatically divides that by 2.
2727	 */
2728	if (ref_clock == 38400)
2729		ref_clock = 19200;
2730
2731	return ref_clock;
2732}
2733
2734static int
2735icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2736	       struct skl_wrpll_params *wrpll_params)
2737{
2738	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2739	int ref_clock = icl_wrpll_ref_clock(i915);
2740	u32 afe_clock = crtc_state->port_clock * 5;
2741	u32 dco_min = 7998000;
2742	u32 dco_max = 10000000;
2743	u32 dco_mid = (dco_min + dco_max) / 2;
2744	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2745					 18, 20, 24, 28, 30, 32,  36,  40,
2746					 42, 44, 48, 50, 52, 54,  56,  60,
2747					 64, 66, 68, 70, 72, 76,  78,  80,
2748					 84, 88, 90, 92, 96, 98, 100, 102,
2749					  3,  5,  7,  9, 15, 21 };
2750	u32 dco, best_dco = 0, dco_centrality = 0;
2751	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2752	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2753
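	/*
	 * Try each divider and keep the one whose resulting DCO frequency
	 * lands closest to the middle of the allowed DCO range.
	 */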
2754	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2755		dco = afe_clock * dividers[d];
2756
2757		if (dco <= dco_max && dco >= dco_min) {
2758			dco_centrality = abs(dco - dco_mid);
2759
2760			if (dco_centrality < best_dco_centrality) {
2761				best_dco_centrality = dco_centrality;
2762				best_div = dividers[d];
2763				best_dco = dco;
2764			}
2765		}
2766	}
2767
2768	if (best_div == 0)
2769		return -EINVAL;
2770
2771	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2772	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2773				  pdiv, qdiv, kdiv);
2774
2775	return 0;
2776}
2777
2778static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2779				      const struct intel_shared_dpll *pll,
2780				      const struct intel_dpll_hw_state *pll_state)
2781{
2782	int ref_clock = icl_wrpll_ref_clock(i915);
2783	u32 dco_fraction;
2784	u32 p0, p1, p2, dco_freq;
2785
2786	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2787	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2788
2789	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2790		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2791			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2792	else
2793		p1 = 1;
2794
2795	switch (p0) {
2796	case DPLL_CFGCR1_PDIV_2:
2797		p0 = 2;
2798		break;
2799	case DPLL_CFGCR1_PDIV_3:
2800		p0 = 3;
2801		break;
2802	case DPLL_CFGCR1_PDIV_5:
2803		p0 = 5;
2804		break;
2805	case DPLL_CFGCR1_PDIV_7:
2806		p0 = 7;
2807		break;
2808	}
2809
2810	switch (p2) {
2811	case DPLL_CFGCR1_KDIV_1:
2812		p2 = 1;
2813		break;
2814	case DPLL_CFGCR1_KDIV_2:
2815		p2 = 2;
2816		break;
2817	case DPLL_CFGCR1_KDIV_3:
2818		p2 = 3;
2819		break;
2820	}
2821
2822	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2823		   ref_clock;
2824
2825	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2826		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2827
2828	if (ehl_combo_pll_div_frac_wa_needed(i915))
2829		dco_fraction *= 2;
2830
2831	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2832
2833	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2834		return 0;
2835
2836	return dco_freq / (p0 * p1 * p2 * 5);
2837}
2838
2839static void icl_calc_dpll_state(struct drm_i915_private *i915,
2840				const struct skl_wrpll_params *pll_params,
2841				struct intel_dpll_hw_state *pll_state)
2842{
2843	u32 dco_fraction = pll_params->dco_fraction;
2844
2845	if (ehl_combo_pll_div_frac_wa_needed(i915))
2846		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2847
2848	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2849			    pll_params->dco_integer;
2850
2851	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2852			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2853			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2854			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2855
2856	if (DISPLAY_VER(i915) >= 12)
2857		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2858	else
2859		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2860
2861	if (i915->display.vbt.override_afc_startup)
2862		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2863}
2864
2865static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2866				    u32 *target_dco_khz,
2867				    struct intel_dpll_hw_state *state,
2868				    bool is_dkl)
2869{
2870	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2871	u32 dco_min_freq, dco_max_freq;
2872	unsigned int i;
2873	int div2;
2874
2875	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2876	dco_max_freq = is_dp ? 8100000 : 10000000;
2877
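	/*
	 * Scan the div1 x div2 combinations for one that puts the DCO
	 * frequency (5 * div1 * div2 * link clock) inside the allowed range.
	 */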
2878	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2879		int div1 = div1_vals[i];
2880
2881		for (div2 = 10; div2 > 0; div2--) {
2882			int dco = div1 * div2 * clock_khz * 5;
2883			int a_divratio, tlinedrv, inputsel;
2884			u32 hsdiv;
2885
2886			if (dco < dco_min_freq || dco > dco_max_freq)
2887				continue;
2888
2889			if (div2 >= 2) {
2890				/*
2891				 * Note: a_divratio doesn't match the TGL BSpec
2892				 * algorithm, but it matches the hardcoded values
2893				 * and works on HW, at least for DP alt-mode.
2894				 */
2895				a_divratio = is_dp ? 10 : 5;
2896				tlinedrv = is_dkl ? 1 : 2;
2897			} else {
2898				a_divratio = 5;
2899				tlinedrv = 0;
2900			}
2901			inputsel = is_dp ? 0 : 1;
2902
2903			switch (div1) {
2904			default:
2905				MISSING_CASE(div1);
2906				fallthrough;
2907			case 2:
2908				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2909				break;
2910			case 3:
2911				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2912				break;
2913			case 5:
2914				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2915				break;
2916			case 7:
2917				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2918				break;
2919			}
2920
2921			*target_dco_khz = dco;
2922
2923			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2924
2925			state->mg_clktop2_coreclkctl1 =
2926				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2927
2928			state->mg_clktop2_hsclkctl =
2929				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2930				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2931				hsdiv |
2932				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2933
2934			return 0;
2935		}
2936	}
2937
2938	return -EINVAL;
2939}
2940
2941/*
2942 * The specification for this function uses real numbers, so the math had to be
2943 * adapted to integer-only calculation, which is why it looks so different.
2944 */
2945static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2946				 struct intel_dpll_hw_state *pll_state)
2947{
2948	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2949	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2950	int clock = crtc_state->port_clock;
2951	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2952	u32 iref_ndiv, iref_trim, iref_pulse_w;
2953	u32 prop_coeff, int_coeff;
2954	u32 tdc_targetcnt, feedfwgain;
2955	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2956	u64 tmp;
2957	bool use_ssc = false;
2958	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2959	bool is_dkl = DISPLAY_VER(i915) >= 12;
2960	int ret;
2961
2962	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2963				       pll_state, is_dkl);
2964	if (ret)
2965		return ret;
2966
2967	m1div = 2;
2968	m2div_int = dco_khz / (refclk_khz * m1div);
2969	if (m2div_int > 255) {
2970		if (!is_dkl) {
2971			m1div = 4;
2972			m2div_int = dco_khz / (refclk_khz * m1div);
2973		}
2974
2975		if (m2div_int > 255)
2976			return -EINVAL;
2977	}
2978	m2div_rem = dco_khz % (refclk_khz * m1div);
2979
2980	tmp = (u64)m2div_rem * (1 << 22);
2981	do_div(tmp, refclk_khz * m1div);
2982	m2div_frac = tmp;
2983
2984	switch (refclk_khz) {
2985	case 19200:
2986		iref_ndiv = 1;
2987		iref_trim = 28;
2988		iref_pulse_w = 1;
2989		break;
2990	case 24000:
2991		iref_ndiv = 1;
2992		iref_trim = 25;
2993		iref_pulse_w = 2;
2994		break;
2995	case 38400:
2996		iref_ndiv = 2;
2997		iref_trim = 28;
2998		iref_pulse_w = 1;
2999		break;
3000	default:
3001		MISSING_CASE(refclk_khz);
3002		return -EINVAL;
3003	}
3004
3005	/*
3006	 * tdc_res = 0.000003
3007	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3008	 *
3009	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3010	 * was supposed to be a division, but we rearranged the operations of
3011	 * the formula to avoid early divisions so we don't multiply the
3012	 * rounding errors.
3013	 *
3014	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3015	 * we also rearrange to work with integers.
3016	 *
3017	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3018	 * last division by 10.
3019	 */
3020	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
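	/* E.g. with a 19.2 MHz reference this evaluates to tdc_targetcnt = 79. */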
3021
3022	/*
3023	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3024	 * 32 bits. That's not a problem since we round the division down
3025	 * anyway.
3026	 */
3027	feedfwgain = (use_ssc || m2div_rem > 0) ?
3028		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3029
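	/* Loop filter coefficients depend on whether the DCO runs at or above 9 GHz. */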
3030	if (dco_khz >= 9000000) {
3031		prop_coeff = 5;
3032		int_coeff = 10;
3033	} else {
3034		prop_coeff = 4;
3035		int_coeff = 8;
3036	}
3037
3038	if (use_ssc) {
3039		tmp = mul_u32_u32(dco_khz, 47 * 32);
3040		do_div(tmp, refclk_khz * m1div * 10000);
3041		ssc_stepsize = tmp;
3042
3043		tmp = mul_u32_u32(dco_khz, 1000);
3044		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3045	} else {
3046		ssc_stepsize = 0;
3047		ssc_steplen = 0;
3048	}
3049	ssc_steplog = 4;
3050
3051	/* write pll_state calculations */
3052	if (is_dkl) {
3053		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3054					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3055					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3056					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3057		if (i915->display.vbt.override_afc_startup) {
3058			u8 val = i915->display.vbt.override_afc_startup_val;
3059
3060			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3061		}
3062
3063		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3064					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3065
3066		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3067					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3068					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3069					(use_ssc ? DKL_PLL_SSC_EN : 0);
3070
3071		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3072					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3073
3074		pll_state->mg_pll_tdc_coldst_bias =
3075				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3076				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3077
3078	} else {
3079		pll_state->mg_pll_div0 =
3080			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3081			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3082			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3083
3084		pll_state->mg_pll_div1 =
3085			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3086			MG_PLL_DIV1_DITHER_DIV_2 |
3087			MG_PLL_DIV1_NDIVRATIO(1) |
3088			MG_PLL_DIV1_FBPREDIV(m1div);
3089
3090		pll_state->mg_pll_lf =
3091			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3092			MG_PLL_LF_AFCCNTSEL_512 |
3093			MG_PLL_LF_GAINCTRL(1) |
3094			MG_PLL_LF_INT_COEFF(int_coeff) |
3095			MG_PLL_LF_PROP_COEFF(prop_coeff);
3096
3097		pll_state->mg_pll_frac_lock =
3098			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3099			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3100			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3101			MG_PLL_FRAC_LOCK_DCODITHEREN |
3102			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3103		if (use_ssc || m2div_rem > 0)
3104			pll_state->mg_pll_frac_lock |=
3105				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3106
3107		pll_state->mg_pll_ssc =
3108			(use_ssc ? MG_PLL_SSC_EN : 0) |
3109			MG_PLL_SSC_TYPE(2) |
3110			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3111			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3112			MG_PLL_SSC_FLLEN |
3113			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3114
3115		pll_state->mg_pll_tdc_coldst_bias =
3116			MG_PLL_TDC_COLDST_COLDSTART |
3117			MG_PLL_TDC_COLDST_IREFINT_EN |
3118			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3119			MG_PLL_TDC_TDCOVCCORR_EN |
3120			MG_PLL_TDC_TDCSEL(3);
3121
3122		pll_state->mg_pll_bias =
3123			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3124			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3125			MG_PLL_BIAS_BIAS_BONUS(10) |
3126			MG_PLL_BIAS_BIASCAL_EN |
3127			MG_PLL_BIAS_CTRIM(12) |
3128			MG_PLL_BIAS_VREF_RDAC(4) |
3129			MG_PLL_BIAS_IREFTRIM(iref_trim);
3130
3131		if (refclk_khz == 38400) {
3132			pll_state->mg_pll_tdc_coldst_bias_mask =
3133				MG_PLL_TDC_COLDST_COLDSTART;
3134			pll_state->mg_pll_bias_mask = 0;
3135		} else {
3136			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3137			pll_state->mg_pll_bias_mask = -1U;
3138		}
3139
3140		pll_state->mg_pll_tdc_coldst_bias &=
3141			pll_state->mg_pll_tdc_coldst_bias_mask;
3142		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3143	}
3144
3145	return 0;
3146}
3147
3148static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3149				   const struct intel_shared_dpll *pll,
3150				   const struct intel_dpll_hw_state *pll_state)
3151{
3152	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3153	u64 tmp;
3154
3155	ref_clock = i915->display.dpll.ref_clks.nssc;
3156
3157	if (DISPLAY_VER(i915) >= 12) {
3158		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3159		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3160		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3161
3162		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3163			m2_frac = pll_state->mg_pll_bias &
3164				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3165			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3166		} else {
3167			m2_frac = 0;
3168		}
3169	} else {
3170		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3171		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3172
3173		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3174			m2_frac = pll_state->mg_pll_div0 &
3175				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3176			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3177		} else {
3178			m2_frac = 0;
3179		}
3180	}
3181
3182	switch (pll_state->mg_clktop2_hsclkctl &
3183		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3184	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3185		div1 = 2;
3186		break;
3187	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3188		div1 = 3;
3189		break;
3190	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3191		div1 = 5;
3192		break;
3193	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3194		div1 = 7;
3195		break;
3196	default:
3197		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3198		return 0;
3199	}
3200
3201	div2 = (pll_state->mg_clktop2_hsclkctl &
3202		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3203		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3204
3205	/* A div2 value of 0 is the same as 1, i.e. no division */
3206	if (div2 == 0)
3207		div2 = 1;
3208
3209	/*
3210	 * Adjust the original formula to delay the division by 2^22 in order to
3211	 * minimize possible rounding errors.
3212	 */
3213	tmp = (u64)m1 * m2_int * ref_clock +
3214	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3215	tmp = div_u64(tmp, 5 * div1 * div2);
3216
3217	return tmp;
3218}
3219
3220/**
3221 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3222 * @crtc_state: state for the CRTC to select the DPLL for
3223 * @port_dpll_id: the active @port_dpll_id to select
3224 *
3225 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3226 * CRTC.
3227 */
3228void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3229			      enum icl_port_dpll_id port_dpll_id)
3230{
3231	struct icl_port_dpll *port_dpll =
3232		&crtc_state->icl_port_dplls[port_dpll_id];
3233
3234	crtc_state->shared_dpll = port_dpll->pll;
3235	crtc_state->dpll_hw_state = port_dpll->hw_state;
3236}
3237
3238static void icl_update_active_dpll(struct intel_atomic_state *state,
3239				   struct intel_crtc *crtc,
3240				   struct intel_encoder *encoder)
3241{
3242	struct intel_crtc_state *crtc_state =
3243		intel_atomic_get_new_crtc_state(state, crtc);
3244	struct intel_digital_port *primary_port;
3245	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3246
3247	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3248		enc_to_mst(encoder)->primary :
3249		enc_to_dig_port(encoder);
3250
3251	if (primary_port &&
3252	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3253	     intel_tc_port_in_legacy_mode(primary_port)))
3254		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3255
3256	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3257}
3258
3259static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3260				      struct intel_crtc *crtc)
3261{
3262	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3263	struct intel_crtc_state *crtc_state =
3264		intel_atomic_get_new_crtc_state(state, crtc);
3265	struct icl_port_dpll *port_dpll =
3266		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3267	struct skl_wrpll_params pll_params = {};
3268	int ret;
3269
3270	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3271	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3272		ret = icl_calc_wrpll(crtc_state, &pll_params);
3273	else
3274		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3275
3276	if (ret)
3277		return ret;
3278
3279	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3280
3281	/* this is mainly for the fastset check */
3282	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3283
3284	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3285							    &port_dpll->hw_state);
3286
3287	return 0;
3288}
3289
3290static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3291				  struct intel_crtc *crtc,
3292				  struct intel_encoder *encoder)
3293{
3294	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3295	struct intel_crtc_state *crtc_state =
3296		intel_atomic_get_new_crtc_state(state, crtc);
3297	struct icl_port_dpll *port_dpll =
3298		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3299	enum port port = encoder->port;
3300	unsigned long dpll_mask;
3301
3302	if (IS_ALDERLAKE_S(i915)) {
3303		dpll_mask =
3304			BIT(DPLL_ID_DG1_DPLL3) |
3305			BIT(DPLL_ID_DG1_DPLL2) |
3306			BIT(DPLL_ID_ICL_DPLL1) |
3307			BIT(DPLL_ID_ICL_DPLL0);
3308	} else if (IS_DG1(i915)) {
3309		if (port == PORT_D || port == PORT_E) {
3310			dpll_mask =
3311				BIT(DPLL_ID_DG1_DPLL2) |
3312				BIT(DPLL_ID_DG1_DPLL3);
3313		} else {
3314			dpll_mask =
3315				BIT(DPLL_ID_DG1_DPLL0) |
3316				BIT(DPLL_ID_DG1_DPLL1);
3317		}
3318	} else if (IS_ROCKETLAKE(i915)) {
3319		dpll_mask =
3320			BIT(DPLL_ID_EHL_DPLL4) |
3321			BIT(DPLL_ID_ICL_DPLL1) |
3322			BIT(DPLL_ID_ICL_DPLL0);
3323	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3324		   port != PORT_A) {
3325		dpll_mask =
3326			BIT(DPLL_ID_EHL_DPLL4) |
3327			BIT(DPLL_ID_ICL_DPLL1) |
3328			BIT(DPLL_ID_ICL_DPLL0);
3329	} else {
3330		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3331	}
3332
3333	/* Eliminate DPLLs from consideration if reserved by HTI */
3334	dpll_mask &= ~intel_hti_dpll_mask(i915);
3335
3336	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3337						&port_dpll->hw_state,
3338						dpll_mask);
3339	if (!port_dpll->pll)
3340		return -EINVAL;
3341
3342	intel_reference_shared_dpll(state, crtc,
3343				    port_dpll->pll, &port_dpll->hw_state);
3344
3345	icl_update_active_dpll(state, crtc, encoder);
3346
3347	return 0;
3348}
3349
3350static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3351				    struct intel_crtc *crtc)
3352{
3353	struct drm_i915_private *i915 = to_i915(state->base.dev);
3354	struct intel_crtc_state *crtc_state =
3355		intel_atomic_get_new_crtc_state(state, crtc);
3356	const struct intel_crtc_state *old_crtc_state =
3357		intel_atomic_get_old_crtc_state(state, crtc);
3358	struct icl_port_dpll *port_dpll =
3359		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3360	struct skl_wrpll_params pll_params = {};
3361	int ret;
3362
3363	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3364	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3365	if (ret)
3366		return ret;
3367
3368	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3369
3370	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3371	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3372	if (ret)
3373		return ret;
3374
3375	/* this is mainly for the fastset check */
3376	if (old_crtc_state->shared_dpll &&
3377	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3378		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3379	else
3380		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3381
3382	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3383							 &port_dpll->hw_state);
3384
3385	return 0;
3386}
3387
3388static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3389				struct intel_crtc *crtc,
3390				struct intel_encoder *encoder)
3391{
3392	struct drm_i915_private *i915 = to_i915(state->base.dev);
3393	struct intel_crtc_state *crtc_state =
3394		intel_atomic_get_new_crtc_state(state, crtc);
3395	struct icl_port_dpll *port_dpll =
3396		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3397	enum intel_dpll_id dpll_id;
3398	int ret;
3399
3400	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3401	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3402						&port_dpll->hw_state,
3403						BIT(DPLL_ID_ICL_TBTPLL));
3404	if (!port_dpll->pll)
3405		return -EINVAL;
3406	intel_reference_shared_dpll(state, crtc,
3407				    port_dpll->pll, &port_dpll->hw_state);
3408
3409
3410	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3411	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3412							 encoder->port));
3413	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3414						&port_dpll->hw_state,
3415						BIT(dpll_id));
3416	if (!port_dpll->pll) {
3417		ret = -EINVAL;
3418		goto err_unreference_tbt_pll;
3419	}
3420	intel_reference_shared_dpll(state, crtc,
3421				    port_dpll->pll, &port_dpll->hw_state);
3422
3423	icl_update_active_dpll(state, crtc, encoder);
3424
3425	return 0;
3426
3427err_unreference_tbt_pll:
3428	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3429	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3430
3431	return ret;
3432}
3433
3434static int icl_compute_dplls(struct intel_atomic_state *state,
3435			     struct intel_crtc *crtc,
3436			     struct intel_encoder *encoder)
3437{
3438	struct drm_i915_private *i915 = to_i915(state->base.dev);
3439	enum phy phy = intel_port_to_phy(i915, encoder->port);
3440
3441	if (intel_phy_is_combo(i915, phy))
3442		return icl_compute_combo_phy_dpll(state, crtc);
3443	else if (intel_phy_is_tc(i915, phy))
3444		return icl_compute_tc_phy_dplls(state, crtc);
3445
3446	MISSING_CASE(phy);
3447
3448	return 0;
3449}
3450
3451static int icl_get_dplls(struct intel_atomic_state *state,
3452			 struct intel_crtc *crtc,
3453			 struct intel_encoder *encoder)
3454{
3455	struct drm_i915_private *i915 = to_i915(state->base.dev);
3456	enum phy phy = intel_port_to_phy(i915, encoder->port);
3457
3458	if (intel_phy_is_combo(i915, phy))
3459		return icl_get_combo_phy_dpll(state, crtc, encoder);
3460	else if (intel_phy_is_tc(i915, phy))
3461		return icl_get_tc_phy_dplls(state, crtc, encoder);
3462
3463	MISSING_CASE(phy);
3464
3465	return -EINVAL;
3466}
3467
3468static void icl_put_dplls(struct intel_atomic_state *state,
3469			  struct intel_crtc *crtc)
3470{
3471	const struct intel_crtc_state *old_crtc_state =
3472		intel_atomic_get_old_crtc_state(state, crtc);
3473	struct intel_crtc_state *new_crtc_state =
3474		intel_atomic_get_new_crtc_state(state, crtc);
3475	enum icl_port_dpll_id id;
3476
3477	new_crtc_state->shared_dpll = NULL;
3478
3479	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3480		const struct icl_port_dpll *old_port_dpll =
3481			&old_crtc_state->icl_port_dplls[id];
3482		struct icl_port_dpll *new_port_dpll =
3483			&new_crtc_state->icl_port_dplls[id];
3484
3485		new_port_dpll->pll = NULL;
3486
3487		if (!old_port_dpll->pll)
3488			continue;
3489
3490		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3491	}
3492}
3493
3494static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3495				struct intel_shared_dpll *pll,
3496				struct intel_dpll_hw_state *hw_state)
3497{
3498	const enum intel_dpll_id id = pll->info->id;
3499	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3500	intel_wakeref_t wakeref;
3501	bool ret = false;
3502	u32 val;
3503
3504	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3505
3506	wakeref = intel_display_power_get_if_enabled(i915,
3507						     POWER_DOMAIN_DISPLAY_CORE);
3508	if (!wakeref)
3509		return false;
3510
3511	val = intel_de_read(i915, enable_reg);
3512	if (!(val & PLL_ENABLE))
3513		goto out;
3514
3515	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3516						  MG_REFCLKIN_CTL(tc_port));
3517	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3518
3519	hw_state->mg_clktop2_coreclkctl1 =
3520		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3521	hw_state->mg_clktop2_coreclkctl1 &=
3522		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3523
3524	hw_state->mg_clktop2_hsclkctl =
3525		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3526	hw_state->mg_clktop2_hsclkctl &=
3527		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3528		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3529		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3530		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3531
3532	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3533	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3534	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3535	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3536						   MG_PLL_FRAC_LOCK(tc_port));
3537	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3538
3539	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3540	hw_state->mg_pll_tdc_coldst_bias =
3541		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3542
3543	if (i915->display.dpll.ref_clks.nssc == 38400) {
3544		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3545		hw_state->mg_pll_bias_mask = 0;
3546	} else {
3547		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3548		hw_state->mg_pll_bias_mask = -1U;
3549	}
3550
3551	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3552	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3553
3554	ret = true;
3555out:
3556	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3557	return ret;
3558}
3559
3560static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3561				 struct intel_shared_dpll *pll,
3562				 struct intel_dpll_hw_state *hw_state)
3563{
3564	const enum intel_dpll_id id = pll->info->id;
3565	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3566	intel_wakeref_t wakeref;
3567	bool ret = false;
3568	u32 val;
3569
3570	wakeref = intel_display_power_get_if_enabled(i915,
3571						     POWER_DOMAIN_DISPLAY_CORE);
3572	if (!wakeref)
3573		return false;
3574
3575	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3576	if (!(val & PLL_ENABLE))
3577		goto out;
3578
3579	/*
3580	 * All registers read here have the same HIP_INDEX_REG even though
3581	 * they are on different building blocks
3582	 */
3583	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3584						       DKL_REFCLKIN_CTL(tc_port));
3585	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3586
3587	hw_state->mg_clktop2_hsclkctl =
3588		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3589	hw_state->mg_clktop2_hsclkctl &=
3590		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3591		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3592		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3593		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3594
3595	hw_state->mg_clktop2_coreclkctl1 =
3596		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3597	hw_state->mg_clktop2_coreclkctl1 &=
3598		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3599
3600	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3601	val = DKL_PLL_DIV0_MASK;
3602	if (i915->display.vbt.override_afc_startup)
3603		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3604	hw_state->mg_pll_div0 &= val;
3605
3606	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3607	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3608				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3609
3610	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3611	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3612				 DKL_PLL_SSC_STEP_LEN_MASK |
3613				 DKL_PLL_SSC_STEP_NUM_MASK |
3614				 DKL_PLL_SSC_EN);
3615
3616	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3617	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3618				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3619
3620	hw_state->mg_pll_tdc_coldst_bias =
3621		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3622	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3623					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3624
3625	ret = true;
3626out:
3627	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3628	return ret;
3629}
3630
3631static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3632				 struct intel_shared_dpll *pll,
3633				 struct intel_dpll_hw_state *hw_state,
3634				 i915_reg_t enable_reg)
3635{
3636	const enum intel_dpll_id id = pll->info->id;
3637	intel_wakeref_t wakeref;
3638	bool ret = false;
3639	u32 val;
3640
3641	wakeref = intel_display_power_get_if_enabled(i915,
3642						     POWER_DOMAIN_DISPLAY_CORE);
3643	if (!wakeref)
3644		return false;
3645
3646	val = intel_de_read(i915, enable_reg);
3647	if (!(val & PLL_ENABLE))
3648		goto out;
3649
3650	if (IS_ALDERLAKE_S(i915)) {
3651		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3652		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3653	} else if (IS_DG1(i915)) {
3654		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3655		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3656	} else if (IS_ROCKETLAKE(i915)) {
3657		hw_state->cfgcr0 = intel_de_read(i915,
3658						 RKL_DPLL_CFGCR0(id));
3659		hw_state->cfgcr1 = intel_de_read(i915,
3660						 RKL_DPLL_CFGCR1(id));
3661	} else if (DISPLAY_VER(i915) >= 12) {
3662		hw_state->cfgcr0 = intel_de_read(i915,
3663						 TGL_DPLL_CFGCR0(id));
3664		hw_state->cfgcr1 = intel_de_read(i915,
3665						 TGL_DPLL_CFGCR1(id));
3666		if (i915->display.vbt.override_afc_startup) {
3667			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3668			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3669		}
3670	} else {
3671		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3672		    id == DPLL_ID_EHL_DPLL4) {
3673			hw_state->cfgcr0 = intel_de_read(i915,
3674							 ICL_DPLL_CFGCR0(4));
3675			hw_state->cfgcr1 = intel_de_read(i915,
3676							 ICL_DPLL_CFGCR1(4));
3677		} else {
3678			hw_state->cfgcr0 = intel_de_read(i915,
3679							 ICL_DPLL_CFGCR0(id));
3680			hw_state->cfgcr1 = intel_de_read(i915,
3681							 ICL_DPLL_CFGCR1(id));
3682		}
3683	}
3684
3685	ret = true;
3686out:
3687	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3688	return ret;
3689}
3690
3691static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3692				   struct intel_shared_dpll *pll,
3693				   struct intel_dpll_hw_state *hw_state)
3694{
3695	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3696
3697	return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3698}
3699
3700static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3701				 struct intel_shared_dpll *pll,
3702				 struct intel_dpll_hw_state *hw_state)
3703{
3704	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
3705}
3706
3707static void icl_dpll_write(struct drm_i915_private *i915,
3708			   struct intel_shared_dpll *pll)
3709{
3710	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3711	const enum intel_dpll_id id = pll->info->id;
3712	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3713
3714	if (IS_ALDERLAKE_S(i915)) {
3715		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3716		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3717	} else if (IS_DG1(i915)) {
3718		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3719		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3720	} else if (IS_ROCKETLAKE(i915)) {
3721		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3722		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3723	} else if (DISPLAY_VER(i915) >= 12) {
3724		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3725		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3726		div0_reg = TGL_DPLL0_DIV0(id);
3727	} else {
3728		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3729		    id == DPLL_ID_EHL_DPLL4) {
3730			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3731			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3732		} else {
3733			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3734			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3735		}
3736	}
3737
3738	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3739	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3740	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3741			 !i915_mmio_reg_valid(div0_reg));
3742	if (i915->display.vbt.override_afc_startup &&
3743	    i915_mmio_reg_valid(div0_reg))
3744		intel_de_rmw(i915, div0_reg,
3745			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3746	intel_de_posting_read(i915, cfgcr1_reg);
3747}
3748
3749static void icl_mg_pll_write(struct drm_i915_private *i915,
3750			     struct intel_shared_dpll *pll)
3751{
3752	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3753	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3754
3755	/*
3756	 * Some of the following registers have reserved fields, so program
3757	 * these with RMW based on a mask. The mask can be fixed or generated
3758	 * during the calc/readout phase if the mask depends on some other HW
3759	 * state like refclk, see icl_calc_mg_pll_state().
3760	 */
3761	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3762		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3763
3764	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3765		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3766		     hw_state->mg_clktop2_coreclkctl1);
3767
3768	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3769		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3770		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3771		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3772		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3773		     hw_state->mg_clktop2_hsclkctl);
3774
3775	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3776	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3777	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3778	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3779		       hw_state->mg_pll_frac_lock);
3780	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3781
3782	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3783		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3784
3785	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3786		     hw_state->mg_pll_tdc_coldst_bias_mask,
3787		     hw_state->mg_pll_tdc_coldst_bias);
3788
3789	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3790}
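/*
 * A minimal sketch (not part of the driver) of what the RMW helpers used
 * above do: only the bits named by the mask are replaced, everything else,
 * including reserved fields, keeps its current hardware value. Roughly:
 *
 *	u32 old = intel_de_read(i915, reg);
 *
 *	intel_de_write(i915, reg, (old & ~mask) | bits);
 *
 * This is also why mg_pll_get_hw_state() stores only the masked field
 * values in the hw_state, so readout and the computed state can be
 * compared bit for bit.
 */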
3791
3792static void dkl_pll_write(struct drm_i915_private *i915,
3793			  struct intel_shared_dpll *pll)
3794{
3795	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3796	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3797	u32 val;
3798
3799	/*
3800	 * All registers programmed here have the same HIP_INDEX_REG even
3801	 * though they are on different building blocks
3802	 */
3803	/* All the registers are RMW */
3804	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3805	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3806	val |= hw_state->mg_refclkin_ctl;
3807	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3808
3809	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3810	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3811	val |= hw_state->mg_clktop2_coreclkctl1;
3812	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3813
3814	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3815	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3816		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3817		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3818		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3819	val |= hw_state->mg_clktop2_hsclkctl;
3820	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3821
3822	val = DKL_PLL_DIV0_MASK;
3823	if (i915->display.vbt.override_afc_startup)
3824		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3825	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3826			  hw_state->mg_pll_div0);
3827
3828	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3829	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3830		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3831	val |= hw_state->mg_pll_div1;
3832	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3833
3834	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3835	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3836		 DKL_PLL_SSC_STEP_LEN_MASK |
3837		 DKL_PLL_SSC_STEP_NUM_MASK |
3838		 DKL_PLL_SSC_EN);
3839	val |= hw_state->mg_pll_ssc;
3840	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3841
3842	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3843	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3844		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3845	val |= hw_state->mg_pll_bias;
3846	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3847
3848	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3849	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3850		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3851	val |= hw_state->mg_pll_tdc_coldst_bias;
3852	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3853
3854	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3855}
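/*
 * The intel_dkl_phy_read()/_write()/_rmw() helpers used above are banked
 * accessors: the Dekel PHY exposes several building blocks behind a single
 * MMIO window, so each access first selects the bank via HIP_INDEX_REG and
 * then touches the register, under a lock so the index/data pair cannot be
 * interleaved. A rough sketch of the read side (see intel_dkl_phy.c for the
 * real implementation):
 *
 *	spin_lock(&i915->display.dkl.phy_lock);
 *	intel_de_write(i915, HIP_INDEX_REG(tc_port),
 *		       HIP_INDEX_VAL(tc_port, reg.bank_idx));
 *	val = intel_de_read(i915, DKL_REG_MMIO(reg));
 *	spin_unlock(&i915->display.dkl.phy_lock);
 */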
3856
3857static void icl_pll_power_enable(struct drm_i915_private *i915,
3858				 struct intel_shared_dpll *pll,
3859				 i915_reg_t enable_reg)
3860{
3861	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3862
3863	/*
3864	 * The spec says we need to "wait" but it also says it should be
3865	 * immediate.
3866	 */
3867	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3868		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3869			pll->info->id);
3870}
3871
3872static void icl_pll_enable(struct drm_i915_private *i915,
3873			   struct intel_shared_dpll *pll,
3874			   i915_reg_t enable_reg)
3875{
3876	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3877
3878	/* Timeout is actually 600us. */
3879	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3880		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3881}
3882
3883static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3884{
3885	u32 val;
3886
3887	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3888	    pll->info->id != DPLL_ID_ICL_DPLL0)
3889		return;
3890	/*
3891	 * Wa_16011069516:adl-p[a0]
3892	 *
3893	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3894	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3895	 * sanity check this assumption with a double read, which presumably
3896	 * returns the correct value even with clock gating on.
3897	 *
3898	 * Instead of the usual place for workarounds we apply this one here,
3899	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3900	 */
3901	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3902	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3903	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3904		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3905}
3906
3907static void combo_pll_enable(struct drm_i915_private *i915,
3908			     struct intel_shared_dpll *pll)
3909{
3910	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3911
3912	icl_pll_power_enable(i915, pll, enable_reg);
3913
3914	icl_dpll_write(i915, pll);
3915
3916	/*
3917	 * DVFS pre sequence would be here, but in our driver the cdclk code
3918	 * paths should already be setting the appropriate voltage, hence we do
3919	 * nothing here.
3920	 */
3921
3922	icl_pll_enable(i915, pll, enable_reg);
3923
3924	adlp_cmtg_clock_gating_wa(i915, pll);
3925
3926	/* DVFS post sequence would be here. See the comment above. */
3927}
3928
3929static void tbt_pll_enable(struct drm_i915_private *i915,
3930			   struct intel_shared_dpll *pll)
3931{
3932	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3933
3934	icl_dpll_write(i915, pll);
3935
3936	/*
3937	 * DVFS pre sequence would be here, but in our driver the cdclk code
3938	 * paths should already be setting the appropriate voltage, hence we do
3939	 * nothing here.
3940	 */
3941
3942	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3943
3944	/* DVFS post sequence would be here. See the comment above. */
3945}
3946
3947static void mg_pll_enable(struct drm_i915_private *i915,
3948			  struct intel_shared_dpll *pll)
3949{
3950	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3951
3952	icl_pll_power_enable(i915, pll, enable_reg);
3953
3954	if (DISPLAY_VER(i915) >= 12)
3955		dkl_pll_write(i915, pll);
3956	else
3957		icl_mg_pll_write(i915, pll);
3958
3959	/*
3960	 * DVFS pre sequence would be here, but in our driver the cdclk code
3961	 * paths should already be setting the appropriate voltage, hence we do
3962	 * nothing here.
3963	 */
3964
3965	icl_pll_enable(i915, pll, enable_reg);
3966
3967	/* DVFS post sequence would be here. See the comment above. */
3968}
3969
3970static void icl_pll_disable(struct drm_i915_private *i915,
3971			    struct intel_shared_dpll *pll,
3972			    i915_reg_t enable_reg)
3973{
3974	/* The first steps are done by intel_ddi_post_disable(). */
3975
3976	/*
3977	 * DVFS pre sequence would be here, but in our driver the cdclk code
3978	 * paths should already be setting the appropriate voltage, hence we do
3979	 * nothing here.
3980	 */
3981
3982	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3983
3984	/* Timeout is actually 1us. */
3985	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3986		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
3987
3988	/* DVFS post sequence would be here. See the comment above. */
3989
3990	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3991
3992	/*
3993	 * The spec says we need to "wait" but it also says it should be
3994	 * immediate.
3995	 */
3996	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3997		drm_err(&i915->drm, "PLL %d Power not disabled\n",
3998			pll->info->id);
3999}
4000
4001static void combo_pll_disable(struct drm_i915_private *i915,
4002			      struct intel_shared_dpll *pll)
4003{
4004	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4005
4006	icl_pll_disable(i915, pll, enable_reg);
4007}
4008
4009static void tbt_pll_disable(struct drm_i915_private *i915,
4010			    struct intel_shared_dpll *pll)
4011{
4012	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4013}
4014
4015static void mg_pll_disable(struct drm_i915_private *i915,
4016			   struct intel_shared_dpll *pll)
4017{
4018	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4019
4020	icl_pll_disable(i915, pll, enable_reg);
4021}
4022
4023static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4024{
4025	/* No SSC ref */
4026	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4027}
4028
4029static void icl_dump_hw_state(struct drm_i915_private *i915,
4030			      const struct intel_dpll_hw_state *hw_state)
4031{
4032	drm_dbg_kms(&i915->drm,
4033		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4034		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4035		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4036		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4037		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4038		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4039		    hw_state->cfgcr0, hw_state->cfgcr1,
4040		    hw_state->div0,
4041		    hw_state->mg_refclkin_ctl,
4042		    hw_state->mg_clktop2_coreclkctl1,
4043		    hw_state->mg_clktop2_hsclkctl,
4044		    hw_state->mg_pll_div0,
4045		    hw_state->mg_pll_div1,
4046		    hw_state->mg_pll_lf,
4047		    hw_state->mg_pll_frac_lock,
4048		    hw_state->mg_pll_ssc,
4049		    hw_state->mg_pll_bias,
4050		    hw_state->mg_pll_tdc_coldst_bias);
4051}
4052
4053static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
4054				 const struct intel_dpll_hw_state *b)
4055{
4056	/* FIXME split combo vs. mg more thoroughly */
4057	return a->cfgcr0 == b->cfgcr0 &&
4058		a->cfgcr1 == b->cfgcr1 &&
4059		a->div0 == b->div0 &&
4060		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4061		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4062		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4063		a->mg_pll_div0 == b->mg_pll_div0 &&
4064		a->mg_pll_div1 == b->mg_pll_div1 &&
4065		a->mg_pll_lf == b->mg_pll_lf &&
4066		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4067		a->mg_pll_ssc == b->mg_pll_ssc &&
4068		a->mg_pll_bias == b->mg_pll_bias &&
4069		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4070}
4071
4072static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4073	.enable = combo_pll_enable,
4074	.disable = combo_pll_disable,
4075	.get_hw_state = combo_pll_get_hw_state,
4076	.get_freq = icl_ddi_combo_pll_get_freq,
4077};
4078
4079static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4080	.enable = tbt_pll_enable,
4081	.disable = tbt_pll_disable,
4082	.get_hw_state = tbt_pll_get_hw_state,
4083	.get_freq = icl_ddi_tbt_pll_get_freq,
4084};
4085
4086static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4087	.enable = mg_pll_enable,
4088	.disable = mg_pll_disable,
4089	.get_hw_state = mg_pll_get_hw_state,
4090	.get_freq = icl_ddi_mg_pll_get_freq,
4091};
4092
4093static const struct dpll_info icl_plls[] = {
4094	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4095	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4096	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4097	  .is_alt_port_dpll = true, },
4098	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4099	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4100	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4101	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4102	{}
4103};
4104
4105static const struct intel_dpll_mgr icl_pll_mgr = {
4106	.dpll_info = icl_plls,
4107	.compute_dplls = icl_compute_dplls,
4108	.get_dplls = icl_get_dplls,
4109	.put_dplls = icl_put_dplls,
4110	.update_active_dpll = icl_update_active_dpll,
4111	.update_ref_clks = icl_update_dpll_ref_clks,
4112	.dump_hw_state = icl_dump_hw_state,
4113	.compare_hw_state = icl_compare_hw_state,
4114};
4115
4116static const struct dpll_info ehl_plls[] = {
4117	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4118	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4119	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4120	  .power_domain = POWER_DOMAIN_DC_OFF, },
4121	{}
4122};
4123
4124static const struct intel_dpll_mgr ehl_pll_mgr = {
4125	.dpll_info = ehl_plls,
4126	.compute_dplls = icl_compute_dplls,
4127	.get_dplls = icl_get_dplls,
4128	.put_dplls = icl_put_dplls,
4129	.update_ref_clks = icl_update_dpll_ref_clks,
4130	.dump_hw_state = icl_dump_hw_state,
4131	.compare_hw_state = icl_compare_hw_state,
4132};
4133
4134static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4135	.enable = mg_pll_enable,
4136	.disable = mg_pll_disable,
4137	.get_hw_state = dkl_pll_get_hw_state,
4138	.get_freq = icl_ddi_mg_pll_get_freq,
4139};
4140
4141static const struct dpll_info tgl_plls[] = {
4142	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4143	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4144	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4145	  .is_alt_port_dpll = true, },
4146	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4147	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4148	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4149	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4150	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4151	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4152	{}
4153};
4154
4155static const struct intel_dpll_mgr tgl_pll_mgr = {
4156	.dpll_info = tgl_plls,
4157	.compute_dplls = icl_compute_dplls,
4158	.get_dplls = icl_get_dplls,
4159	.put_dplls = icl_put_dplls,
4160	.update_active_dpll = icl_update_active_dpll,
4161	.update_ref_clks = icl_update_dpll_ref_clks,
4162	.dump_hw_state = icl_dump_hw_state,
4163	.compare_hw_state = icl_compare_hw_state,
4164};
4165
4166static const struct dpll_info rkl_plls[] = {
4167	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4168	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4169	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4170	{}
4171};
4172
4173static const struct intel_dpll_mgr rkl_pll_mgr = {
4174	.dpll_info = rkl_plls,
4175	.compute_dplls = icl_compute_dplls,
4176	.get_dplls = icl_get_dplls,
4177	.put_dplls = icl_put_dplls,
4178	.update_ref_clks = icl_update_dpll_ref_clks,
4179	.dump_hw_state = icl_dump_hw_state,
4180	.compare_hw_state = icl_compare_hw_state,
4181};
4182
4183static const struct dpll_info dg1_plls[] = {
4184	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4185	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4186	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4187	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4188	{}
4189};
4190
4191static const struct intel_dpll_mgr dg1_pll_mgr = {
4192	.dpll_info = dg1_plls,
4193	.compute_dplls = icl_compute_dplls,
4194	.get_dplls = icl_get_dplls,
4195	.put_dplls = icl_put_dplls,
4196	.update_ref_clks = icl_update_dpll_ref_clks,
4197	.dump_hw_state = icl_dump_hw_state,
4198	.compare_hw_state = icl_compare_hw_state,
4199};
4200
4201static const struct dpll_info adls_plls[] = {
4202	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4203	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4204	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4205	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4206	{}
4207};
4208
4209static const struct intel_dpll_mgr adls_pll_mgr = {
4210	.dpll_info = adls_plls,
4211	.compute_dplls = icl_compute_dplls,
4212	.get_dplls = icl_get_dplls,
4213	.put_dplls = icl_put_dplls,
4214	.update_ref_clks = icl_update_dpll_ref_clks,
4215	.dump_hw_state = icl_dump_hw_state,
4216	.compare_hw_state = icl_compare_hw_state,
4217};
4218
4219static const struct dpll_info adlp_plls[] = {
4220	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4221	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4222	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4223	  .is_alt_port_dpll = true, },
4224	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4225	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4226	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4227	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4228	{}
4229};
4230
4231static const struct intel_dpll_mgr adlp_pll_mgr = {
4232	.dpll_info = adlp_plls,
4233	.compute_dplls = icl_compute_dplls,
4234	.get_dplls = icl_get_dplls,
4235	.put_dplls = icl_put_dplls,
4236	.update_active_dpll = icl_update_active_dpll,
4237	.update_ref_clks = icl_update_dpll_ref_clks,
4238	.dump_hw_state = icl_dump_hw_state,
4239	.compare_hw_state = icl_compare_hw_state,
4240};
4241
4242/**
4243 * intel_shared_dpll_init - Initialize shared DPLLs
4244 * @i915: i915 device
4245 *
4246 * Initialize shared DPLLs for @i915.
4247 */
4248void intel_shared_dpll_init(struct drm_i915_private *i915)
4249{
4250	const struct intel_dpll_mgr *dpll_mgr = NULL;
4251	const struct dpll_info *dpll_info;
4252	int i;
4253
4254	mutex_init(&i915->display.dpll.lock);
4255
4256	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4257		/* No shared DPLLs on DG2 or display version 14+; port PLLs are part of the PHY */
4258		dpll_mgr = NULL;
4259	else if (IS_ALDERLAKE_P(i915))
4260		dpll_mgr = &adlp_pll_mgr;
4261	else if (IS_ALDERLAKE_S(i915))
4262		dpll_mgr = &adls_pll_mgr;
4263	else if (IS_DG1(i915))
4264		dpll_mgr = &dg1_pll_mgr;
4265	else if (IS_ROCKETLAKE(i915))
4266		dpll_mgr = &rkl_pll_mgr;
4267	else if (DISPLAY_VER(i915) >= 12)
4268		dpll_mgr = &tgl_pll_mgr;
4269	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4270		dpll_mgr = &ehl_pll_mgr;
4271	else if (DISPLAY_VER(i915) >= 11)
4272		dpll_mgr = &icl_pll_mgr;
4273	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4274		dpll_mgr = &bxt_pll_mgr;
4275	else if (DISPLAY_VER(i915) == 9)
4276		dpll_mgr = &skl_pll_mgr;
4277	else if (HAS_DDI(i915))
4278		dpll_mgr = &hsw_pll_mgr;
4279	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4280		dpll_mgr = &pch_pll_mgr;
4281
4282	if (!dpll_mgr)
4283		return;
4284
4285	dpll_info = dpll_mgr->dpll_info;
4286
4287	for (i = 0; dpll_info[i].name; i++) {
4288		if (drm_WARN_ON(&i915->drm,
4289				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4290			break;
4291
4292		/* must fit into unsigned long bitmask on 32bit */
4293		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4294			break;
4295
4296		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4297		i915->display.dpll.shared_dplls[i].index = i;
4298	}
4299
4300	i915->display.dpll.mgr = dpll_mgr;
4301	i915->display.dpll.num_shared_dpll = i;
4302}
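/*
 * After initialization the PLL table can be walked with
 * for_each_shared_dpll(); a hypothetical debug dump (illustration only)
 * would look like:
 *
 *	struct intel_shared_dpll *pll;
 *	int i;
 *
 *	for_each_shared_dpll(i915, pll, i)
 *		drm_dbg_kms(&i915->drm, "%s: on %s, pipe_mask 0x%x\n",
 *			    pll->info->name, str_yes_no(pll->on),
 *			    pll->state.pipe_mask);
 */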
4303
4304/**
4305 * intel_compute_shared_dplls - compute DPLL state for CRTC and encoder combination
4306 * @state: atomic state
4307 * @crtc: CRTC to compute DPLLs for
4308 * @encoder: encoder
4309 *
4310 * This function computes the DPLL state for the given CRTC and encoder.
4311 *
4312 * The new configuration in the atomic commit @state is made effective by
4313 * calling intel_shared_dpll_swap_state().
4314 *
4315 * Returns:
4316 * 0 on success, negative error code on failure.
4317 */
4318int intel_compute_shared_dplls(struct intel_atomic_state *state,
4319			       struct intel_crtc *crtc,
4320			       struct intel_encoder *encoder)
4321{
4322	struct drm_i915_private *i915 = to_i915(state->base.dev);
4323	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4324
4325	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4326		return -EINVAL;
4327
4328	return dpll_mgr->compute_dplls(state, crtc, encoder);
4329}
4330
4331/**
4332 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4333 * @state: atomic state
4334 * @crtc: CRTC to reserve DPLLs for
4335 * @encoder: encoder
4336 *
4337 * This function reserves all required DPLLs for the given CRTC and encoder
4338 * combination in the current atomic commit @state and the new @crtc atomic
4339 * state.
4340 *
4341 * The new configuration in the atomic commit @state is made effective by
4342 * calling intel_shared_dpll_swap_state().
4343 *
4344 * The reserved DPLLs should be released by calling
4345 * intel_release_shared_dplls().
4346 *
4347 * Returns:
4348 * 0 if all required DPLLs were successfully reserved,
4349 * negative error code otherwise.
4350 */
4351int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4352			       struct intel_crtc *crtc,
4353			       struct intel_encoder *encoder)
4354{
4355	struct drm_i915_private *i915 = to_i915(state->base.dev);
4356	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4357
4358	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4359		return -EINVAL;
4360
4361	return dpll_mgr->get_dplls(state, crtc, encoder);
4362}
4363
4364/**
4365 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4366 * @state: atomic state
4367 * @crtc: crtc from which the DPLLs are to be released
4368 *
4369 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4370 * from the current atomic commit @state and the old @crtc atomic state.
4371 *
4372 * The new configuration in the atomic commit @state is made effective by
4373 * calling intel_shared_dpll_swap_state().
4374 */
4375void intel_release_shared_dplls(struct intel_atomic_state *state,
4376				struct intel_crtc *crtc)
4377{
4378	struct drm_i915_private *i915 = to_i915(state->base.dev);
4379	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4380
4381	/*
4382	 * FIXME: this function is called for every platform having a
4383	 * compute_clock hook, even though the platform doesn't yet support
4384	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4385	 * called on those.
4386	 */
4387	if (!dpll_mgr)
4388		return;
4389
4390	dpll_mgr->put_dplls(state, crtc);
4391}
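/*
 * Hedged sketch of how the helpers above fit into an atomic modeset for a
 * CRTC that needs a PLL (the exact call sites live in the CRTC and encoder
 * code, error handling omitted):
 *
 *	atomic check phase:
 *		intel_compute_shared_dplls(state, crtc, encoder);
 *		intel_reserve_shared_dplls(state, crtc, encoder);
 *		intel_release_shared_dplls(state, crtc);
 *			(when the old PLL is no longer needed)
 *
 *	atomic commit phase:
 *		intel_shared_dpll_swap_state(state);
 */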
4392
4393/**
4394 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4395 * @state: atomic state
4396 * @crtc: the CRTC for which to update the active DPLL
4397 * @encoder: encoder determining the type of port DPLL
4398 *
4399 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4400 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4401 * DPLL selected will be based on the current mode of the encoder's port.
4402 */
4403void intel_update_active_dpll(struct intel_atomic_state *state,
4404			      struct intel_crtc *crtc,
4405			      struct intel_encoder *encoder)
4406{
4407	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4408	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4409
4410	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4411		return;
4412
4413	dpll_mgr->update_active_dpll(state, crtc, encoder);
4414}
4415
4416/**
4417 * intel_dpll_get_freq - calculate the DPLL's output frequency
4418 * @i915: i915 device
4419 * @pll: DPLL for which to calculate the output frequency
4420 * @pll_state: DPLL state from which to calculate the output frequency
4421 *
4422 * Return the output frequency of @pll corresponding to the passed in @pll_state.
4423 */
4424int intel_dpll_get_freq(struct drm_i915_private *i915,
4425			const struct intel_shared_dpll *pll,
4426			const struct intel_dpll_hw_state *pll_state)
4427{
4428	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4429		return 0;
4430
4431	return pll->info->funcs->get_freq(i915, pll, pll_state);
4432}
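/*
 * Typical use (hedged example): state readout code recovers the port clock
 * from the PLL configuration it has just read back, e.g.
 *
 *	crtc_state->port_clock =
 *		intel_dpll_get_freq(i915, crtc_state->shared_dpll,
 *				    &crtc_state->dpll_hw_state);
 */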
4433
4434/**
4435 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4436 * @i915: i915 device
4437 * @pll: DPLL for which to read out the hardware state
4438 * @hw_state: DPLL's hardware state
4439 *
4440 * Read out @pll's hardware state into @hw_state.
4441 */
4442bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4443			     struct intel_shared_dpll *pll,
4444			     struct intel_dpll_hw_state *hw_state)
4445{
4446	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4447}
4448
4449static void readout_dpll_hw_state(struct drm_i915_private *i915,
4450				  struct intel_shared_dpll *pll)
4451{
4452	struct intel_crtc *crtc;
4453
4454	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4455
4456	if (pll->on && pll->info->power_domain)
4457		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4458
4459	pll->state.pipe_mask = 0;
4460	for_each_intel_crtc(&i915->drm, crtc) {
4461		struct intel_crtc_state *crtc_state =
4462			to_intel_crtc_state(crtc->base.state);
4463
4464		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4465			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4466	}
4467	pll->active_mask = pll->state.pipe_mask;
4468
4469	drm_dbg_kms(&i915->drm,
4470		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4471		    pll->info->name, pll->state.pipe_mask, pll->on);
4472}
4473
4474void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4475{
4476	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4477		i915->display.dpll.mgr->update_ref_clks(i915);
4478}
4479
4480void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4481{
4482	struct intel_shared_dpll *pll;
4483	int i;
4484
4485	for_each_shared_dpll(i915, pll, i)
4486		readout_dpll_hw_state(i915, pll);
4487}
4488
4489static void sanitize_dpll_state(struct drm_i915_private *i915,
4490				struct intel_shared_dpll *pll)
4491{
4492	if (!pll->on)
4493		return;
4494
4495	adlp_cmtg_clock_gating_wa(i915, pll);
4496
4497	if (pll->active_mask)
4498		return;
4499
4500	drm_dbg_kms(&i915->drm,
4501		    "%s enabled but not in use, disabling\n",
4502		    pll->info->name);
4503
4504	_intel_disable_shared_dpll(i915, pll);
4505}
4506
4507void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4508{
4509	struct intel_shared_dpll *pll;
4510	int i;
4511
4512	for_each_shared_dpll(i915, pll, i)
4513		sanitize_dpll_state(i915, pll);
4514}
4515
4516/**
4517 * intel_dpll_dump_hw_state - write hw_state to dmesg
4518 * @i915: i915 drm device
4519 * @hw_state: hw state to be written to the log
4520 *
4521 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4522 */
4523void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4524			      const struct intel_dpll_hw_state *hw_state)
4525{
4526	if (i915->display.dpll.mgr) {
4527		i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4528	} else {
4529		/* fallback for platforms that don't use the shared dpll
4530		 * infrastructure
4531		 */
4532		ibx_dump_hw_state(i915, hw_state);
4533	}
4534}
4535
4536/**
4537 * intel_dpll_compare_hw_state - compare the two states
4538 * @i915: i915 drm device
4539 * @a: first DPLL hw state
4540 * @b: second DPLL hw state
4541 *
4542 * Compare DPLL hw states @a and @b.
4543 *
4544 * Returns: true if the states are equal, false if they differ
4545 */
4546bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4547				 const struct intel_dpll_hw_state *a,
4548				 const struct intel_dpll_hw_state *b)
4549{
4550	if (i915->display.dpll.mgr) {
4551		return i915->display.dpll.mgr->compare_hw_state(a, b);
4552	} else {
4553		/* fallback for platforms that don't use the shared dpll
4554		 * infrastructure
4555		 */
4556		return ibx_compare_hw_state(a, b);
4557	}
4558}
4559
4560static void
4561verify_single_dpll_state(struct drm_i915_private *i915,
4562			 struct intel_shared_dpll *pll,
4563			 struct intel_crtc *crtc,
4564			 const struct intel_crtc_state *new_crtc_state)
4565{
4566	struct intel_dpll_hw_state dpll_hw_state = {};
4567	u8 pipe_mask;
4568	bool active;
4569
4570	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4571
4572	if (!pll->info->always_on) {
4573		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4574				"%s: pll in active use but not on in sw tracking\n",
4575				pll->info->name);
4576		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4577				"%s: pll is on but not used by any active pipe\n",
4578				pll->info->name);
4579		I915_STATE_WARN(i915, pll->on != active,
4580				"%s: pll on state mismatch (expected %i, found %i)\n",
4581				pll->info->name, pll->on, active);
4582	}
4583
4584	if (!crtc) {
4585		I915_STATE_WARN(i915,
4586				pll->active_mask & ~pll->state.pipe_mask,
4587				"%s: more active pll users than references: 0x%x vs 0x%x\n",
4588				pll->info->name, pll->active_mask, pll->state.pipe_mask);
4589
4590		return;
4591	}
4592
4593	pipe_mask = BIT(crtc->pipe);
4594
4595	if (new_crtc_state->hw.active)
4596		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4597				"%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4598				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4599	else
4600		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4601				"%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4602				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4603
4604	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4605			"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4606			pll->info->name, pipe_mask, pll->state.pipe_mask);
4607
4608	I915_STATE_WARN(i915,
4609			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4610					  sizeof(dpll_hw_state)),
4611			"%s: pll hw state mismatch\n",
4612			pll->info->name);
4613}
4614
4615static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4616			      const struct intel_shared_dpll *new_pll)
4617{
4618	return old_pll && new_pll && old_pll != new_pll &&
4619		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4620}
4621
4622void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4623				    struct intel_crtc *crtc)
4624{
4625	struct drm_i915_private *i915 = to_i915(state->base.dev);
4626	const struct intel_crtc_state *old_crtc_state =
4627		intel_atomic_get_old_crtc_state(state, crtc);
4628	const struct intel_crtc_state *new_crtc_state =
4629		intel_atomic_get_new_crtc_state(state, crtc);
4630
4631	if (new_crtc_state->shared_dpll)
4632		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4633					 crtc, new_crtc_state);
4634
4635	if (old_crtc_state->shared_dpll &&
4636	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4637		u8 pipe_mask = BIT(crtc->pipe);
4638		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4639
4640		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4641				"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4642				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4643
4644		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4645		I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4646							 new_crtc_state->shared_dpll) &&
4647				pll->state.pipe_mask & pipe_mask,
4648				"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4649				pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4650	}
4651}
4652
4653void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4654{
4655	struct drm_i915_private *i915 = to_i915(state->base.dev);
4656	struct intel_shared_dpll *pll;
4657	int i;
4658
4659	for_each_shared_dpll(i915, pll, i)
4660		verify_single_dpll_state(i915, pll, NULL, NULL);
4661}
v5.9
   1/*
   2 * Copyright © 2006-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
 
 
 
 
 
  24#include "intel_display_types.h"
 
 
  25#include "intel_dpio_phy.h"
 
  26#include "intel_dpll_mgr.h"
 
 
 
 
  27
  28/**
  29 * DOC: Display PLLs
  30 *
  31 * Display PLLs used for driving outputs vary by platform. While some have
  32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  33 * from a pool. In the latter scenario, it is possible that multiple pipes
  34 * share a PLL if their configurations match.
  35 *
  36 * This file provides an abstraction over display PLLs. The function
  37 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  38 * users of a PLL are tracked and that tracking is integrated with the atomic
  39 * modset interface. During an atomic operation, required PLLs can be reserved
  40 * for a given CRTC and encoder configuration by calling
  41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
  42 * with intel_release_shared_dplls().
  43 * Changes to the users are first staged in the atomic state, and then made
  44 * effective by calling intel_shared_dpll_swap_state() during the atomic
  45 * commit phase.
  46 */
  47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  48struct intel_dpll_mgr {
  49	const struct dpll_info *dpll_info;
  50
  51	bool (*get_dplls)(struct intel_atomic_state *state,
  52			  struct intel_crtc *crtc,
  53			  struct intel_encoder *encoder);
 
 
 
  54	void (*put_dplls)(struct intel_atomic_state *state,
  55			  struct intel_crtc *crtc);
  56	void (*update_active_dpll)(struct intel_atomic_state *state,
  57				   struct intel_crtc *crtc,
  58				   struct intel_encoder *encoder);
  59	void (*update_ref_clks)(struct drm_i915_private *i915);
  60	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
  61			      const struct intel_dpll_hw_state *hw_state);
 
 
  62};
  63
  64static void
  65intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
  66				  struct intel_shared_dpll_state *shared_dpll)
  67{
  68	enum intel_dpll_id i;
 
  69
  70	/* Copy shared dpll state */
  71	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
  72		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
  73
  74		shared_dpll[i] = pll->state;
  75	}
  76}
  77
  78static struct intel_shared_dpll_state *
  79intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
  80{
  81	struct intel_atomic_state *state = to_intel_atomic_state(s);
  82
  83	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
  84
  85	if (!state->dpll_set) {
  86		state->dpll_set = true;
  87
  88		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
  89						  state->shared_dpll);
  90	}
  91
  92	return state->shared_dpll;
  93}
  94
  95/**
  96 * intel_get_shared_dpll_by_id - get a DPLL given its id
  97 * @dev_priv: i915 device instance
  98 * @id: pll id
  99 *
 100 * Returns:
 101 * A pointer to the DPLL with @id
 102 */
 103struct intel_shared_dpll *
 104intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 105			    enum intel_dpll_id id)
 106{
 107	return &dev_priv->dpll.shared_dplls[id];
 108}
 109
 110/**
 111 * intel_get_shared_dpll_id - get the id of a DPLL
 112 * @dev_priv: i915 device instance
 113 * @pll: the DPLL
 114 *
 115 * Returns:
 116 * The id of @pll
 117 */
 118enum intel_dpll_id
 119intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
 120			 struct intel_shared_dpll *pll)
 121{
 122	long pll_idx = pll - dev_priv->dpll.shared_dplls;
 123
 124	if (drm_WARN_ON(&dev_priv->drm,
 125			pll_idx < 0 ||
 126			pll_idx >= dev_priv->dpll.num_shared_dpll))
 127		return -1;
 128
 129	return pll_idx;
 
 130}
 131
 132/* For ILK+ */
 133void assert_shared_dpll(struct drm_i915_private *dev_priv,
 134			struct intel_shared_dpll *pll,
 135			bool state)
 136{
 137	bool cur_state;
 138	struct intel_dpll_hw_state hw_state;
 139
 140	if (drm_WARN(&dev_priv->drm, !pll,
 141		     "asserting DPLL %s with no DPLL\n", onoff(state)))
 142		return;
 143
 144	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
 145	I915_STATE_WARN(cur_state != state,
 146	     "%s assertion failure (expected %s, current %s)\n",
 147			pll->info->name, onoff(state), onoff(cur_state));
 
 
 
 
 
 
 
 
 
 
 
 148}
 149
 150/**
 151 * intel_prepare_shared_dpll - call a dpll's prepare hook
 152 * @crtc_state: CRTC, and its state, which has a shared dpll
 153 *
 154 * This calls the PLL's prepare hook if it has one and if the PLL is not
 155 * already enabled. The prepare hook is platform specific.
 156 */
 157void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 158{
 159	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 160	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 161	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 162
 163	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
 164		return;
 
 165
 166	mutex_lock(&dev_priv->dpll.lock);
 167	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
 168	if (!pll->active_mask) {
 169		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
 170		drm_WARN_ON(&dev_priv->drm, pll->on);
 171		assert_shared_dpll_disabled(dev_priv, pll);
 172
 173		pll->info->funcs->prepare(dev_priv, pll);
 174	}
 175	mutex_unlock(&dev_priv->dpll.lock);
 176}
 177
 178/**
 179 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 180 * @crtc_state: CRTC, and its state, which has a shared DPLL
 181 *
 182 * Enable the shared DPLL used by @crtc.
 183 */
 184void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 185{
 186	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 187	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 188	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 189	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 190	unsigned int old_mask;
 191
 192	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
 193		return;
 194
 195	mutex_lock(&dev_priv->dpll.lock);
 196	old_mask = pll->active_mask;
 197
 198	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
 199	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
 200		goto out;
 201
 202	pll->active_mask |= crtc_mask;
 203
 204	drm_dbg_kms(&dev_priv->drm,
 205		    "enable %s (active %x, on? %d) for crtc %d\n",
 206		    pll->info->name, pll->active_mask, pll->on,
 207		    crtc->base.base.id);
 208
 209	if (old_mask) {
 210		drm_WARN_ON(&dev_priv->drm, !pll->on);
 211		assert_shared_dpll_enabled(dev_priv, pll);
 212		goto out;
 213	}
 214	drm_WARN_ON(&dev_priv->drm, pll->on);
 215
 216	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
 217	pll->info->funcs->enable(dev_priv, pll);
 218	pll->on = true;
 219
 220out:
 221	mutex_unlock(&dev_priv->dpll.lock);
 222}
 223
 224/**
 225 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 226 * @crtc_state: CRTC, and its state, which has a shared DPLL
 227 *
 228 * Disable the shared DPLL used by @crtc.
 229 */
 230void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 231{
 232	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 233	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 234	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 235	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 236
 237	/* PCH only available on ILK+ */
 238	if (INTEL_GEN(dev_priv) < 5)
 239		return;
 240
 241	if (pll == NULL)
 242		return;
 243
 244	mutex_lock(&dev_priv->dpll.lock);
 245	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
 
 
 246		goto out;
 247
 248	drm_dbg_kms(&dev_priv->drm,
 249		    "disable %s (active %x, on? %d) for crtc %d\n",
 250		    pll->info->name, pll->active_mask, pll->on,
 251		    crtc->base.base.id);
 252
 253	assert_shared_dpll_enabled(dev_priv, pll);
 254	drm_WARN_ON(&dev_priv->drm, !pll->on);
 255
 256	pll->active_mask &= ~crtc_mask;
 257	if (pll->active_mask)
 258		goto out;
 259
 260	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
 261	pll->info->funcs->disable(dev_priv, pll);
 262	pll->on = false;
 263
 264out:
 265	mutex_unlock(&dev_priv->dpll.lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 266}
 267
 268static struct intel_shared_dpll *
 269intel_find_shared_dpll(struct intel_atomic_state *state,
 270		       const struct intel_crtc *crtc,
 271		       const struct intel_dpll_hw_state *pll_state,
 272		       unsigned long dpll_mask)
 273{
 274	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 275	struct intel_shared_dpll *pll, *unused_pll = NULL;
 276	struct intel_shared_dpll_state *shared_dpll;
 277	enum intel_dpll_id i;
 
 278
 279	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 280
 281	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
 282
 283	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
 284		pll = &dev_priv->dpll.shared_dplls[i];
 
 
 
 
 285
 286		/* Only want to check enabled timings first */
 287		if (shared_dpll[i].crtc_mask == 0) {
 288			if (!unused_pll)
 289				unused_pll = pll;
 290			continue;
 291		}
 292
 293		if (memcmp(pll_state,
 294			   &shared_dpll[i].hw_state,
 295			   sizeof(*pll_state)) == 0) {
 296			drm_dbg_kms(&dev_priv->drm,
 297				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
 298				    crtc->base.base.id, crtc->base.name,
 299				    pll->info->name,
 300				    shared_dpll[i].crtc_mask,
 301				    pll->active_mask);
 302			return pll;
 303		}
 304	}
 305
 306	/* Ok no matching timings, maybe there's a free one? */
 307	if (unused_pll) {
 308		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
 309			    crtc->base.base.id, crtc->base.name,
 310			    unused_pll->info->name);
 311		return unused_pll;
 312	}
 313
 314	return NULL;
 315}
 316
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 317static void
 318intel_reference_shared_dpll(struct intel_atomic_state *state,
 319			    const struct intel_crtc *crtc,
 320			    const struct intel_shared_dpll *pll,
 321			    const struct intel_dpll_hw_state *pll_state)
 322{
 323	struct drm_i915_private *i915 = to_i915(state->base.dev);
 324	struct intel_shared_dpll_state *shared_dpll;
 325	const enum intel_dpll_id id = pll->info->id;
 326
 327	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 328
 329	if (shared_dpll[id].crtc_mask == 0)
 330		shared_dpll[id].hw_state = *pll_state;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 331
 332	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
 333		pipe_name(crtc->pipe));
 334
 335	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
 
 336}
 337
 338static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
 339					  const struct intel_crtc *crtc,
 340					  const struct intel_shared_dpll *pll)
 341{
 342	struct intel_shared_dpll_state *shared_dpll;
 343
 344	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 345	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
 
 346}
 347
 348static void intel_put_dpll(struct intel_atomic_state *state,
 349			   struct intel_crtc *crtc)
 350{
 351	const struct intel_crtc_state *old_crtc_state =
 352		intel_atomic_get_old_crtc_state(state, crtc);
 353	struct intel_crtc_state *new_crtc_state =
 354		intel_atomic_get_new_crtc_state(state, crtc);
 355
 356	new_crtc_state->shared_dpll = NULL;
 357
 358	if (!old_crtc_state->shared_dpll)
 359		return;
 360
 361	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
 362}
 363
 364/**
 365 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 366 * @state: atomic state
 367 *
 368 * This is the dpll version of drm_atomic_helper_swap_state() since the
 369 * helper does not handle driver-specific global state.
 370 *
 371 * For consistency with atomic helpers this function does a complete swap,
 372 * i.e. it also puts the current state into @state, even though there is no
 373 * need for that at this moment.
 374 */
 375void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 376{
 377	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 378	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
 379	enum intel_dpll_id i;
 
 380
 381	if (!state->dpll_set)
 382		return;
 383
 384	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
 385		struct intel_shared_dpll *pll =
 386			&dev_priv->dpll.shared_dplls[i];
 387
 388		swap(pll->state, shared_dpll[i]);
 389	}
 390}
 391
 392static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 393				      struct intel_shared_dpll *pll,
 394				      struct intel_dpll_hw_state *hw_state)
 395{
 396	const enum intel_dpll_id id = pll->info->id;
 397	intel_wakeref_t wakeref;
 398	u32 val;
 399
 400	wakeref = intel_display_power_get_if_enabled(dev_priv,
 401						     POWER_DOMAIN_DISPLAY_CORE);
 402	if (!wakeref)
 403		return false;
 404
 405	val = intel_de_read(dev_priv, PCH_DPLL(id));
 406	hw_state->dpll = val;
 407	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
 408	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
 409
 410	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 411
 412	return val & DPLL_VCO_ENABLE;
 413}
 414
 415static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
 416				 struct intel_shared_dpll *pll)
 417{
 418	const enum intel_dpll_id id = pll->info->id;
 419
 420	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
 421	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
 422}
 423
 424static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 425{
 426	u32 val;
 427	bool enabled;
 428
 429	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
 430
 431	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
 432	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 433			    DREF_SUPERSPREAD_SOURCE_MASK));
 434	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
 
 435}
 436
 437static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 438				struct intel_shared_dpll *pll)
 439{
 440	const enum intel_dpll_id id = pll->info->id;
 441
 442	/* PCH refclock must be enabled first */
 443	ibx_assert_pch_refclk_enabled(dev_priv);
 444
 445	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
 
 
 
 446
 447	/* Wait for the clocks to stabilize. */
 448	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 449	udelay(150);
 450
 451	/* The pixel multiplier can only be updated once the
 452	 * DPLL is enabled and the clocks are stable.
 453	 *
 454	 * So write it again.
 455	 */
 456	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
 457	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 458	udelay(200);
 459}
 460
 461static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 462				 struct intel_shared_dpll *pll)
 463{
 464	const enum intel_dpll_id id = pll->info->id;
 465
 466	intel_de_write(dev_priv, PCH_DPLL(id), 0);
 467	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 468	udelay(200);
 469}
 470
 471static bool ibx_get_dpll(struct intel_atomic_state *state,
 472			 struct intel_crtc *crtc,
 473			 struct intel_encoder *encoder)
 
 
 
 
 
 
 
 474{
 475	struct intel_crtc_state *crtc_state =
 476		intel_atomic_get_new_crtc_state(state, crtc);
 477	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 478	struct intel_shared_dpll *pll;
 479	enum intel_dpll_id i;
 480
 481	if (HAS_PCH_IBX(dev_priv)) {
 482		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 483		i = (enum intel_dpll_id) crtc->pipe;
 484		pll = &dev_priv->dpll.shared_dplls[i];
 485
 486		drm_dbg_kms(&dev_priv->drm,
 487			    "[CRTC:%d:%s] using pre-allocated %s\n",
 488			    crtc->base.base.id, crtc->base.name,
 489			    pll->info->name);
 490	} else {
 491		pll = intel_find_shared_dpll(state, crtc,
 492					     &crtc_state->dpll_hw_state,
 493					     BIT(DPLL_ID_PCH_PLL_B) |
 494					     BIT(DPLL_ID_PCH_PLL_A));
 495	}
 496
 497	if (!pll)
 498		return false;
 499
 500	/* reference the pll */
 501	intel_reference_shared_dpll(state, crtc,
 502				    pll, &crtc_state->dpll_hw_state);
 503
 504	crtc_state->shared_dpll = pll;
 505
 506	return true;
 507}
 508
 509static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
 510			      const struct intel_dpll_hw_state *hw_state)
 511{
 512	drm_dbg_kms(&dev_priv->drm,
 513		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 514		    "fp0: 0x%x, fp1: 0x%x\n",
 515		    hw_state->dpll,
 516		    hw_state->dpll_md,
 517		    hw_state->fp0,
 518		    hw_state->fp1);
 519}
 520
 521static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 522	.prepare = ibx_pch_dpll_prepare,
 523	.enable = ibx_pch_dpll_enable,
 524	.disable = ibx_pch_dpll_disable,
 525	.get_hw_state = ibx_pch_dpll_get_hw_state,
 526};
 527
 528static const struct dpll_info pch_plls[] = {
 529	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
 530	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
 531	{ },
 532};
 533
 534static const struct intel_dpll_mgr pch_pll_mgr = {
 535	.dpll_info = pch_plls,
 536	.get_dplls = ibx_get_dpll,
 537	.put_dplls = intel_put_dpll,
 538	.dump_hw_state = ibx_dump_hw_state,
 539};
 540
 541static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 542			       struct intel_shared_dpll *pll)
 543{
 544	const enum intel_dpll_id id = pll->info->id;
 545
 546	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
 547	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
 548	udelay(20);
 549}
 550
 551static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 552				struct intel_shared_dpll *pll)
 553{
 554	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
 555	intel_de_posting_read(dev_priv, SPLL_CTL);
 556	udelay(20);
 557}
 558
 559static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
 560				  struct intel_shared_dpll *pll)
 561{
 562	const enum intel_dpll_id id = pll->info->id;
 563	u32 val;
 564
 565	val = intel_de_read(dev_priv, WRPLL_CTL(id));
 566	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
 567	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
 568
 569	/*
 570	 * Try to set up the PCH reference clock once all DPLLs
 571	 * that depend on it have been shut down.
 572	 */
 573	if (dev_priv->pch_ssc_use & BIT(id))
 574		intel_init_pch_refclk(dev_priv);
 575}
 576
 577static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
 578				 struct intel_shared_dpll *pll)
 579{
 580	enum intel_dpll_id id = pll->info->id;
 581	u32 val;
 582
 583	val = intel_de_read(dev_priv, SPLL_CTL);
 584	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
 585	intel_de_posting_read(dev_priv, SPLL_CTL);
 586
 587	/*
 588	 * Try to set up the PCH reference clock once all DPLLs
 589	 * that depend on it have been shut down.
 590	 */
 591	if (dev_priv->pch_ssc_use & BIT(id))
 592		intel_init_pch_refclk(dev_priv);
 593}
 594
 595static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
 596				       struct intel_shared_dpll *pll,
 597				       struct intel_dpll_hw_state *hw_state)
 598{
 599	const enum intel_dpll_id id = pll->info->id;
 600	intel_wakeref_t wakeref;
 601	u32 val;
 602
 603	wakeref = intel_display_power_get_if_enabled(dev_priv,
 604						     POWER_DOMAIN_DISPLAY_CORE);
 605	if (!wakeref)
 606		return false;
 607
 608	val = intel_de_read(dev_priv, WRPLL_CTL(id));
 609	hw_state->wrpll = val;
 610
 611	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 612
 613	return val & WRPLL_PLL_ENABLE;
 614}
 615
 616static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
 617				      struct intel_shared_dpll *pll,
 618				      struct intel_dpll_hw_state *hw_state)
 619{
 620	intel_wakeref_t wakeref;
 621	u32 val;
 622
 623	wakeref = intel_display_power_get_if_enabled(dev_priv,
 624						     POWER_DOMAIN_DISPLAY_CORE);
 625	if (!wakeref)
 626		return false;
 627
 628	val = intel_de_read(dev_priv, SPLL_CTL);
 629	hw_state->spll = val;
 630
 631	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 632
 633	return val & SPLL_PLL_ENABLE;
 634}
 635
 636#define LC_FREQ 2700
 637#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 638
 639#define P_MIN 2
 640#define P_MAX 64
 641#define P_INC 2
 642
 643/* Constraints for PLL good behavior */
 644#define REF_MIN 48
 645#define REF_MAX 400
 646#define VCO_MIN 2400
 647#define VCO_MAX 4800
 648
 649struct hsw_wrpll_rnp {
 650	unsigned p, n2, r2;
 651};
 652
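/*
 * Frequency error budget, in ppm, tolerated for the WRPLL output at the
 * given pixel clock (in Hz); see hsw_wrpll_update_rnp().
 */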
 653static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 654{
 655	unsigned budget;
 656
 657	switch (clock) {
 658	case 25175000:
 659	case 25200000:
 660	case 27000000:
 661	case 27027000:
 662	case 37762500:
 663	case 37800000:
 664	case 40500000:
 665	case 40541000:
 666	case 54000000:
 667	case 54054000:
 668	case 59341000:
 669	case 59400000:
 670	case 72000000:
 671	case 74176000:
 672	case 74250000:
 673	case 81000000:
 674	case 81081000:
 675	case 89012000:
 676	case 89100000:
 677	case 108000000:
 678	case 108108000:
 679	case 111264000:
 680	case 111375000:
 681	case 148352000:
 682	case 148500000:
 683	case 162000000:
 684	case 162162000:
 685	case 222525000:
 686	case 222750000:
 687	case 296703000:
 688	case 297000000:
 689		budget = 0;
 690		break;
 691	case 233500000:
 692	case 245250000:
 693	case 247750000:
 694	case 253250000:
 695	case 298000000:
 696		budget = 1500;
 697		break;
 698	case 169128000:
 699	case 169500000:
 700	case 179500000:
 701	case 202000000:
 702		budget = 2000;
 703		break;
 704	case 256250000:
 705	case 262500000:
 706	case 270000000:
 707	case 272500000:
 708	case 273750000:
 709	case 280750000:
 710	case 281250000:
 711	case 286000000:
 712	case 291750000:
 713		budget = 4000;
 714		break;
 715	case 267250000:
 716	case 268500000:
 717		budget = 5000;
 718		break;
 719	default:
 720		budget = 1000;
 721		break;
 722	}
 723
 724	return budget;
 725}
 726
 727static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
 728				 unsigned int r2, unsigned int n2,
 729				 unsigned int p,
 730				 struct hsw_wrpll_rnp *best)
 731{
 732	u64 a, b, c, d, diff, diff_best;
 733
 734	/* No best (r,n,p) yet */
 735	if (best->p == 0) {
 736		best->p = p;
 737		best->n2 = n2;
 738		best->r2 = r2;
 739		return;
 740	}
 741
 742	/*
 743	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
 744	 * freq2k.
 745	 *
 746	 * delta = 1e6 *
 747	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
 748	 *	   freq2k;
 749	 *
 750	 * and we would like delta <= budget.
 751	 *
 752	 * If the discrepancy is above the PPM-based budget, always prefer to
 753	 * improve upon the previous solution.  However, if you're within the
 754	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
 755	 */
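	/*
	 * The comparisons below perform the budget test without division:
	 * a < c is equivalent to delta > budget for the candidate
	 * (r2, n2, p), and b < d likewise for the current best.
	 */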
 756	a = freq2k * budget * p * r2;
 757	b = freq2k * budget * best->p * best->r2;
 758	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
 759	diff_best = abs_diff(freq2k * best->p * best->r2,
 760			     LC_FREQ_2K * best->n2);
 761	c = 1000000 * diff;
 762	d = 1000000 * diff_best;
 763
 764	if (a < c && b < d) {
 765		/* If both are above the budget, pick the closer */
 766		if (best->p * best->r2 * diff < p * r2 * diff_best) {
 767			best->p = p;
 768			best->n2 = n2;
 769			best->r2 = r2;
 770		}
 771	} else if (a >= c && b < d) {
  772		/* Candidate is within the budget but the current best is not: update. */
 773		best->p = p;
 774		best->n2 = n2;
 775		best->r2 = r2;
 776	} else if (a >= c && b >= d) {
 777		/* Both are below the limit, so pick the higher n2/(r2*r2) */
 778		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
 779			best->p = p;
 780			best->n2 = n2;
 781			best->r2 = r2;
 782		}
 783	}
 784	/* Otherwise a < c && b >= d, do nothing */
 785}
 786
 787static void
 788hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 789			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
 790{
 791	u64 freq2k;
 792	unsigned p, n2, r2;
 793	struct hsw_wrpll_rnp best = { 0, 0, 0 };
 794	unsigned budget;
 795
 796	freq2k = clock / 100;
 797
 798	budget = hsw_wrpll_get_budget_for_freq(clock);
 799
  800	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
  801	 * entirely and pass the LC PLL output through directly. */
 802	if (freq2k == 5400000) {
 803		*n2_out = 2;
 804		*p_out = 1;
 805		*r2_out = 2;
 806		return;
 807	}
 808
 809	/*
 810	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
 811	 * the WR PLL.
 812	 *
 813	 * We want R so that REF_MIN <= Ref <= REF_MAX.
 814	 * Injecting R2 = 2 * R gives:
 815	 *   REF_MAX * r2 > LC_FREQ * 2 and
 816	 *   REF_MIN * r2 < LC_FREQ * 2
 817	 *
 818	 * Which means the desired boundaries for r2 are:
 819	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
 820	 *
 821	 */
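	/*
	 * With LC_FREQ = 2700, REF_MIN = 48 and REF_MAX = 400 this scans
	 * r2 = 14..112 (integer truncation of 5400/400 and 5400/48).
	 */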
 822	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
 823	     r2 <= LC_FREQ * 2 / REF_MIN;
 824	     r2++) {
 825
 826		/*
 827		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
 828		 *
 829		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
 830		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
 831		 *   VCO_MAX * r2 > n2 * LC_FREQ and
 832		 *   VCO_MIN * r2 < n2 * LC_FREQ)
 833		 *
 834		 * Which means the desired boundaries for n2 are:
 835		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
 836		 */
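		/* e.g. for r2 = 14 this scans n2 = 13..24 */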
 837		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
 838		     n2 <= VCO_MAX * r2 / LC_FREQ;
 839		     n2++) {
 840
 841			for (p = P_MIN; p <= P_MAX; p += P_INC)
 842				hsw_wrpll_update_rnp(freq2k, budget,
 843						     r2, n2, p, &best);
 844		}
 845	}
 846
 847	*n2_out = best.n2;
 848	*p_out = best.p;
 849	*r2_out = best.r2;
 850}
 851
 852static struct intel_shared_dpll *
 853hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
 854		       struct intel_crtc *crtc)
 855{
 856	struct intel_crtc_state *crtc_state =
 857		intel_atomic_get_new_crtc_state(state, crtc);
 858	struct intel_shared_dpll *pll;
 859	u32 val;
 860	unsigned int p, n2, r2;
 861
 862	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
 863
 864	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
 865	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 866	      WRPLL_DIVIDER_POST(p);
 867
 868	crtc_state->dpll_hw_state.wrpll = val;
 869
 870	pll = intel_find_shared_dpll(state, crtc,
 871				     &crtc_state->dpll_hw_state,
 872				     BIT(DPLL_ID_WRPLL2) |
 873				     BIT(DPLL_ID_WRPLL1));
 874
 875	if (!pll)
 876		return NULL;
 877
 878	return pll;
 879}
 880
 881static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
 882				  const struct intel_shared_dpll *pll)
 883{
 884	int refclk;
 885	int n, p, r;
 886	u32 wrpll = pll->state.hw_state.wrpll;
 887
 888	switch (wrpll & WRPLL_REF_MASK) {
 889	case WRPLL_REF_SPECIAL_HSW:
 890		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
 891		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
 892			refclk = dev_priv->dpll.ref_clks.nssc;
 893			break;
 894		}
 895		fallthrough;
 896	case WRPLL_REF_PCH_SSC:
 897		/*
 898		 * We could calculate spread here, but our checking
 899		 * code only cares about 5% accuracy, and spread is a max of
 900		 * 0.5% downspread.
 901		 */
 902		refclk = dev_priv->dpll.ref_clks.ssc;
 903		break;
 904	case WRPLL_REF_LCPLL:
 905		refclk = 2700000;
 906		break;
 907	default:
 908		MISSING_CASE(wrpll);
 909		return 0;
 910	}
 911
 912	r = wrpll & WRPLL_DIVIDER_REF_MASK;
 913	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
 914	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
 915
 916	/* Convert to KHz, p & r have a fixed point portion */
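	/*
	 * n and r read back the doubled n2/r2 values that
	 * hsw_ddi_calculate_wrpll() programs, so up to integer rounding this
	 * is refclk * N / (R * p) / 5 in kHz.
	 */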
 917	return (refclk * n / 10) / (p * r) * 2;
 918}
 919
 920static struct intel_shared_dpll *
 921hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
 922{
 923	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 924	struct intel_shared_dpll *pll;
 925	enum intel_dpll_id pll_id;
 926	int clock = crtc_state->port_clock;
 927
 928	switch (clock / 2) {
 929	case 81000:
 930		pll_id = DPLL_ID_LCPLL_810;
 931		break;
 932	case 135000:
 933		pll_id = DPLL_ID_LCPLL_1350;
 934		break;
 935	case 270000:
 936		pll_id = DPLL_ID_LCPLL_2700;
 937		break;
 938	default:
 939		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
 940			    clock);
 941		return NULL;
 942	}
 943
 944	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
 945
 946	if (!pll)
 947		return NULL;
 948
 949	return pll;
 950}
 951
 952static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
 953				  const struct intel_shared_dpll *pll)
 954{
 955	int link_clock = 0;
 956
 957	switch (pll->info->id) {
 958	case DPLL_ID_LCPLL_810:
 959		link_clock = 81000;
 960		break;
 961	case DPLL_ID_LCPLL_1350:
 962		link_clock = 135000;
 963		break;
 964	case DPLL_ID_LCPLL_2700:
 965		link_clock = 270000;
 966		break;
 967	default:
 968		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
 969		break;
 970	}
 971
 972	return link_clock * 2;
 973}
 974
 975static struct intel_shared_dpll *
 976hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
 977		      struct intel_crtc *crtc)
 978{
 979	struct intel_crtc_state *crtc_state =
 980		intel_atomic_get_new_crtc_state(state, crtc);
 981
 982	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
 983		return NULL;
 984
 985	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
 986					 SPLL_REF_MUXED_SSC;
 987
 988	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
 989				      BIT(DPLL_ID_SPLL));
 990}
 991
 992static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
 993				 const struct intel_shared_dpll *pll)
 994{
 995	int link_clock = 0;
 996
 997	switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
 998	case SPLL_FREQ_810MHz:
 999		link_clock = 81000;
1000		break;
1001	case SPLL_FREQ_1350MHz:
1002		link_clock = 135000;
1003		break;
1004	case SPLL_FREQ_2700MHz:
1005		link_clock = 270000;
1006		break;
1007	default:
1008		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1009		break;
1010	}
1011
1012	return link_clock * 2;
1013}
1014
1015static bool hsw_get_dpll(struct intel_atomic_state *state,
1016			 struct intel_crtc *crtc,
1017			 struct intel_encoder *encoder)
1018{
1019	struct intel_crtc_state *crtc_state =
1020		intel_atomic_get_new_crtc_state(state, crtc);
1021	struct intel_shared_dpll *pll;
1022
1023	memset(&crtc_state->dpll_hw_state, 0,
1024	       sizeof(crtc_state->dpll_hw_state));
1025
1026	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1027		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1028	else if (intel_crtc_has_dp_encoder(crtc_state))
1029		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1030	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1031		pll = hsw_ddi_spll_get_dpll(state, crtc);
1032	else
1033		return false;
1034
1035	if (!pll)
1036		return false;
1037
1038	intel_reference_shared_dpll(state, crtc,
1039				    pll, &crtc_state->dpll_hw_state);
1040
1041	crtc_state->shared_dpll = pll;
1042
1043	return true;
1044}
1045
1046static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1047{
1048	i915->dpll.ref_clks.ssc = 135000;
1049	/* Non-SSC is only used on non-ULT HSW. */
1050	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1051		i915->dpll.ref_clks.nssc = 24000;
1052	else
1053		i915->dpll.ref_clks.nssc = 135000;
1054}
1055
1056static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1057			      const struct intel_dpll_hw_state *hw_state)
1058{
1059	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1060		    hw_state->wrpll, hw_state->spll);
1061}
1062
1063static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1064	.enable = hsw_ddi_wrpll_enable,
1065	.disable = hsw_ddi_wrpll_disable,
1066	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1067	.get_freq = hsw_ddi_wrpll_get_freq,
1068};
1069
1070static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1071	.enable = hsw_ddi_spll_enable,
1072	.disable = hsw_ddi_spll_disable,
1073	.get_hw_state = hsw_ddi_spll_get_hw_state,
1074	.get_freq = hsw_ddi_spll_get_freq,
1075};
1076
1077static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1078				 struct intel_shared_dpll *pll)
1079{
1080}
1081
1082static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1083				  struct intel_shared_dpll *pll)
1084{
1085}
1086
1087static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1088				       struct intel_shared_dpll *pll,
1089				       struct intel_dpll_hw_state *hw_state)
1090{
1091	return true;
1092}
1093
1094static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1095	.enable = hsw_ddi_lcpll_enable,
1096	.disable = hsw_ddi_lcpll_disable,
1097	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1098	.get_freq = hsw_ddi_lcpll_get_freq,
1099};
1100
1101static const struct dpll_info hsw_plls[] = {
1102	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1103	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1104	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1105	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1106	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1107	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1108	{ },
1109};
1110
1111static const struct intel_dpll_mgr hsw_pll_mgr = {
1112	.dpll_info = hsw_plls,
1113	.get_dplls = hsw_get_dpll,
1114	.put_dplls = intel_put_dpll,
1115	.update_ref_clks = hsw_update_dpll_ref_clks,
1116	.dump_hw_state = hsw_dump_hw_state,
1117};
1118
1119struct skl_dpll_regs {
1120	i915_reg_t ctl, cfgcr1, cfgcr2;
1121};
1122
1123/* this array is indexed by the *shared* pll id */
1124static const struct skl_dpll_regs skl_dpll_regs[4] = {
1125	{
1126		/* DPLL 0 */
1127		.ctl = LCPLL1_CTL,
1128		/* DPLL 0 doesn't support HDMI mode */
1129	},
1130	{
1131		/* DPLL 1 */
1132		.ctl = LCPLL2_CTL,
1133		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1134		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1135	},
1136	{
1137		/* DPLL 2 */
1138		.ctl = WRPLL_CTL(0),
1139		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1140		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1141	},
1142	{
1143		/* DPLL 3 */
1144		.ctl = WRPLL_CTL(1),
1145		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1146		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1147	},
1148};
1149
1150static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1151				    struct intel_shared_dpll *pll)
1152{
1153	const enum intel_dpll_id id = pll->info->id;
1154	u32 val;
1155
1156	val = intel_de_read(dev_priv, DPLL_CTRL1);
1157
1158	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1159		 DPLL_CTRL1_SSC(id) |
1160		 DPLL_CTRL1_LINK_RATE_MASK(id));
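	/* hw_state.ctrl1 is stored relative to DPLL 0; each DPLL owns a 6-bit field */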
1161	val |= pll->state.hw_state.ctrl1 << (id * 6);
1162
1163	intel_de_write(dev_priv, DPLL_CTRL1, val);
1164	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1165}
1166
1167static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1168			       struct intel_shared_dpll *pll)
1169{
1170	const struct skl_dpll_regs *regs = skl_dpll_regs;
1171	const enum intel_dpll_id id = pll->info->id;
1172
1173	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1174
1175	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1176	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1177	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1178	intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1179
1180	/* the enable bit is always bit 31 */
1181	intel_de_write(dev_priv, regs[id].ctl,
1182		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1183
1184	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1185		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1186}
1187
1188static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1189				 struct intel_shared_dpll *pll)
1190{
1191	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1192}
1193
1194static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1195				struct intel_shared_dpll *pll)
1196{
1197	const struct skl_dpll_regs *regs = skl_dpll_regs;
1198	const enum intel_dpll_id id = pll->info->id;
1199
1200	/* the enable bit is always bit 31 */
1201	intel_de_write(dev_priv, regs[id].ctl,
1202		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1203	intel_de_posting_read(dev_priv, regs[id].ctl);
1204}
1205
1206static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1207				  struct intel_shared_dpll *pll)
1208{
1209}
1210
1211static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1212				     struct intel_shared_dpll *pll,
1213				     struct intel_dpll_hw_state *hw_state)
1214{
1215	u32 val;
1216	const struct skl_dpll_regs *regs = skl_dpll_regs;
1217	const enum intel_dpll_id id = pll->info->id;
1218	intel_wakeref_t wakeref;
1219	bool ret;
1220
1221	wakeref = intel_display_power_get_if_enabled(dev_priv,
1222						     POWER_DOMAIN_DISPLAY_CORE);
1223	if (!wakeref)
1224		return false;
1225
1226	ret = false;
1227
1228	val = intel_de_read(dev_priv, regs[id].ctl);
1229	if (!(val & LCPLL_PLL_ENABLE))
1230		goto out;
1231
1232	val = intel_de_read(dev_priv, DPLL_CTRL1);
1233	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1234
1235	/* avoid reading back stale values if HDMI mode is not enabled */
1236	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1237		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1238		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1239	}
1240	ret = true;
1241
1242out:
1243	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1244
1245	return ret;
1246}
1247
1248static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1249				       struct intel_shared_dpll *pll,
1250				       struct intel_dpll_hw_state *hw_state)
1251{
1252	const struct skl_dpll_regs *regs = skl_dpll_regs;
1253	const enum intel_dpll_id id = pll->info->id;
1254	intel_wakeref_t wakeref;
1255	u32 val;
1256	bool ret;
1257
1258	wakeref = intel_display_power_get_if_enabled(dev_priv,
1259						     POWER_DOMAIN_DISPLAY_CORE);
1260	if (!wakeref)
1261		return false;
1262
1263	ret = false;
1264
1265	/* DPLL0 is always enabled since it drives CDCLK */
1266	val = intel_de_read(dev_priv, regs[id].ctl);
1267	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1268		goto out;
1269
1270	val = intel_de_read(dev_priv, DPLL_CTRL1);
1271	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1272
1273	ret = true;
1274
1275out:
1276	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1277
1278	return ret;
1279}
1280
1281struct skl_wrpll_context {
1282	u64 min_deviation;		/* current minimal deviation */
1283	u64 central_freq;		/* chosen central freq */
1284	u64 dco_freq;			/* chosen dco freq */
1285	unsigned int p;			/* chosen divider */
1286};
1287
1288static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1289{
1290	memset(ctx, 0, sizeof(*ctx));
1291
1292	ctx->min_deviation = U64_MAX;
1293}
1294
1295/* DCO freq must be within +1%/-6%  of the DCO central freq */
1296#define SKL_DCO_MAX_PDEVIATION	100
1297#define SKL_DCO_MAX_NDEVIATION	600
1298
1299static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1300				  u64 central_freq,
1301				  u64 dco_freq,
1302				  unsigned int divider)
1303{
1304	u64 deviation;
1305
1306	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1307			      central_freq);
1308
1309	/* positive deviation */
1310	if (dco_freq >= central_freq) {
1311		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1312		    deviation < ctx->min_deviation) {
1313			ctx->min_deviation = deviation;
1314			ctx->central_freq = central_freq;
1315			ctx->dco_freq = dco_freq;
1316			ctx->p = divider;
1317		}
1318	/* negative deviation */
1319	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1320		   deviation < ctx->min_deviation) {
1321		ctx->min_deviation = deviation;
1322		ctx->central_freq = central_freq;
1323		ctx->dco_freq = dco_freq;
1324		ctx->p = divider;
1325	}
1326}
1327
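/*
 * Split the overall divider p into the three hardware fields so that
 * p == p0 * p1 * p2 (pdiv, qdiv and kdiv in the CFGCR2 encoding).
 */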
1328static void skl_wrpll_get_multipliers(unsigned int p,
1329				      unsigned int *p0 /* out */,
1330				      unsigned int *p1 /* out */,
1331				      unsigned int *p2 /* out */)
1332{
1333	/* even dividers */
1334	if (p % 2 == 0) {
1335		unsigned int half = p / 2;
1336
1337		if (half == 1 || half == 2 || half == 3 || half == 5) {
1338			*p0 = 2;
1339			*p1 = 1;
1340			*p2 = half;
1341		} else if (half % 2 == 0) {
1342			*p0 = 2;
1343			*p1 = half / 2;
1344			*p2 = 2;
1345		} else if (half % 3 == 0) {
1346			*p0 = 3;
1347			*p1 = half / 3;
1348			*p2 = 2;
1349		} else if (half % 7 == 0) {
1350			*p0 = 7;
1351			*p1 = half / 7;
1352			*p2 = 2;
1353		}
1354	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1355		*p0 = 3;
1356		*p1 = 1;
1357		*p2 = p / 3;
1358	} else if (p == 5 || p == 7) {
1359		*p0 = p;
1360		*p1 = 1;
1361		*p2 = 1;
1362	} else if (p == 15) {
1363		*p0 = 3;
1364		*p1 = 1;
1365		*p2 = 5;
1366	} else if (p == 21) {
1367		*p0 = 7;
1368		*p1 = 1;
1369		*p2 = 3;
1370	} else if (p == 35) {
1371		*p0 = 7;
1372		*p1 = 1;
1373		*p2 = 5;
1374	}
1375}
1376
1377struct skl_wrpll_params {
1378	u32 dco_fraction;
1379	u32 dco_integer;
1380	u32 qdiv_ratio;
1381	u32 qdiv_mode;
1382	u32 kdiv;
1383	u32 pdiv;
1384	u32 central_freq;
1385};
1386
1387static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1388				      u64 afe_clock,
1389				      int ref_clock,
1390				      u64 central_freq,
1391				      u32 p0, u32 p1, u32 p2)
1392{
1393	u64 dco_freq;
1394
1395	switch (central_freq) {
1396	case 9600000000ULL:
1397		params->central_freq = 0;
1398		break;
1399	case 9000000000ULL:
1400		params->central_freq = 1;
1401		break;
1402	case 8400000000ULL:
1403		params->central_freq = 3;
1404	}
1405
1406	switch (p0) {
1407	case 1:
1408		params->pdiv = 0;
1409		break;
1410	case 2:
1411		params->pdiv = 1;
1412		break;
1413	case 3:
1414		params->pdiv = 2;
1415		break;
1416	case 7:
1417		params->pdiv = 4;
1418		break;
1419	default:
1420		WARN(1, "Incorrect PDiv\n");
1421	}
1422
1423	switch (p2) {
1424	case 5:
1425		params->kdiv = 0;
1426		break;
1427	case 2:
1428		params->kdiv = 1;
1429		break;
1430	case 3:
1431		params->kdiv = 2;
1432		break;
1433	case 1:
1434		params->kdiv = 3;
1435		break;
1436	default:
1437		WARN(1, "Incorrect KDiv\n");
1438	}
1439
1440	params->qdiv_ratio = p1;
1441	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1442
1443	dco_freq = p0 * p1 * p2 * afe_clock;
1444
1445	/*
1446	 * Intermediate values are in Hz.
 1447	 * Divide by MHz to match bspec
1448	 */
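	/* dco_fraction is the fractional part of DCO/ref in 1/0x8000 steps */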
1449	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1450	params->dco_fraction =
1451		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1452			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1453}
1454
1455static bool
1456skl_ddi_calculate_wrpll(int clock /* in Hz */,
1457			int ref_clock,
1458			struct skl_wrpll_params *wrpll_params)
1459{
1460	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1461	u64 dco_central_freq[3] = { 8400000000ULL,
1462				    9000000000ULL,
1463				    9600000000ULL };
1464	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1465					     24, 28, 30, 32, 36, 40, 42, 44,
1466					     48, 52, 54, 56, 60, 64, 66, 68,
1467					     70, 72, 76, 78, 80, 84, 88, 90,
1468					     92, 96, 98 };
1469	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1470	static const struct {
1471		const int *list;
1472		int n_dividers;
1473	} dividers[] = {
1474		{ even_dividers, ARRAY_SIZE(even_dividers) },
1475		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1476	};
1477	struct skl_wrpll_context ctx;
1478	unsigned int dco, d, i;
1479	unsigned int p0, p1, p2;
1480
1481	skl_wrpll_context_init(&ctx);
1482
1483	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1484		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1485			for (i = 0; i < dividers[d].n_dividers; i++) {
1486				unsigned int p = dividers[d].list[i];
1487				u64 dco_freq = p * afe_clock;
1488
1489				skl_wrpll_try_divider(&ctx,
1490						      dco_central_freq[dco],
1491						      dco_freq,
1492						      p);
1493				/*
1494				 * Skip the remaining dividers if we're sure to
1495				 * have found the definitive divider, we can't
1496				 * improve a 0 deviation.
1497				 */
1498				if (ctx.min_deviation == 0)
1499					goto skip_remaining_dividers;
1500			}
1501		}
1502
1503skip_remaining_dividers:
1504		/*
1505		 * If a solution is found with an even divider, prefer
1506		 * this one.
1507		 */
1508		if (d == 0 && ctx.p)
1509			break;
1510	}
1511
1512	if (!ctx.p) {
1513		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1514		return false;
1515	}
1516
1517	/*
1518	 * gcc incorrectly analyses that these can be used without being
1519	 * initialized. To be fair, it's hard to guess.
1520	 */
1521	p0 = p1 = p2 = 0;
1522	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1523	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1524				  ctx.central_freq, p0, p1, p2);
1525
1526	return true;
1527}
1528
1529static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1530{
1531	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1532	u32 ctrl1, cfgcr1, cfgcr2;
1533	struct skl_wrpll_params wrpll_params = { 0, };
1534
1535	/*
1536	 * See comment in intel_dpll_hw_state to understand why we always use 0
1537	 * as the DPLL id in this function.
1538	 */
1539	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1540
1541	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1542
1543	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1544				     i915->dpll.ref_clks.nssc,
1545				     &wrpll_params))
1546		return false;
1547
1548	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1549		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1550		wrpll_params.dco_integer;
1551
1552	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1553		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1554		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1555		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1556		wrpll_params.central_freq;
1557
1558	memset(&crtc_state->dpll_hw_state, 0,
1559	       sizeof(crtc_state->dpll_hw_state));
1560
1561	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1562	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1563	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1564	return true;
1565}
1566
1567static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1568				  const struct intel_shared_dpll *pll)
1569{
1570	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
1571	int ref_clock = i915->dpll.ref_clks.nssc;
1572	u32 p0, p1, p2, dco_freq;
1573
1574	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1575	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1576
 1577	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1578		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1579	else
1580		p1 = 1;
1581
1582
1583	switch (p0) {
1584	case DPLL_CFGCR2_PDIV_1:
1585		p0 = 1;
1586		break;
1587	case DPLL_CFGCR2_PDIV_2:
1588		p0 = 2;
1589		break;
1590	case DPLL_CFGCR2_PDIV_3:
1591		p0 = 3;
1592		break;
1593	case DPLL_CFGCR2_PDIV_7:
1594		p0 = 7;
1595		break;
1596	}
1597
1598	switch (p2) {
1599	case DPLL_CFGCR2_KDIV_5:
1600		p2 = 5;
1601		break;
1602	case DPLL_CFGCR2_KDIV_2:
1603		p2 = 2;
1604		break;
1605	case DPLL_CFGCR2_KDIV_3:
1606		p2 = 3;
1607		break;
1608	case DPLL_CFGCR2_KDIV_1:
1609		p2 = 1;
1610		break;
1611	}
1612
1613	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1614		   ref_clock;
1615
1616	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1617		    ref_clock / 0x8000;
1618
1619	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1620		return 0;
1621
1622	return dco_freq / (p0 * p1 * p2 * 5);
1623}
1624
1625static bool
1626skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1627{
1628	u32 ctrl1;
1629
1630	/*
1631	 * See comment in intel_dpll_hw_state to understand why we always use 0
1632	 * as the DPLL id in this function.
1633	 */
1634	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1635	switch (crtc_state->port_clock / 2) {
1636	case 81000:
1637		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1638		break;
1639	case 135000:
1640		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1641		break;
1642	case 270000:
1643		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1644		break;
1645		/* eDP 1.4 rates */
1646	case 162000:
1647		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1648		break;
1649	case 108000:
1650		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1651		break;
1652	case 216000:
1653		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1654		break;
1655	}
1656
1657	memset(&crtc_state->dpll_hw_state, 0,
1658	       sizeof(crtc_state->dpll_hw_state));
1659
1660	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1661
1662	return true;
1663}
1664
1665static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1666				  const struct intel_shared_dpll *pll)
1667{
1668	int link_clock = 0;
1669
1670	switch ((pll->state.hw_state.ctrl1 &
1671		 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1672		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1673	case DPLL_CTRL1_LINK_RATE_810:
1674		link_clock = 81000;
1675		break;
1676	case DPLL_CTRL1_LINK_RATE_1080:
1677		link_clock = 108000;
1678		break;
1679	case DPLL_CTRL1_LINK_RATE_1350:
1680		link_clock = 135000;
1681		break;
1682	case DPLL_CTRL1_LINK_RATE_1620:
1683		link_clock = 162000;
1684		break;
1685	case DPLL_CTRL1_LINK_RATE_2160:
1686		link_clock = 216000;
1687		break;
1688	case DPLL_CTRL1_LINK_RATE_2700:
1689		link_clock = 270000;
1690		break;
1691	default:
1692		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1693		break;
1694	}
1695
1696	return link_clock * 2;
1697}
1698
1699static bool skl_get_dpll(struct intel_atomic_state *state,
1700			 struct intel_crtc *crtc,
1701			 struct intel_encoder *encoder)
1702{
1703	struct intel_crtc_state *crtc_state =
1704		intel_atomic_get_new_crtc_state(state, crtc);
1705	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1706	struct intel_shared_dpll *pll;
1707	bool bret;
1708
1709	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1710		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1711		if (!bret) {
1712			drm_dbg_kms(&i915->drm,
1713				    "Could not get HDMI pll dividers.\n");
1714			return false;
1715		}
1716	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1717		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1718		if (!bret) {
1719			drm_dbg_kms(&i915->drm,
1720				    "Could not set DP dpll HW state.\n");
1721			return false;
1722		}
1723	} else {
1724		return false;
1725	}
1726
1727	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1728		pll = intel_find_shared_dpll(state, crtc,
1729					     &crtc_state->dpll_hw_state,
1730					     BIT(DPLL_ID_SKL_DPLL0));
1731	else
1732		pll = intel_find_shared_dpll(state, crtc,
1733					     &crtc_state->dpll_hw_state,
1734					     BIT(DPLL_ID_SKL_DPLL3) |
1735					     BIT(DPLL_ID_SKL_DPLL2) |
1736					     BIT(DPLL_ID_SKL_DPLL1));
1737	if (!pll)
1738		return false;
1739
1740	intel_reference_shared_dpll(state, crtc,
1741				    pll, &crtc_state->dpll_hw_state);
1742
1743	crtc_state->shared_dpll = pll;
1744
1745	return true;
1746}
1747
1748static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1749				const struct intel_shared_dpll *pll)
1750{
1751	/*
1752	 * ctrl1 register is already shifted for each pll, just use 0 to get
1753	 * the internal shift for each field
1754	 */
1755	if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1756		return skl_ddi_wrpll_get_freq(i915, pll);
1757	else
1758		return skl_ddi_lcpll_get_freq(i915, pll);
1759}
1760
1761static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1762{
1763	/* No SSC ref */
1764	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
1765}
1766
1767static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1768			      const struct intel_dpll_hw_state *hw_state)
1769{
1770	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1771		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1772		      hw_state->ctrl1,
1773		      hw_state->cfgcr1,
1774		      hw_state->cfgcr2);
1775}
1776
1777static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1778	.enable = skl_ddi_pll_enable,
1779	.disable = skl_ddi_pll_disable,
1780	.get_hw_state = skl_ddi_pll_get_hw_state,
1781	.get_freq = skl_ddi_pll_get_freq,
1782};
1783
1784static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1785	.enable = skl_ddi_dpll0_enable,
1786	.disable = skl_ddi_dpll0_disable,
1787	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1788	.get_freq = skl_ddi_pll_get_freq,
1789};
1790
1791static const struct dpll_info skl_plls[] = {
1792	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1793	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1794	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1795	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1796	{ },
1797};
1798
1799static const struct intel_dpll_mgr skl_pll_mgr = {
1800	.dpll_info = skl_plls,
1801	.get_dplls = skl_get_dpll,
1802	.put_dplls = intel_put_dpll,
1803	.update_ref_clks = skl_update_dpll_ref_clks,
1804	.dump_hw_state = skl_dump_hw_state,
1805};
1806
1807static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1808				struct intel_shared_dpll *pll)
1809{
1810	u32 temp;
1811	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1812	enum dpio_phy phy;
1813	enum dpio_channel ch;
1814
1815	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1816
1817	/* Non-SSC reference */
1818	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1819	temp |= PORT_PLL_REF_SEL;
1820	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1821
1822	if (IS_GEMINILAKE(dev_priv)) {
1823		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1824		temp |= PORT_PLL_POWER_ENABLE;
1825		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1826
1827		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1828				 PORT_PLL_POWER_STATE), 200))
1829			drm_err(&dev_priv->drm,
1830				"Power state not set for PLL:%d\n", port);
1831	}
1832
1833	/* Disable 10 bit clock */
1834	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1835	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1836	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1837
1838	/* Write P1 & P2 */
1839	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1840	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1841	temp |= pll->state.hw_state.ebb0;
1842	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1843
1844	/* Write M2 integer */
1845	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1846	temp &= ~PORT_PLL_M2_MASK;
1847	temp |= pll->state.hw_state.pll0;
1848	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
1849
1850	/* Write N */
1851	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1852	temp &= ~PORT_PLL_N_MASK;
1853	temp |= pll->state.hw_state.pll1;
1854	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1855
1856	/* Write M2 fraction */
1857	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1858	temp &= ~PORT_PLL_M2_FRAC_MASK;
1859	temp |= pll->state.hw_state.pll2;
1860	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1861
1862	/* Write M2 fraction enable */
1863	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1864	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1865	temp |= pll->state.hw_state.pll3;
1866	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
1867
1868	/* Write coeff */
1869	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1870	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1871	temp &= ~PORT_PLL_INT_COEFF_MASK;
1872	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1873	temp |= pll->state.hw_state.pll6;
1874	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1875
1876	/* Write calibration val */
1877	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1878	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1879	temp |= pll->state.hw_state.pll8;
1880	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
1881
1882	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1883	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1884	temp |= pll->state.hw_state.pll9;
1885	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
1886
1887	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1888	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1889	temp &= ~PORT_PLL_DCO_AMP_MASK;
1890	temp |= pll->state.hw_state.pll10;
1891	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1892
1893	/* Recalibrate with new settings */
1894	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1895	temp |= PORT_PLL_RECALIBRATE;
1896	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1897	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1898	temp |= pll->state.hw_state.ebb4;
1899	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1900
1901	/* Enable PLL */
1902	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1903	temp |= PORT_PLL_ENABLE;
1904	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1905	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1906
1907	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1908			200))
1909		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
1910
1911	if (IS_GEMINILAKE(dev_priv)) {
1912		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
1913		temp |= DCC_DELAY_RANGE_2;
1914		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1915	}
1916
1917	/*
1918	 * While we write to the group register to program all lanes at once we
1919	 * can read only lane registers and we pick lanes 0/1 for that.
1920	 */
1921	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
1922	temp &= ~LANE_STAGGER_MASK;
1923	temp &= ~LANESTAGGER_STRAP_OVRD;
1924	temp |= pll->state.hw_state.pcsdw12;
1925	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1926}
1927
1928static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1929					struct intel_shared_dpll *pll)
1930{
1931	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1932	u32 temp;
1933
1934	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1935	temp &= ~PORT_PLL_ENABLE;
1936	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1937	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1938
1939	if (IS_GEMINILAKE(dev_priv)) {
1940		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1941		temp &= ~PORT_PLL_POWER_ENABLE;
1942		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1943
1944		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1945				  PORT_PLL_POWER_STATE), 200))
1946			drm_err(&dev_priv->drm,
1947				"Power state not reset for PLL:%d\n", port);
1948	}
1949}
1950
1951static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1952					struct intel_shared_dpll *pll,
1953					struct intel_dpll_hw_state *hw_state)
1954{
1955	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1956	intel_wakeref_t wakeref;
1957	enum dpio_phy phy;
1958	enum dpio_channel ch;
1959	u32 val;
1960	bool ret;
1961
1962	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1963
1964	wakeref = intel_display_power_get_if_enabled(dev_priv,
1965						     POWER_DOMAIN_DISPLAY_CORE);
1966	if (!wakeref)
1967		return false;
1968
1969	ret = false;
1970
1971	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1972	if (!(val & PORT_PLL_ENABLE))
1973		goto out;
1974
1975	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1976	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1977
1978	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1979	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1980
1981	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1982	hw_state->pll0 &= PORT_PLL_M2_MASK;
1983
1984	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1985	hw_state->pll1 &= PORT_PLL_N_MASK;
1986
1987	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1988	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1989
1990	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1991	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1992
1993	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1994	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1995			  PORT_PLL_INT_COEFF_MASK |
1996			  PORT_PLL_GAIN_CTL_MASK;
1997
1998	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1999	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2000
2001	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2002	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2003
2004	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2005	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2006			   PORT_PLL_DCO_AMP_MASK;
2007
2008	/*
2009	 * While we write to the group register to program all lanes at once we
2010	 * can read only lane registers. We configure all lanes the same way, so
2011	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2012	 */
2013	hw_state->pcsdw12 = intel_de_read(dev_priv,
2014					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2015	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2016		drm_dbg(&dev_priv->drm,
2017			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2018			hw_state->pcsdw12,
2019			intel_de_read(dev_priv,
2020				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2021	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2022
2023	ret = true;
2024
2025out:
2026	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2027
2028	return ret;
2029}
2030
2031/* bxt clock parameters */
2032struct bxt_clk_div {
2033	int clock;
2034	u32 p1;
2035	u32 p2;
2036	u32 m2_int;
2037	u32 m2_frac;
2038	bool m2_frac_en;
2039	u32 n;
2040
2041	int vco;
2042};
2043
2044/* pre-calculated values for DP linkrates */
2045static const struct bxt_clk_div bxt_dp_clk_val[] = {
2046	{162000, 4, 2, 32, 1677722, 1, 1},
2047	{270000, 4, 1, 27,       0, 0, 1},
2048	{540000, 2, 1, 27,       0, 0, 1},
2049	{216000, 3, 2, 32, 1677722, 1, 1},
2050	{243000, 4, 1, 24, 1258291, 1, 1},
2051	{324000, 4, 1, 32, 1677722, 1, 1},
2052	{432000, 3, 1, 32, 1677722, 1, 1}
2053};
2054
2055static bool
2056bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2057			  struct bxt_clk_div *clk_div)
2058{
2059	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2060	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2061	struct dpll best_clock;
2062
2063	/* Calculate HDMI div */
2064	/*
2065	 * FIXME: tie the following calculation into
2066	 * i9xx_crtc_compute_clock
2067	 */
2068	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2069		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2070			crtc_state->port_clock,
2071			pipe_name(crtc->pipe));
2072		return false;
2073	}
2074
2075	clk_div->p1 = best_clock.p1;
2076	clk_div->p2 = best_clock.p2;
2077	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2078	clk_div->n = best_clock.n;
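	/* best_clock.m2 is a fixed point value with 22 fractional bits */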
2079	clk_div->m2_int = best_clock.m2 >> 22;
2080	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2081	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2082
2083	clk_div->vco = best_clock.vco;
2084
2085	return true;
2086}
2087
2088static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2089				    struct bxt_clk_div *clk_div)
2090{
2091	int clock = crtc_state->port_clock;
2092	int i;
2093
2094	*clk_div = bxt_dp_clk_val[0];
2095	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2096		if (bxt_dp_clk_val[i].clock == clock) {
2097			*clk_div = bxt_dp_clk_val[i];
2098			break;
2099		}
2100	}
2101
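	/* VCO (kHz) = link clock * 5 * p1 * p2, e.g. 162000 * 5 * 4 * 2 = 6480000 */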
2102	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2103}
2104
2105static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2106				      const struct bxt_clk_div *clk_div)
2107{
2108	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2109	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2110	int clock = crtc_state->port_clock;
2111	int vco = clk_div->vco;
2112	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2113	u32 lanestagger;
2114
2115	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
2116
2117	if (vco >= 6200000 && vco <= 6700000) {
2118		prop_coef = 4;
2119		int_coef = 9;
2120		gain_ctl = 3;
2121		targ_cnt = 8;
2122	} else if ((vco > 5400000 && vco < 6200000) ||
2123			(vco >= 4800000 && vco < 5400000)) {
2124		prop_coef = 5;
2125		int_coef = 11;
2126		gain_ctl = 3;
2127		targ_cnt = 9;
2128	} else if (vco == 5400000) {
2129		prop_coef = 3;
2130		int_coef = 8;
2131		gain_ctl = 1;
2132		targ_cnt = 9;
2133	} else {
2134		drm_err(&i915->drm, "Invalid VCO\n");
2135		return false;
2136	}
2137
2138	if (clock > 270000)
2139		lanestagger = 0x18;
2140	else if (clock > 135000)
2141		lanestagger = 0x0d;
2142	else if (clock > 67000)
2143		lanestagger = 0x07;
2144	else if (clock > 33000)
2145		lanestagger = 0x04;
2146	else
2147		lanestagger = 0x02;
2148
2149	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2150	dpll_hw_state->pll0 = clk_div->m2_int;
2151	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2152	dpll_hw_state->pll2 = clk_div->m2_frac;
2153
2154	if (clk_div->m2_frac_en)
2155		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2156
2157	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
2158	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
2159
2160	dpll_hw_state->pll8 = targ_cnt;
2161
2162	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
2163
2164	dpll_hw_state->pll10 =
2165		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
2166		| PORT_PLL_DCO_AMP_OVR_EN_H;
2167
2168	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2169
2170	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2171
2172	return true;
2173}
2174
2175static bool
2176bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2177{
2178	struct bxt_clk_div clk_div = {};
2179
2180	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2181
2182	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2183}
2184
2185static bool
2186bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2187{
2188	struct bxt_clk_div clk_div = {};
2189
2190	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2191
2192	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2193}
2194
2195static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2196				const struct intel_shared_dpll *pll)
2197{
2198	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
2199	struct dpll clock;
2200
2201	clock.m1 = 2;
2202	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
2203	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2204		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
2205	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
2206	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
2207	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
2208
2209	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2210}
2211
2212static bool bxt_get_dpll(struct intel_atomic_state *state,
2213			 struct intel_crtc *crtc,
2214			 struct intel_encoder *encoder)
2215{
2216	struct intel_crtc_state *crtc_state =
2217		intel_atomic_get_new_crtc_state(state, crtc);
2218	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2219	struct intel_shared_dpll *pll;
2220	enum intel_dpll_id id;
2221
2222	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
2223	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
2224		return false;
2225
2226	if (intel_crtc_has_dp_encoder(crtc_state) &&
2227	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
2228		return false;
2229
2230	/* 1:1 mapping between ports and PLLs */
2231	id = (enum intel_dpll_id) encoder->port;
2232	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2233
2234	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2235		    crtc->base.base.id, crtc->base.name, pll->info->name);
2236
2237	intel_reference_shared_dpll(state, crtc,
2238				    pll, &crtc_state->dpll_hw_state);
2239
2240	crtc_state->shared_dpll = pll;
2241
2242	return true;
2243}
2244
2245static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2246{
2247	i915->dpll.ref_clks.ssc = 100000;
2248	i915->dpll.ref_clks.nssc = 100000;
2249	/* DSI non-SSC ref 19.2MHz */
2250}
2251
2252static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2253			      const struct intel_dpll_hw_state *hw_state)
2254{
 2255	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2256		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2257		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2258		    hw_state->ebb0,
2259		    hw_state->ebb4,
2260		    hw_state->pll0,
2261		    hw_state->pll1,
2262		    hw_state->pll2,
2263		    hw_state->pll3,
2264		    hw_state->pll6,
2265		    hw_state->pll8,
2266		    hw_state->pll9,
2267		    hw_state->pll10,
2268		    hw_state->pcsdw12);
2269}
2270
2271static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2272	.enable = bxt_ddi_pll_enable,
2273	.disable = bxt_ddi_pll_disable,
2274	.get_hw_state = bxt_ddi_pll_get_hw_state,
2275	.get_freq = bxt_ddi_pll_get_freq,
2276};
2277
2278static const struct dpll_info bxt_plls[] = {
2279	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2280	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2281	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2282	{ },
2283};
2284
2285static const struct intel_dpll_mgr bxt_pll_mgr = {
2286	.dpll_info = bxt_plls,
2287	.get_dplls = bxt_get_dpll,
2288	.put_dplls = intel_put_dpll,
2289	.update_ref_clks = bxt_update_dpll_ref_clks,
2290	.dump_hw_state = bxt_dump_hw_state,
2291};
2292
2293static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2294			       struct intel_shared_dpll *pll)
2295{
2296	const enum intel_dpll_id id = pll->info->id;
2297	u32 val;
2298
2299	/* 1. Enable DPLL power in DPLL_ENABLE. */
2300	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2301	val |= PLL_POWER_ENABLE;
2302	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2303
2304	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2305	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2306				  PLL_POWER_STATE, 5))
2307		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
2308
2309	/*
2310	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2311	 * select DP mode, and set DP link rate.
2312	 */
2313	val = pll->state.hw_state.cfgcr0;
2314	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
2315
 2316	/* 4. Read back to ensure writes completed */
2317	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
2318
 2319	/* 3. Configure DPLL_CFGCR1 for HDMI mode. */
 2320	/* Avoid touching CFGCR1 if HDMI mode is not enabled. */
2321	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2322		val = pll->state.hw_state.cfgcr1;
2323		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
 2324		/* 4. Read back to ensure writes completed */
2325		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
2326	}
2327
2328	/*
2329	 * 5. If the frequency will result in a change to the voltage
2330	 * requirement, follow the Display Voltage Frequency Switching
2331	 * Sequence Before Frequency Change
2332	 *
2333	 * Note: DVFS is actually handled via the cdclk code paths,
2334	 * hence we do nothing here.
2335	 */
2336
2337	/* 6. Enable DPLL in DPLL_ENABLE. */
2338	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2339	val |= PLL_ENABLE;
2340	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2341
2342	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2343	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2344		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
2345
2346	/*
2347	 * 8. If the frequency will result in a change to the voltage
2348	 * requirement, follow the Display Voltage Frequency Switching
2349	 * Sequence After Frequency Change
2350	 *
2351	 * Note: DVFS is actually handled via the cdclk code paths,
2352	 * hence we do nothing here.
2353	 */
2354
2355	/*
2356	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2357	 * Done at intel_ddi_clk_select
2358	 */
2359}
2360
2361static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2362				struct intel_shared_dpll *pll)
2363{
2364	const enum intel_dpll_id id = pll->info->id;
2365	u32 val;
2366
2367	/*
2368	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2369	 * Done at intel_ddi_post_disable
2370	 */
2371
2372	/*
2373	 * 2. If the frequency will result in a change to the voltage
2374	 * requirement, follow the Display Voltage Frequency Switching
2375	 * Sequence Before Frequency Change
2376	 *
2377	 * Note: DVFS is actually handled via the cdclk code paths,
2378	 * hence we do nothing here.
2379	 */
2380
2381	/* 3. Disable DPLL through DPLL_ENABLE. */
2382	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2383	val &= ~PLL_ENABLE;
2384	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2385
2386	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2387	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2388		drm_err(&dev_priv->drm, "PLL %d locked\n", id);
2389
2390	/*
2391	 * 5. If the frequency will result in a change to the voltage
2392	 * requirement, follow the Display Voltage Frequency Switching
2393	 * Sequence After Frequency Change
2394	 *
2395	 * Note: DVFS is actually handled via the cdclk code paths,
2396	 * hence we do nothing here.
2397	 */
2398
2399	/* 6. Disable DPLL power in DPLL_ENABLE. */
2400	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2401	val &= ~PLL_POWER_ENABLE;
2402	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2403
2404	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2405	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2406				    PLL_POWER_STATE, 5))
2407		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
2408}
2409
2410static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2411				     struct intel_shared_dpll *pll,
2412				     struct intel_dpll_hw_state *hw_state)
2413{
2414	const enum intel_dpll_id id = pll->info->id;
2415	intel_wakeref_t wakeref;
2416	u32 val;
2417	bool ret;
2418
2419	wakeref = intel_display_power_get_if_enabled(dev_priv,
2420						     POWER_DOMAIN_DISPLAY_CORE);
2421	if (!wakeref)
2422		return false;
2423
2424	ret = false;
2425
2426	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2427	if (!(val & PLL_ENABLE))
2428		goto out;
2429
2430	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
2431	hw_state->cfgcr0 = val;
2432
2433	/* avoid reading back stale values if HDMI mode is not enabled */
2434	if (val & DPLL_CFGCR0_HDMI_MODE) {
2435		hw_state->cfgcr1 = intel_de_read(dev_priv,
2436						 CNL_DPLL_CFGCR1(id));
2437	}
2438	ret = true;
2439
2440out:
2441	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2442
2443	return ret;
2444}
2445
2446static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2447				      int *qdiv, int *kdiv)
2448{
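	/*
	 * Split bestdiv into pdiv * qdiv * kdiv. For example, bestdiv = 30 is
	 * even and divisible by 6, so pdiv = 3, qdiv = 30 / 6 = 5, kdiv = 2;
	 * in every supported case pdiv * qdiv * kdiv == bestdiv.
	 */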
2449	/* even dividers */
2450	if (bestdiv % 2 == 0) {
2451		if (bestdiv == 2) {
2452			*pdiv = 2;
2453			*qdiv = 1;
2454			*kdiv = 1;
2455		} else if (bestdiv % 4 == 0) {
2456			*pdiv = 2;
2457			*qdiv = bestdiv / 4;
2458			*kdiv = 2;
2459		} else if (bestdiv % 6 == 0) {
2460			*pdiv = 3;
2461			*qdiv = bestdiv / 6;
2462			*kdiv = 2;
2463		} else if (bestdiv % 5 == 0) {
2464			*pdiv = 5;
2465			*qdiv = bestdiv / 10;
2466			*kdiv = 2;
2467		} else if (bestdiv % 14 == 0) {
2468			*pdiv = 7;
2469			*qdiv = bestdiv / 14;
2470			*kdiv = 2;
2471		}
2472	} else {
2473		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2474			*pdiv = bestdiv;
2475			*qdiv = 1;
2476			*kdiv = 1;
2477		} else { /* 9, 15, 21 */
2478			*pdiv = bestdiv / 3;
2479			*qdiv = 1;
2480			*kdiv = 3;
2481		}
2482	}
2483}
2484
2485static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2486				      u32 dco_freq, u32 ref_freq,
2487				      int pdiv, int qdiv, int kdiv)
2488{
2489	u32 dco;
2490
2491	switch (kdiv) {
2492	case 1:
2493		params->kdiv = 1;
2494		break;
2495	case 2:
2496		params->kdiv = 2;
2497		break;
2498	case 3:
2499		params->kdiv = 4;
2500		break;
2501	default:
2502		WARN(1, "Incorrect KDiv\n");
2503	}
2504
2505	switch (pdiv) {
2506	case 2:
2507		params->pdiv = 1;
2508		break;
2509	case 3:
2510		params->pdiv = 2;
2511		break;
2512	case 5:
2513		params->pdiv = 4;
2514		break;
2515	case 7:
2516		params->pdiv = 8;
2517		break;
2518	default:
2519		WARN(1, "Incorrect PDiv\n");
2520	}
2521
2522	WARN_ON(kdiv != 2 && qdiv != 1);
2523
2524	params->qdiv_ratio = qdiv;
2525	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2526
2527	dco = div_u64((u64)dco_freq << 15, ref_freq);
2528
2529	params->dco_integer = dco >> 15;
2530	params->dco_fraction = dco & 0x7fff;
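	/*
	 * Example: dco_freq = 8100000 kHz with ref_freq = 24000 kHz gives
	 * dco_freq / ref_freq = 337.5, i.e. dco_integer = 0x151 and
	 * dco_fraction = 0.5 * 2^15 = 0x4000, matching the 8.1 GHz entry in
	 * the icl_dp_combo_pll_24MHz_values table further down.
	 */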
2531}
2532
2533static bool
2534__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2535			  struct skl_wrpll_params *wrpll_params,
2536			  int ref_clock)
2537{
2538	u32 afe_clock = crtc_state->port_clock * 5;
2539	u32 dco_min = 7998000;
2540	u32 dco_max = 10000000;
2541	u32 dco_mid = (dco_min + dco_max) / 2;
2542	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2543					 18, 20, 24, 28, 30, 32,  36,  40,
2544					 42, 44, 48, 50, 52, 54,  56,  60,
2545					 64, 66, 68, 70, 72, 76,  78,  80,
2546					 84, 88, 90, 92, 96, 98, 100, 102,
2547					  3,  5,  7,  9, 15, 21 };
2548	u32 dco, best_dco = 0, dco_centrality = 0;
2549	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2550	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2551
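	/*
	 * Pick the divider whose DCO lands closest to the middle of the valid
	 * range (dco_mid = 8999000 kHz). For example, a 148.5 MHz HDMI pixel
	 * clock gives afe_clock = 742500 kHz; of the dividers above only 12
	 * puts the DCO in range (12 * 742500 = 8910000 kHz), so best_div = 12,
	 * which then decomposes into pdiv = 2, qdiv = 3, kdiv = 2.
	 */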
2552	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2553		dco = afe_clock * dividers[d];
2554
2555		if ((dco <= dco_max) && (dco >= dco_min)) {
2556			dco_centrality = abs(dco - dco_mid);
2557
2558			if (dco_centrality < best_dco_centrality) {
2559				best_dco_centrality = dco_centrality;
2560				best_div = dividers[d];
2561				best_dco = dco;
2562			}
2563		}
2564	}
2565
2566	if (best_div == 0)
2567		return false;
2568
2569	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2570	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2571				  pdiv, qdiv, kdiv);
2572
2573	return true;
2574}
2575
2576static bool
2577cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2578			struct skl_wrpll_params *wrpll_params)
2579{
2580	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2581
2582	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2583					 i915->dpll.ref_clks.nssc);
2584}
2585
2586static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2587{
2588	u32 cfgcr0, cfgcr1;
2589	struct skl_wrpll_params wrpll_params = { 0, };
2590
2591	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2592
2593	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2594		return false;
2595
2596	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2597		wrpll_params.dco_integer;
2598
2599	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2600		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2601		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2602		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2603		DPLL_CFGCR1_CENTRAL_FREQ;
2604
2605	memset(&crtc_state->dpll_hw_state, 0,
2606	       sizeof(crtc_state->dpll_hw_state));
2607
2608	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2609	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2610	return true;
2611}
2612
2613static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
2614				    const struct intel_shared_dpll *pll,
2615				    int ref_clock)
2616{
2617	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
2618	u32 p0, p1, p2, dco_freq;
2619
2620	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2621	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2622
2623	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2624		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2625			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2626	else
2627		p1 = 1;
2628
2629
2630	switch (p0) {
2631	case DPLL_CFGCR1_PDIV_2:
2632		p0 = 2;
2633		break;
2634	case DPLL_CFGCR1_PDIV_3:
2635		p0 = 3;
2636		break;
2637	case DPLL_CFGCR1_PDIV_5:
2638		p0 = 5;
2639		break;
2640	case DPLL_CFGCR1_PDIV_7:
2641		p0 = 7;
2642		break;
2643	}
2644
2645	switch (p2) {
2646	case DPLL_CFGCR1_KDIV_1:
2647		p2 = 1;
2648		break;
2649	case DPLL_CFGCR1_KDIV_2:
2650		p2 = 2;
2651		break;
2652	case DPLL_CFGCR1_KDIV_3:
2653		p2 = 3;
2654		break;
2655	}
2656
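	/*
	 * Example: dco_integer = 0x151, dco_fraction = 0x4000 with a
	 * 24000 kHz reference gives 337 * 24000 + (0x4000 * 24000) / 0x8000 =
	 * 8100000 kHz; with p0 = 2, p1 = 1, p2 = 1 that works out to
	 * 8100000 / (2 * 1 * 1 * 5) = 810000.
	 */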
2657	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2658		   ref_clock;
2659
2660	dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2661		      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
2662
2663	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
2664		return 0;
2665
2666	return dco_freq / (p0 * p1 * p2 * 5);
2667}
2668
2669static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2670				  const struct intel_shared_dpll *pll)
2671{
2672	return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
2673}
2674
2675static bool
2676cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2677{
2678	u32 cfgcr0;
2679
2680	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2681
2682	switch (crtc_state->port_clock / 2) {
2683	case 81000:
2684		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2685		break;
2686	case 135000:
2687		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2688		break;
2689	case 270000:
2690		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2691		break;
2692		/* eDP 1.4 rates */
2693	case 162000:
2694		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2695		break;
2696	case 108000:
2697		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2698		break;
2699	case 216000:
2700		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2701		break;
2702	case 324000:
2703		/* Some SKUs may require elevated I/O voltage to support this */
2704		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2705		break;
2706	case 405000:
2707		/* Some SKUs may require elevated I/O voltage to support this */
2708		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2709		break;
2710	}
2711
2712	memset(&crtc_state->dpll_hw_state, 0,
2713	       sizeof(crtc_state->dpll_hw_state));
2714
2715	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2716
2717	return true;
2718}
2719
2720static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2721				  const struct intel_shared_dpll *pll)
2722{
2723	int link_clock = 0;
2724
2725	switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2726	case DPLL_CFGCR0_LINK_RATE_810:
2727		link_clock = 81000;
2728		break;
2729	case DPLL_CFGCR0_LINK_RATE_1080:
2730		link_clock = 108000;
2731		break;
2732	case DPLL_CFGCR0_LINK_RATE_1350:
2733		link_clock = 135000;
2734		break;
2735	case DPLL_CFGCR0_LINK_RATE_1620:
2736		link_clock = 162000;
2737		break;
2738	case DPLL_CFGCR0_LINK_RATE_2160:
2739		link_clock = 216000;
2740		break;
2741	case DPLL_CFGCR0_LINK_RATE_2700:
2742		link_clock = 270000;
2743		break;
2744	case DPLL_CFGCR0_LINK_RATE_3240:
2745		link_clock = 324000;
2746		break;
2747	case DPLL_CFGCR0_LINK_RATE_4050:
2748		link_clock = 405000;
2749		break;
2750	default:
2751		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2752		break;
2753	}
2754
2755	return link_clock * 2;
2756}
2757
2758static bool cnl_get_dpll(struct intel_atomic_state *state,
2759			 struct intel_crtc *crtc,
2760			 struct intel_encoder *encoder)
2761{
2762	struct intel_crtc_state *crtc_state =
2763		intel_atomic_get_new_crtc_state(state, crtc);
2764	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2765	struct intel_shared_dpll *pll;
2766	bool bret;
2767
2768	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2769		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2770		if (!bret) {
2771			drm_dbg_kms(&i915->drm,
2772				    "Could not get HDMI pll dividers.\n");
2773			return false;
2774		}
2775	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2776		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2777		if (!bret) {
2778			drm_dbg_kms(&i915->drm,
2779				    "Could not set DP dpll HW state.\n");
2780			return false;
2781		}
2782	} else {
2783		drm_dbg_kms(&i915->drm,
2784			    "Skip DPLL setup for output_types 0x%x\n",
2785			    crtc_state->output_types);
2786		return false;
2787	}
2788
2789	pll = intel_find_shared_dpll(state, crtc,
2790				     &crtc_state->dpll_hw_state,
2791				     BIT(DPLL_ID_SKL_DPLL2) |
2792				     BIT(DPLL_ID_SKL_DPLL1) |
2793				     BIT(DPLL_ID_SKL_DPLL0));
2794	if (!pll) {
2795		drm_dbg_kms(&i915->drm, "No PLL selected\n");
2796		return false;
2797	}
2798
2799	intel_reference_shared_dpll(state, crtc,
2800				    pll, &crtc_state->dpll_hw_state);
2801
2802	crtc_state->shared_dpll = pll;
2803
2804	return true;
2805}
2806
2807static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2808				const struct intel_shared_dpll *pll)
2809{
2810	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2811		return cnl_ddi_wrpll_get_freq(i915, pll);
2812	else
2813		return cnl_ddi_lcpll_get_freq(i915, pll);
2814}
2815
2816static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
2817{
2818	/* No SSC reference */
2819	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
2820}
2821
2822static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2823			      const struct intel_dpll_hw_state *hw_state)
2824{
2825	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
2826		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2827		    hw_state->cfgcr0,
2828		    hw_state->cfgcr1);
2829}
2830
2831static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2832	.enable = cnl_ddi_pll_enable,
2833	.disable = cnl_ddi_pll_disable,
2834	.get_hw_state = cnl_ddi_pll_get_hw_state,
2835	.get_freq = cnl_ddi_pll_get_freq,
2836};
2837
2838static const struct dpll_info cnl_plls[] = {
2839	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2840	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2841	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2842	{ },
2843};
2844
2845static const struct intel_dpll_mgr cnl_pll_mgr = {
2846	.dpll_info = cnl_plls,
2847	.get_dplls = cnl_get_dpll,
2848	.put_dplls = intel_put_dpll,
2849	.update_ref_clks = cnl_update_dpll_ref_clks,
2850	.dump_hw_state = cnl_dump_hw_state,
2851};
2852
2853struct icl_combo_pll_params {
2854	int clock;
2855	struct skl_wrpll_params wrpll;
2856};
2857
2858/*
2859 * These values are already adjusted: they're the bits we write to the
2860 * registers, not the logical values.
2861 */
2862static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2863	{ 540000,
2864	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2865	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2866	{ 270000,
2867	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2868	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2869	{ 162000,
2870	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2871	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2872	{ 324000,
2873	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2874	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2875	{ 216000,
2876	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2877	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2878	{ 432000,
2879	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2880	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2881	{ 648000,
2882	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2883	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2884	{ 810000,
2885	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2886	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2887};
2888
2889
2890/* Also used for 38.4 MHz values. */
2891static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2892	{ 540000,
2893	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2894	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2895	{ 270000,
2896	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2897	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2898	{ 162000,
2899	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2900	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2901	{ 324000,
2902	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2903	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2904	{ 216000,
2905	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2906	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2907	{ 432000,
2908	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2909	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2910	{ 648000,
2911	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2912	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2913	{ 810000,
2914	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2915	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2916};
2917
2918static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2919	.dco_integer = 0x151, .dco_fraction = 0x4000,
2920	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2921};
2922
2923static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2924	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2925	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2926};
2927
2928static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2929	.dco_integer = 0x54, .dco_fraction = 0x3000,
2930	/* the following params are unused */
2931	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2932};
2933
2934static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2935	.dco_integer = 0x43, .dco_fraction = 0x4000,
2936	/* the following params are unused */
2937};
2938
2939/*
2940 * Display WA #22010492432: tgl
2941 * Divide the nominal .dco_fraction value by 2.
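 * The nominal value here would be 0x3000, as in the 19.2 MHz table above;
 * halving it gives the 0x1800 used below.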
2942 */
2943static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
2944	.dco_integer = 0x54, .dco_fraction = 0x1800,
2945	/* the following params are unused */
2946	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2947};
2948
2949static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2950				  struct skl_wrpll_params *pll_params)
2951{
2952	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2953	const struct icl_combo_pll_params *params =
2954		dev_priv->dpll.ref_clks.nssc == 24000 ?
2955		icl_dp_combo_pll_24MHz_values :
2956		icl_dp_combo_pll_19_2MHz_values;
2957	int clock = crtc_state->port_clock;
2958	int i;
2959
2960	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2961		if (clock == params[i].clock) {
2962			*pll_params = params[i].wrpll;
2963			return true;
2964		}
2965	}
2966
2967	MISSING_CASE(clock);
2968	return false;
2969}
2970
2971static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2972			     struct skl_wrpll_params *pll_params)
2973{
2974	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2975
2976	if (INTEL_GEN(dev_priv) >= 12) {
2977		switch (dev_priv->dpll.ref_clks.nssc) {
2978		default:
2979			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2980			fallthrough;
2981		case 19200:
2982			*pll_params = tgl_tbt_pll_19_2MHz_values;
2983			break;
2984		case 24000:
2985			*pll_params = tgl_tbt_pll_24MHz_values;
2986			break;
2987		case 38400:
2988			*pll_params = tgl_tbt_pll_38_4MHz_values;
2989			break;
2990		}
2991	} else {
2992		switch (dev_priv->dpll.ref_clks.nssc) {
2993		default:
2994			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2995			fallthrough;
2996		case 19200:
2997		case 38400:
2998			*pll_params = icl_tbt_pll_19_2MHz_values;
2999			break;
3000		case 24000:
3001			*pll_params = icl_tbt_pll_24MHz_values;
3002			break;
3003		}
3004	}
3005
3006	return true;
3007}
3008
3009static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
3010				    const struct intel_shared_dpll *pll)
3011{
3012	/*
3013	 * The PLL outputs multiple frequencies at the same time, selection is
3014	 * made at DDI clock mux level.
3015	 */
3016	drm_WARN_ON(&i915->drm, 1);
3017
3018	return 0;
3019}
3020
3021static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3022{
3023	int ref_clock = i915->dpll.ref_clks.nssc;
3024
3025	/*
3026	 * For ICL+, the spec states: if reference frequency is 38.4,
3027	 * use 19.2 because the DPLL automatically divides that by 2.
3028	 */
3029	if (ref_clock == 38400)
3030		ref_clock = 19200;
3031
3032	return ref_clock;
3033}
3034
3035static bool
3036icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3037	       struct skl_wrpll_params *wrpll_params)
3038{
3039	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3040
3041	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3042					 icl_wrpll_ref_clock(i915));
3043}
3044
3045static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
3046				      const struct intel_shared_dpll *pll)
3047{
3048	return __cnl_ddi_wrpll_get_freq(i915, pll,
3049					icl_wrpll_ref_clock(i915));
3050}
3051
3052static void icl_calc_dpll_state(struct drm_i915_private *i915,
3053				const struct skl_wrpll_params *pll_params,
3054				struct intel_dpll_hw_state *pll_state)
3055{
3056	memset(pll_state, 0, sizeof(*pll_state));
3057
3058	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
3059			    pll_params->dco_integer;
3060
3061	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3062			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3063			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3064			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
3065
3066	if (INTEL_GEN(i915) >= 12)
3067		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3068	else
3069		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3070}
3071
3072static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3073{
3074	return id - DPLL_ID_ICL_MGPLL1;
3075}
3076
3077enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3078{
3079	return tc_port + DPLL_ID_ICL_MGPLL1;
3080}
3081
3082static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3083				     u32 *target_dco_khz,
3084				     struct intel_dpll_hw_state *state,
3085				     bool is_dkl)
3086{
3087	u32 dco_min_freq, dco_max_freq;
3088	int div1_vals[] = {7, 5, 3, 2};
3089	unsigned int i;
3090	int div2;
3091
3092	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3093	dco_max_freq = is_dp ? 8100000 : 10000000;
3094
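	/*
	 * For DP the DCO must hit exactly 8100000 kHz; e.g. for a 2.7 GHz
	 * link (clock_khz = 270000) the search below lands on div1 = 3,
	 * div2 = 2, since 3 * 2 * 270000 * 5 = 8100000.
	 */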
3095	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3096		int div1 = div1_vals[i];
3097
3098		for (div2 = 10; div2 > 0; div2--) {
3099			int dco = div1 * div2 * clock_khz * 5;
3100			int a_divratio, tlinedrv, inputsel;
3101			u32 hsdiv;
3102
3103			if (dco < dco_min_freq || dco > dco_max_freq)
3104				continue;
3105
3106			if (div2 >= 2) {
3107				/*
3108				 * Note: a_divratio does not match the TGL BSpec
3109				 * algorithm, but it matches the hardcoded values
3110				 * and works on HW, at least for DP alt-mode.
3111				 */
3112				a_divratio = is_dp ? 10 : 5;
3113				tlinedrv = is_dkl ? 1 : 2;
3114			} else {
3115				a_divratio = 5;
3116				tlinedrv = 0;
3117			}
3118			inputsel = is_dp ? 0 : 1;
3119
3120			switch (div1) {
3121			default:
3122				MISSING_CASE(div1);
3123				fallthrough;
3124			case 2:
3125				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3126				break;
3127			case 3:
3128				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3129				break;
3130			case 5:
3131				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3132				break;
3133			case 7:
3134				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3135				break;
3136			}
3137
3138			*target_dco_khz = dco;
3139
3140			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3141
3142			state->mg_clktop2_coreclkctl1 =
3143				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3144
3145			state->mg_clktop2_hsclkctl =
3146				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3147				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3148				hsdiv |
3149				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3150
3151			return true;
3152		}
3153	}
3154
3155	return false;
3156}
3157
3158/*
3159 * The specification for this function uses real numbers, so the math had to be
3160 * adapted to integer-only calculation; that's why it looks so different.
3161 */
3162static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3163				  struct intel_dpll_hw_state *pll_state)
3164{
3165	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3166	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3167	int clock = crtc_state->port_clock;
3168	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3169	u32 iref_ndiv, iref_trim, iref_pulse_w;
3170	u32 prop_coeff, int_coeff;
3171	u32 tdc_targetcnt, feedfwgain;
3172	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3173	u64 tmp;
3174	bool use_ssc = false;
3175	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3176	bool is_dkl = INTEL_GEN(dev_priv) >= 12;
3177
3178	memset(pll_state, 0, sizeof(*pll_state));
3179
3180	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3181				      pll_state, is_dkl)) {
3182		drm_dbg_kms(&dev_priv->drm,
3183			    "Failed to find divisors for clock %d\n", clock);
3184		return false;
3185	}
3186
3187	m1div = 2;
3188	m2div_int = dco_khz / (refclk_khz * m1div);
3189	if (m2div_int > 255) {
3190		if (!is_dkl) {
3191			m1div = 4;
3192			m2div_int = dco_khz / (refclk_khz * m1div);
3193		}
3194
3195		if (m2div_int > 255) {
3196			drm_dbg_kms(&dev_priv->drm,
3197				    "Failed to find mdiv for clock %d\n",
3198				    clock);
3199			return false;
3200		}
3201	}
3202	m2div_rem = dco_khz % (refclk_khz * m1div);
3203
3204	tmp = (u64)m2div_rem * (1 << 22);
3205	do_div(tmp, refclk_khz * m1div);
3206	m2div_frac = tmp;
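	/*
	 * Example: dco_khz = 8100000 with refclk_khz = 24000 and m1div = 2
	 * gives m2div_int = 168, m2div_rem = 36000 and
	 * m2div_frac = 36000 * 2^22 / 48000 = 3145728 (i.e. 0.75 * 2^22).
	 */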
3207
3208	switch (refclk_khz) {
3209	case 19200:
3210		iref_ndiv = 1;
3211		iref_trim = 28;
3212		iref_pulse_w = 1;
3213		break;
3214	case 24000:
3215		iref_ndiv = 1;
3216		iref_trim = 25;
3217		iref_pulse_w = 2;
3218		break;
3219	case 38400:
3220		iref_ndiv = 2;
3221		iref_trim = 28;
3222		iref_pulse_w = 1;
3223		break;
3224	default:
3225		MISSING_CASE(refclk_khz);
3226		return false;
3227	}
3228
3229	/*
3230	 * tdc_res = 0.000003
3231	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3232	 *
3233	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3234	 * was supposed to be a division, but we rearranged the operations of
3235	 * the formula to avoid early divisions so we don't multiply the
3236	 * rounding errors.
3237	 *
3238	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3239	 * we also rearrange to work with integers.
3240	 *
3241	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3242	 * last division by 10.
3243	 */
3244	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
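	/*
	 * Example: refclk_khz = 24000 yields
	 * (2000000000 / (132 * 24000) + 5) / 10 = (631 + 5) / 10 = 63,
	 * matching int(2 / 0.00132 / 24 + 0.5) = 63 from the spec formula.
	 */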
3245
3246	/*
3247	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3248	 * 32 bits. That's not a problem since we round the division down
3249	 * anyway.
3250	 */
3251	feedfwgain = (use_ssc || m2div_rem > 0) ?
3252		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
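	/*
	 * Example: m1div = 2 and dco_khz = 8100000 give
	 * 2 * 1000000 * 100 / (8100000 * 3 / 10) = 200000000 / 2430000 = 82.
	 */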
3253
3254	if (dco_khz >= 9000000) {
3255		prop_coeff = 5;
3256		int_coeff = 10;
3257	} else {
3258		prop_coeff = 4;
3259		int_coeff = 8;
3260	}
3261
3262	if (use_ssc) {
3263		tmp = mul_u32_u32(dco_khz, 47 * 32);
3264		do_div(tmp, refclk_khz * m1div * 10000);
3265		ssc_stepsize = tmp;
3266
3267		tmp = mul_u32_u32(dco_khz, 1000);
3268		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3269	} else {
3270		ssc_stepsize = 0;
3271		ssc_steplen = 0;
3272	}
3273	ssc_steplog = 4;
3274
3275	/* write pll_state calculations */
3276	if (is_dkl) {
3277		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3278					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3279					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3280					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3281
3282		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3283					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3284
3285		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3286					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3287					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3288					(use_ssc ? DKL_PLL_SSC_EN : 0);
3289
3290		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3291					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3292
3293		pll_state->mg_pll_tdc_coldst_bias =
3294				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3295				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3296
3297	} else {
3298		pll_state->mg_pll_div0 =
3299			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3300			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3301			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3302
3303		pll_state->mg_pll_div1 =
3304			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3305			MG_PLL_DIV1_DITHER_DIV_2 |
3306			MG_PLL_DIV1_NDIVRATIO(1) |
3307			MG_PLL_DIV1_FBPREDIV(m1div);
3308
3309		pll_state->mg_pll_lf =
3310			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3311			MG_PLL_LF_AFCCNTSEL_512 |
3312			MG_PLL_LF_GAINCTRL(1) |
3313			MG_PLL_LF_INT_COEFF(int_coeff) |
3314			MG_PLL_LF_PROP_COEFF(prop_coeff);
3315
3316		pll_state->mg_pll_frac_lock =
3317			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3318			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3319			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3320			MG_PLL_FRAC_LOCK_DCODITHEREN |
3321			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3322		if (use_ssc || m2div_rem > 0)
3323			pll_state->mg_pll_frac_lock |=
3324				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3325
3326		pll_state->mg_pll_ssc =
3327			(use_ssc ? MG_PLL_SSC_EN : 0) |
3328			MG_PLL_SSC_TYPE(2) |
3329			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3330			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3331			MG_PLL_SSC_FLLEN |
3332			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3333
3334		pll_state->mg_pll_tdc_coldst_bias =
3335			MG_PLL_TDC_COLDST_COLDSTART |
3336			MG_PLL_TDC_COLDST_IREFINT_EN |
3337			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3338			MG_PLL_TDC_TDCOVCCORR_EN |
3339			MG_PLL_TDC_TDCSEL(3);
3340
3341		pll_state->mg_pll_bias =
3342			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3343			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3344			MG_PLL_BIAS_BIAS_BONUS(10) |
3345			MG_PLL_BIAS_BIASCAL_EN |
3346			MG_PLL_BIAS_CTRIM(12) |
3347			MG_PLL_BIAS_VREF_RDAC(4) |
3348			MG_PLL_BIAS_IREFTRIM(iref_trim);
3349
3350		if (refclk_khz == 38400) {
3351			pll_state->mg_pll_tdc_coldst_bias_mask =
3352				MG_PLL_TDC_COLDST_COLDSTART;
3353			pll_state->mg_pll_bias_mask = 0;
3354		} else {
3355			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3356			pll_state->mg_pll_bias_mask = -1U;
3357		}
3358
3359		pll_state->mg_pll_tdc_coldst_bias &=
3360			pll_state->mg_pll_tdc_coldst_bias_mask;
3361		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3362	}
3363
3364	return true;
3365}
3366
3367static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3368				   const struct intel_shared_dpll *pll)
3369{
3370	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
3371	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3372	u64 tmp;
3373
3374	ref_clock = dev_priv->dpll.ref_clks.nssc;
3375
3376	if (INTEL_GEN(dev_priv) >= 12) {
3377		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3378		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3379		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3380
3381		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3382			m2_frac = pll_state->mg_pll_bias &
3383				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3384			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3385		} else {
3386			m2_frac = 0;
3387		}
3388	} else {
3389		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3390		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3391
3392		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3393			m2_frac = pll_state->mg_pll_div0 &
3394				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3395			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3396		} else {
3397			m2_frac = 0;
3398		}
3399	}
3400
3401	switch (pll_state->mg_clktop2_hsclkctl &
3402		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3403	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3404		div1 = 2;
3405		break;
3406	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3407		div1 = 3;
3408		break;
3409	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3410		div1 = 5;
3411		break;
3412	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3413		div1 = 7;
3414		break;
3415	default:
3416		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3417		return 0;
3418	}
3419
3420	div2 = (pll_state->mg_clktop2_hsclkctl &
3421		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3422		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3423
3424	/* A div2 value of 0 is the same as 1, i.e. no division */
3425	if (div2 == 0)
3426		div2 = 1;
3427
3428	/*
3429	 * Adjust the original formula to delay the division by 2^22 in order to
3430	 * minimize possible rounding errors.
3431	 */
3432	tmp = (u64)m1 * m2_int * ref_clock +
3433	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3434	tmp = div_u64(tmp, 5 * div1 * div2);
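	/*
	 * Example: m1 = 2, m2_int = 168, m2_frac = 3145728 (0.75 * 2^22),
	 * div1 = 3, div2 = 2 with a 24000 kHz reference gives
	 * (8064000 + 36000) / (5 * 3 * 2) = 270000, i.e. a 2.7 GHz link.
	 */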
3435
3436	return tmp;
3437}
3438
3439/**
3440 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3441 * @crtc_state: state for the CRTC to select the DPLL for
3442 * @port_dpll_id: the active @port_dpll_id to select
3443 *
3444 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3445 * CRTC.
3446 */
3447void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3448			      enum icl_port_dpll_id port_dpll_id)
3449{
3450	struct icl_port_dpll *port_dpll =
3451		&crtc_state->icl_port_dplls[port_dpll_id];
3452
3453	crtc_state->shared_dpll = port_dpll->pll;
3454	crtc_state->dpll_hw_state = port_dpll->hw_state;
3455}
3456
3457static void icl_update_active_dpll(struct intel_atomic_state *state,
3458				   struct intel_crtc *crtc,
3459				   struct intel_encoder *encoder)
3460{
3461	struct intel_crtc_state *crtc_state =
3462		intel_atomic_get_new_crtc_state(state, crtc);
3463	struct intel_digital_port *primary_port;
3464	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3465
3466	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3467		enc_to_mst(encoder)->primary :
3468		enc_to_dig_port(encoder);
3469
3470	if (primary_port &&
3471	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3472	     primary_port->tc_mode == TC_PORT_LEGACY))
3473		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3474
3475	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3476}
3477
3478static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3479				   struct intel_crtc *crtc,
3480				   struct intel_encoder *encoder)
3481{
3482	struct intel_crtc_state *crtc_state =
3483		intel_atomic_get_new_crtc_state(state, crtc);
3484	struct skl_wrpll_params pll_params = { };
3485	struct icl_port_dpll *port_dpll =
3486		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3487	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3488	enum port port = encoder->port;
3489	unsigned long dpll_mask;
3490	int ret;
3491
3492	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3493	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3494		ret = icl_calc_wrpll(crtc_state, &pll_params);
3495	else
3496		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3497
3498	if (!ret) {
3499		drm_dbg_kms(&dev_priv->drm,
3500			    "Could not calculate combo PHY PLL state.\n");
3501
3502		return false;
3503	}
3504
3505	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3506
3507	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
3508		dpll_mask =
3509			BIT(DPLL_ID_EHL_DPLL4) |
3510			BIT(DPLL_ID_ICL_DPLL1) |
3511			BIT(DPLL_ID_ICL_DPLL0);
3512	else
3513		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3514
3515	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3516						&port_dpll->hw_state,
3517						dpll_mask);
3518	if (!port_dpll->pll) {
3519		drm_dbg_kms(&dev_priv->drm,
3520			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3521			    encoder->base.base.id, encoder->base.name);
3522		return false;
3523	}
3524
3525	intel_reference_shared_dpll(state, crtc,
3526				    port_dpll->pll, &port_dpll->hw_state);
3527
3528	icl_update_active_dpll(state, crtc, encoder);
3529
3530	return true;
3531}
3532
3533static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3534				 struct intel_crtc *crtc,
3535				 struct intel_encoder *encoder)
3536{
3537	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3538	struct intel_crtc_state *crtc_state =
3539		intel_atomic_get_new_crtc_state(state, crtc);
3540	struct skl_wrpll_params pll_params = { };
3541	struct icl_port_dpll *port_dpll;
3542	enum intel_dpll_id dpll_id;
3543
3544	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3545	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
3546		drm_dbg_kms(&dev_priv->drm,
3547			    "Could not calculate TBT PLL state.\n");
3548		return false;
3549	}
3550
3551	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3552
3553	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3554						&port_dpll->hw_state,
3555						BIT(DPLL_ID_ICL_TBTPLL));
3556	if (!port_dpll->pll) {
3557		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
3558		return false;
3559	}
3560	intel_reference_shared_dpll(state, crtc,
3561				    port_dpll->pll, &port_dpll->hw_state);
3562
3563
3564	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3565	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3566		drm_dbg_kms(&dev_priv->drm,
3567			    "Could not calculate MG PHY PLL state.\n");
3568		goto err_unreference_tbt_pll;
3569	}
3570
3571	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3572							 encoder->port));
3573	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3574						&port_dpll->hw_state,
3575						BIT(dpll_id));
3576	if (!port_dpll->pll) {
3577		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
3578		goto err_unreference_tbt_pll;
3579	}
3580	intel_reference_shared_dpll(state, crtc,
3581				    port_dpll->pll, &port_dpll->hw_state);
3582
3583	icl_update_active_dpll(state, crtc, encoder);
3584
3585	return true;
3586
3587err_unreference_tbt_pll:
3588	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3589	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3590
3591	return false;
3592}
3593
3594static bool icl_get_dplls(struct intel_atomic_state *state,
3595			  struct intel_crtc *crtc,
3596			  struct intel_encoder *encoder)
3597{
3598	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3599	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3600
3601	if (intel_phy_is_combo(dev_priv, phy))
3602		return icl_get_combo_phy_dpll(state, crtc, encoder);
3603	else if (intel_phy_is_tc(dev_priv, phy))
3604		return icl_get_tc_phy_dplls(state, crtc, encoder);
3605
3606	MISSING_CASE(phy);
3607
3608	return false;
3609}
3610
3611static void icl_put_dplls(struct intel_atomic_state *state,
3612			  struct intel_crtc *crtc)
3613{
3614	const struct intel_crtc_state *old_crtc_state =
3615		intel_atomic_get_old_crtc_state(state, crtc);
3616	struct intel_crtc_state *new_crtc_state =
3617		intel_atomic_get_new_crtc_state(state, crtc);
3618	enum icl_port_dpll_id id;
3619
3620	new_crtc_state->shared_dpll = NULL;
3621
3622	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3623		const struct icl_port_dpll *old_port_dpll =
3624			&old_crtc_state->icl_port_dplls[id];
3625		struct icl_port_dpll *new_port_dpll =
3626			&new_crtc_state->icl_port_dplls[id];
3627
3628		new_port_dpll->pll = NULL;
3629
3630		if (!old_port_dpll->pll)
3631			continue;
3632
3633		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3634	}
3635}
3636
3637static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3638				struct intel_shared_dpll *pll,
3639				struct intel_dpll_hw_state *hw_state)
3640{
3641	const enum intel_dpll_id id = pll->info->id;
3642	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3643	intel_wakeref_t wakeref;
3644	bool ret = false;
3645	u32 val;
3646
3647	wakeref = intel_display_power_get_if_enabled(dev_priv,
3648						     POWER_DOMAIN_DISPLAY_CORE);
3649	if (!wakeref)
3650		return false;
3651
3652	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3653	if (!(val & PLL_ENABLE))
3654		goto out;
3655
3656	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3657						  MG_REFCLKIN_CTL(tc_port));
3658	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3659
3660	hw_state->mg_clktop2_coreclkctl1 =
3661		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3662	hw_state->mg_clktop2_coreclkctl1 &=
3663		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3664
3665	hw_state->mg_clktop2_hsclkctl =
3666		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3667	hw_state->mg_clktop2_hsclkctl &=
3668		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3669		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3670		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3671		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3672
3673	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3674	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3675	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3676	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3677						   MG_PLL_FRAC_LOCK(tc_port));
3678	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3679
3680	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3681	hw_state->mg_pll_tdc_coldst_bias =
3682		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3683
3684	if (dev_priv->dpll.ref_clks.nssc == 38400) {
3685		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3686		hw_state->mg_pll_bias_mask = 0;
3687	} else {
3688		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3689		hw_state->mg_pll_bias_mask = -1U;
3690	}
3691
3692	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3693	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3694
3695	ret = true;
3696out:
3697	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3698	return ret;
3699}
3700
3701static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3702				 struct intel_shared_dpll *pll,
3703				 struct intel_dpll_hw_state *hw_state)
3704{
3705	const enum intel_dpll_id id = pll->info->id;
3706	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3707	intel_wakeref_t wakeref;
3708	bool ret = false;
3709	u32 val;
3710
3711	wakeref = intel_display_power_get_if_enabled(dev_priv,
3712						     POWER_DOMAIN_DISPLAY_CORE);
3713	if (!wakeref)
3714		return false;
3715
3716	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3717	if (!(val & PLL_ENABLE))
3718		goto out;
3719
3720	/*
3721	 * All registers read here have the same HIP_INDEX_REG even though
3722	 * they are on different building blocks
3723	 */
3724	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3725		       HIP_INDEX_VAL(tc_port, 0x2));
3726
3727	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3728						  DKL_REFCLKIN_CTL(tc_port));
3729	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3730
3731	hw_state->mg_clktop2_hsclkctl =
3732		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3733	hw_state->mg_clktop2_hsclkctl &=
3734		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3735		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3736		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3737		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3738
3739	hw_state->mg_clktop2_coreclkctl1 =
3740		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3741	hw_state->mg_clktop2_coreclkctl1 &=
3742		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3743
3744	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3745	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
3746				  DKL_PLL_DIV0_PROP_COEFF_MASK |
3747				  DKL_PLL_DIV0_FBPREDIV_MASK |
3748				  DKL_PLL_DIV0_FBDIV_INT_MASK);
3749
3750	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3751	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3752				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3753
3754	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3755	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3756				 DKL_PLL_SSC_STEP_LEN_MASK |
3757				 DKL_PLL_SSC_STEP_NUM_MASK |
3758				 DKL_PLL_SSC_EN);
3759
3760	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3761	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3762				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3763
3764	hw_state->mg_pll_tdc_coldst_bias =
3765		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3766	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3767					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3768
3769	ret = true;
3770out:
3771	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3772	return ret;
3773}
3774
3775static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3776				 struct intel_shared_dpll *pll,
3777				 struct intel_dpll_hw_state *hw_state,
3778				 i915_reg_t enable_reg)
3779{
3780	const enum intel_dpll_id id = pll->info->id;
3781	intel_wakeref_t wakeref;
3782	bool ret = false;
3783	u32 val;
3784
3785	wakeref = intel_display_power_get_if_enabled(dev_priv,
3786						     POWER_DOMAIN_DISPLAY_CORE);
3787	if (!wakeref)
3788		return false;
3789
3790	val = intel_de_read(dev_priv, enable_reg);
3791	if (!(val & PLL_ENABLE))
3792		goto out;
3793
3794	if (INTEL_GEN(dev_priv) >= 12) {
3795		hw_state->cfgcr0 = intel_de_read(dev_priv,
3796						 TGL_DPLL_CFGCR0(id));
3797		hw_state->cfgcr1 = intel_de_read(dev_priv,
3798						 TGL_DPLL_CFGCR1(id));
3799	} else {
3800		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3801			hw_state->cfgcr0 = intel_de_read(dev_priv,
3802							 ICL_DPLL_CFGCR0(4));
3803			hw_state->cfgcr1 = intel_de_read(dev_priv,
3804							 ICL_DPLL_CFGCR1(4));
3805		} else {
3806			hw_state->cfgcr0 = intel_de_read(dev_priv,
3807							 ICL_DPLL_CFGCR0(id));
3808			hw_state->cfgcr1 = intel_de_read(dev_priv,
3809							 ICL_DPLL_CFGCR1(id));
3810		}
3811	}
3812
3813	ret = true;
3814out:
3815	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3816	return ret;
3817}
3818
3819static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3820				   struct intel_shared_dpll *pll,
3821				   struct intel_dpll_hw_state *hw_state)
3822{
3823	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3824
3825	if (IS_ELKHARTLAKE(dev_priv) &&
3826	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3827		enable_reg = MG_PLL_ENABLE(0);
3828	}
3829
3830	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3831}
3832
3833static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3834				 struct intel_shared_dpll *pll,
3835				 struct intel_dpll_hw_state *hw_state)
3836{
3837	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3838}
3839
3840static void icl_dpll_write(struct drm_i915_private *dev_priv,
3841			   struct intel_shared_dpll *pll)
3842{
3843	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3844	const enum intel_dpll_id id = pll->info->id;
3845	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3846
3847	if (INTEL_GEN(dev_priv) >= 12) {
3848		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3849		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3850	} else {
3851		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3852			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3853			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3854		} else {
3855			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3856			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3857		}
3858	}
3859
3860	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3861	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3862	intel_de_posting_read(dev_priv, cfgcr1_reg);
3863}
3864
3865static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3866			     struct intel_shared_dpll *pll)
3867{
3868	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3869	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3870	u32 val;
3871
3872	/*
3873	 * Some of the following registers have reserved fields, so program
3874	 * these with RMW based on a mask. The mask can be fixed or generated
3875	 * during the calc/readout phase if the mask depends on some other HW
3876	 * state like refclk, see icl_calc_mg_pll_state().
3877	 */
3878	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3879	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3880	val |= hw_state->mg_refclkin_ctl;
3881	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3882
3883	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3884	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3885	val |= hw_state->mg_clktop2_coreclkctl1;
3886	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3887
3888	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3889	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3890		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3891		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3892		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3893	val |= hw_state->mg_clktop2_hsclkctl;
3894	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
3895
3896	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3897	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3898	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3899	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3900		       hw_state->mg_pll_frac_lock);
3901	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3902
3903	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3904	val &= ~hw_state->mg_pll_bias_mask;
3905	val |= hw_state->mg_pll_bias;
3906	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3907
3908	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3909	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3910	val |= hw_state->mg_pll_tdc_coldst_bias;
3911	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3912
3913	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3914}
3915
3916static void dkl_pll_write(struct drm_i915_private *dev_priv,
3917			  struct intel_shared_dpll *pll)
3918{
3919	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3920	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3921	u32 val;
3922
3923	/*
3924	 * All registers programmed here have the same HIP_INDEX_REG even
3925	 * though they are on different building blocks
3926	 */
3927	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3928		       HIP_INDEX_VAL(tc_port, 0x2));
3929
3930	/* All the registers are RMW */
3931	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3932	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3933	val |= hw_state->mg_refclkin_ctl;
3934	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3935
3936	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3937	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3938	val |= hw_state->mg_clktop2_coreclkctl1;
3939	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3940
3941	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3942	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3943		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3944		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3945		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3946	val |= hw_state->mg_clktop2_hsclkctl;
3947	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3948
3949	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3950	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
3951		 DKL_PLL_DIV0_PROP_COEFF_MASK |
3952		 DKL_PLL_DIV0_FBPREDIV_MASK |
3953		 DKL_PLL_DIV0_FBDIV_INT_MASK);
3954	val |= hw_state->mg_pll_div0;
3955	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
3956
3957	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3958	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3959		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3960	val |= hw_state->mg_pll_div1;
3961	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3962
3963	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3964	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3965		 DKL_PLL_SSC_STEP_LEN_MASK |
3966		 DKL_PLL_SSC_STEP_NUM_MASK |
3967		 DKL_PLL_SSC_EN);
3968	val |= hw_state->mg_pll_ssc;
3969	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3970
3971	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3972	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3973		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3974	val |= hw_state->mg_pll_bias;
3975	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3976
3977	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3978	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3979		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3980	val |= hw_state->mg_pll_tdc_coldst_bias;
3981	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3982
3983	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3984}
3985
3986static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3987				 struct intel_shared_dpll *pll,
3988				 i915_reg_t enable_reg)
3989{
3990	u32 val;
3991
3992	val = intel_de_read(dev_priv, enable_reg);
3993	val |= PLL_POWER_ENABLE;
3994	intel_de_write(dev_priv, enable_reg, val);
3995
3996	/*
3997	 * The spec says we need to "wait" but it also says it should be
3998	 * immediate.
3999	 */
4000	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4001		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4002			pll->info->id);
4003}
4004
4005static void icl_pll_enable(struct drm_i915_private *dev_priv,
4006			   struct intel_shared_dpll *pll,
4007			   i915_reg_t enable_reg)
4008{
4009	u32 val;
4010
4011	val = intel_de_read(dev_priv, enable_reg);
4012	val |= PLL_ENABLE;
4013	intel_de_write(dev_priv, enable_reg, val);
4014
4015	/* Timeout is actually 600us. */
4016	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4017		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4018}
4019
4020static void combo_pll_enable(struct drm_i915_private *dev_priv,
4021			     struct intel_shared_dpll *pll)
4022{
4023	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
4024
4025	if (IS_ELKHARTLAKE(dev_priv) &&
4026	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4027		enable_reg = MG_PLL_ENABLE(0);
4028
4029		/*
4030		 * We need to disable DC states when this DPLL is enabled.
4031		 * This can be done by taking a reference on DPLL4 power
4032		 * domain.
4033		 */
4034		pll->wakeref = intel_display_power_get(dev_priv,
4035						       POWER_DOMAIN_DPLL_DC_OFF);
4036	}
4037
4038	icl_pll_power_enable(dev_priv, pll, enable_reg);
4039
4040	icl_dpll_write(dev_priv, pll);
4041
4042	/*
4043	 * DVFS pre sequence would be here, but in our driver the cdclk code
4044	 * paths should already be setting the appropriate voltage, hence we do
4045	 * nothing here.
4046	 */
4047
4048	icl_pll_enable(dev_priv, pll, enable_reg);
4049
4050	/* DVFS post sequence would be here. See the comment above. */
4051}
4052
4053static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4054			   struct intel_shared_dpll *pll)
4055{
4056	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4057
4058	icl_dpll_write(dev_priv, pll);
4059
4060	/*
4061	 * DVFS pre sequence would be here, but in our driver the cdclk code
4062	 * paths should already be setting the appropriate voltage, hence we do
4063	 * nothing here.
4064	 */
4065
4066	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4067
4068	/* DVFS post sequence would be here. See the comment above. */
4069}
4070
4071static void mg_pll_enable(struct drm_i915_private *dev_priv,
4072			  struct intel_shared_dpll *pll)
4073{
4074	i915_reg_t enable_reg =
4075		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4076
4077	icl_pll_power_enable(dev_priv, pll, enable_reg);
4078
4079	if (INTEL_GEN(dev_priv) >= 12)
4080		dkl_pll_write(dev_priv, pll);
4081	else
4082		icl_mg_pll_write(dev_priv, pll);
4083
4084	/*
4085	 * DVFS pre sequence would be here, but in our driver the cdclk code
4086	 * paths should already be setting the appropriate voltage, hence we do
4087	 * nothing here.
4088	 */
4089
4090	icl_pll_enable(dev_priv, pll, enable_reg);
4091
4092	/* DVFS post sequence would be here. See the comment above. */
4093}
4094
4095static void icl_pll_disable(struct drm_i915_private *dev_priv,
4096			    struct intel_shared_dpll *pll,
4097			    i915_reg_t enable_reg)
4098{
4099	u32 val;
4100
4101	/* The first steps are done by intel_ddi_post_disable(). */
4102
4103	/*
4104	 * DVFS pre sequence would be here, but in our driver the cdclk code
4105	 * paths should already be setting the appropriate voltage, hence we do
4106	 * nothing here.
4107	 */
4108
4109	val = intel_de_read(dev_priv, enable_reg);
4110	val &= ~PLL_ENABLE;
4111	intel_de_write(dev_priv, enable_reg, val);
4112
4113	/* Timeout is actually 1us. */
4114	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
4115		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
4116
4117	/* DVFS post sequence would be here. See the comment above. */
4118
4119	val = intel_de_read(dev_priv, enable_reg);
4120	val &= ~PLL_POWER_ENABLE;
4121	intel_de_write(dev_priv, enable_reg, val);
4122
4123	/*
4124	 * The spec says we need to "wait" but it also says it should be
4125	 * immediate.
4126	 */
4127	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4128		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
4129			pll->info->id);
4130}
4131
4132static void combo_pll_disable(struct drm_i915_private *dev_priv,
4133			      struct intel_shared_dpll *pll)
4134{
4135	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
4136
4137	if (IS_ELKHARTLAKE(dev_priv) &&
4138	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4139		enable_reg = MG_PLL_ENABLE(0);
4140		icl_pll_disable(dev_priv, pll, enable_reg);
4141
4142		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4143					pll->wakeref);
4144		return;
4145	}
4146
4147	icl_pll_disable(dev_priv, pll, enable_reg);
4148}
4149
4150static void tbt_pll_disable(struct drm_i915_private *dev_priv,
4151			    struct intel_shared_dpll *pll)
4152{
4153	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
4154}
4155
4156static void mg_pll_disable(struct drm_i915_private *dev_priv,
4157			   struct intel_shared_dpll *pll)
4158{
4159	i915_reg_t enable_reg =
4160		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4161
4162	icl_pll_disable(dev_priv, pll, enable_reg);
4163}
4164
4165static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4166{
4167	/* No SSC ref */
4168	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
4169}
4170
4171static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4172			      const struct intel_dpll_hw_state *hw_state)
4173{
4174	drm_dbg_kms(&dev_priv->drm,
4175		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4176		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4177		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4178		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4179		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4180		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4181		    hw_state->cfgcr0, hw_state->cfgcr1,
4182		    hw_state->mg_refclkin_ctl,
4183		    hw_state->mg_clktop2_coreclkctl1,
4184		    hw_state->mg_clktop2_hsclkctl,
4185		    hw_state->mg_pll_div0,
4186		    hw_state->mg_pll_div1,
4187		    hw_state->mg_pll_lf,
4188		    hw_state->mg_pll_frac_lock,
4189		    hw_state->mg_pll_ssc,
4190		    hw_state->mg_pll_bias,
4191		    hw_state->mg_pll_tdc_coldst_bias);
4192}
4193
4194static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4195	.enable = combo_pll_enable,
4196	.disable = combo_pll_disable,
4197	.get_hw_state = combo_pll_get_hw_state,
4198	.get_freq = icl_ddi_combo_pll_get_freq,
4199};
4200
4201static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4202	.enable = tbt_pll_enable,
4203	.disable = tbt_pll_disable,
4204	.get_hw_state = tbt_pll_get_hw_state,
4205	.get_freq = icl_ddi_tbt_pll_get_freq,
4206};
4207
4208static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4209	.enable = mg_pll_enable,
4210	.disable = mg_pll_disable,
4211	.get_hw_state = mg_pll_get_hw_state,
4212	.get_freq = icl_ddi_mg_pll_get_freq,
4213};
4214
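/*
 * Note: in the dpll_info tables below the array index must match the DPLL id;
 * intel_shared_dpll_init() warns if an entry ever gets out of order.
 */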
4215static const struct dpll_info icl_plls[] = {
4216	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4217	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4218	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4219	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4220	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4221	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4222	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4223	{ },
4224};
4225
4226static const struct intel_dpll_mgr icl_pll_mgr = {
4227	.dpll_info = icl_plls,
4228	.get_dplls = icl_get_dplls,
4229	.put_dplls = icl_put_dplls,
4230	.update_active_dpll = icl_update_active_dpll,
4231	.update_ref_clks = icl_update_dpll_ref_clks,
4232	.dump_hw_state = icl_dump_hw_state,
4233};
4234
4235static const struct dpll_info ehl_plls[] = {
4236	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4237	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4238	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4239	{ },
4240};
4241
4242static const struct intel_dpll_mgr ehl_pll_mgr = {
4243	.dpll_info = ehl_plls,
4244	.get_dplls = icl_get_dplls,
4245	.put_dplls = icl_put_dplls,
4246	.update_ref_clks = icl_update_dpll_ref_clks,
4247	.dump_hw_state = icl_dump_hw_state,
4248};
4249
4250static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4251	.enable = mg_pll_enable,
4252	.disable = mg_pll_disable,
4253	.get_hw_state = dkl_pll_get_hw_state,
4254	.get_freq = icl_ddi_mg_pll_get_freq,
4255};
4256
4257static const struct dpll_info tgl_plls[] = {
4258	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4259	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4260	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4261	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4262	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4263	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4264	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4265	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4266	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4267	{ },
4268};
4269
4270static const struct intel_dpll_mgr tgl_pll_mgr = {
4271	.dpll_info = tgl_plls,
4272	.get_dplls = icl_get_dplls,
4273	.put_dplls = icl_put_dplls,
4274	.update_active_dpll = icl_update_active_dpll,
4275	.update_ref_clks = icl_update_dpll_ref_clks,
4276	.dump_hw_state = icl_dump_hw_state,
4277};
4278
4279/**
4280 * intel_shared_dpll_init - Initialize shared DPLLs
4281 * @dev: drm device
4282 *
4283 * Initialize shared DPLLs for @dev.
4284 */
4285void intel_shared_dpll_init(struct drm_device *dev)
4286{
4287	struct drm_i915_private *dev_priv = to_i915(dev);
4288	const struct intel_dpll_mgr *dpll_mgr = NULL;
4289	const struct dpll_info *dpll_info;
4290	int i;
4291
4292	if (INTEL_GEN(dev_priv) >= 12)
4293		dpll_mgr = &tgl_pll_mgr;
4294	else if (IS_ELKHARTLAKE(dev_priv))
4295		dpll_mgr = &ehl_pll_mgr;
4296	else if (INTEL_GEN(dev_priv) >= 11)
4297		dpll_mgr = &icl_pll_mgr;
4298	else if (IS_CANNONLAKE(dev_priv))
4299		dpll_mgr = &cnl_pll_mgr;
4300	else if (IS_GEN9_BC(dev_priv))
4301		dpll_mgr = &skl_pll_mgr;
4302	else if (IS_GEN9_LP(dev_priv))
4303		dpll_mgr = &bxt_pll_mgr;
4304	else if (HAS_DDI(dev_priv))
4305		dpll_mgr = &hsw_pll_mgr;
4306	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4307		dpll_mgr = &pch_pll_mgr;
4308
4309	if (!dpll_mgr) {
4310		dev_priv->dpll.num_shared_dpll = 0;
4311		return;
4312	}
4313
4314	dpll_info = dpll_mgr->dpll_info;
4315
4316	for (i = 0; dpll_info[i].name; i++) {
4317		drm_WARN_ON(dev, i != dpll_info[i].id);
4318		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4319	}
4320
4321	dev_priv->dpll.mgr = dpll_mgr;
4322	dev_priv->dpll.num_shared_dpll = i;
4323	mutex_init(&dev_priv->dpll.lock);
4324
4325	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4326}
4327
4328/**
4329 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4330 * @state: atomic state
4331 * @crtc: CRTC to reserve DPLLs for
4332 * @encoder: encoder
4333 *
4334 * This function reserves all required DPLLs for the given CRTC and encoder
4335 * combination in the current atomic commit @state and the new @crtc atomic
4336 * state.
4337 *
4338 * The new configuration in the atomic commit @state is made effective by
4339 * calling intel_shared_dpll_swap_state().
4340 *
4341 * The reserved DPLLs should be released by calling
4342 * intel_release_shared_dplls().
4343 *
4344 * Returns:
4345 * True if all required DPLLs were successfully reserved.
4346 */
4347bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4348				struct intel_crtc *crtc,
4349				struct intel_encoder *encoder)
4350{
4351	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4352	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4353
4354	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4355		return false;
4356
4357	return dpll_mgr->get_dplls(state, crtc, encoder);
4358}
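/*
 * Illustrative sketch only, not lifted from a specific caller: during the
 * atomic check phase the required DPLLs are reserved, the reservation is made
 * effective while committing, and the PLLs are released again once the CRTC
 * stops using them:
 *
 *	if (!intel_reserve_shared_dplls(state, crtc, encoder))
 *		return -EINVAL;
 *	...
 *	intel_shared_dpll_swap_state(state);
 *	...
 *	intel_release_shared_dplls(state, crtc);
 */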
4359
4360/**
4361 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4362 * @state: atomic state
4363 * @crtc: crtc from which the DPLLs are to be released
4364 *
4365 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4366 * from the current atomic commit @state and the old @crtc atomic state.
4367 *
4368 * The new configuration in the atomic commit @state is made effective by
4369 * calling intel_shared_dpll_swap_state().
4370 */
4371void intel_release_shared_dplls(struct intel_atomic_state *state,
4372				struct intel_crtc *crtc)
4373{
4374	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4375	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4376
4377	/*
4378	 * FIXME: this function is called for every platform having a
4379	 * compute_clock hook, even though the platform doesn't yet support
4380	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4381	 * called on those.
4382	 */
4383	if (!dpll_mgr)
4384		return;
4385
4386	dpll_mgr->put_dplls(state, crtc);
4387}
4388
4389/**
4390 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4391 * @state: atomic state
4392 * @crtc: the CRTC for which to update the active DPLL
4393 * @encoder: encoder determining the type of port DPLL
4394 *
4395 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4396 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4397 * DPLL selected will be based on the current mode of the encoder's port.
4398 */
4399void intel_update_active_dpll(struct intel_atomic_state *state,
4400			      struct intel_crtc *crtc,
4401			      struct intel_encoder *encoder)
4402{
4403	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4404	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4405
4406	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4407		return;
4408
4409	dpll_mgr->update_active_dpll(state, crtc, encoder);
4410}
4411
4412/**
4413 * intel_dpll_get_freq - calculate the DPLL's output frequency
4414 * @i915: i915 device
4415 * @pll: DPLL for which to calculate the output frequency
4416 *
4417 * Return the output frequency corresponding to @pll's current state.
4418 */
4419int intel_dpll_get_freq(struct drm_i915_private *i915,
4420			const struct intel_shared_dpll *pll)
4421{
4422	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4423		return 0;
4424
4425	return pll->info->funcs->get_freq(i915, pll);
4426}
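/*
 * Illustrative sketch only: a caller holding a CRTC state whose shared_dpll
 * has already been reserved could query the PLL's current output frequency
 * like this ("crtc_state" is a hypothetical struct intel_crtc_state pointer):
 *
 *	int clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll);
 */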
4427
4428static void readout_dpll_hw_state(struct drm_i915_private *i915,
4429				  struct intel_shared_dpll *pll)
4430{
4431	struct intel_crtc *crtc;
4432
4433	pll->on = pll->info->funcs->get_hw_state(i915, pll,
4434						 &pll->state.hw_state);
4435
4436	if (IS_ELKHARTLAKE(i915) && pll->on &&
4437	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4438		pll->wakeref = intel_display_power_get(i915,
4439						       POWER_DOMAIN_DPLL_DC_OFF);
4440	}
4441
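	/* Rebuild the mask of CRTCs currently driven by this PLL. */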
4442	pll->state.crtc_mask = 0;
4443	for_each_intel_crtc(&i915->drm, crtc) {
4444		struct intel_crtc_state *crtc_state =
4445			to_intel_crtc_state(crtc->base.state);
4446
4447		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4448			pll->state.crtc_mask |= 1 << crtc->pipe;
4449	}
4450	pll->active_mask = pll->state.crtc_mask;
4451
4452	drm_dbg_kms(&i915->drm,
4453		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
4454		    pll->info->name, pll->state.crtc_mask, pll->on);
4455}
4456
4457void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4458{
4459	int i;
4460
4461	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4462		i915->dpll.mgr->update_ref_clks(i915);
4463
4464	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4465		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4466}
4467
4468static void sanitize_dpll_state(struct drm_i915_private *i915,
4469				struct intel_shared_dpll *pll)
4470{
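	/* Nothing to sanitize if the PLL is off or still has active users. */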
4471	if (!pll->on || pll->active_mask)
4472		return;
4473
4474	drm_dbg_kms(&i915->drm,
4475		    "%s enabled but not in use, disabling\n",
4476		    pll->info->name);
4477
4478	pll->info->funcs->disable(i915, pll);
4479	pll->on = false;
4480}
4481
4482void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4483{
4484	int i;
4485
4486	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4487		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4488}
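/*
 * intel_dpll_readout_hw_state() and intel_dpll_sanitize_state() are intended
 * to run back to back as part of the display hardware state readout (e.g. at
 * driver load or resume): first adopt whatever PLL state is already enabled,
 * then disable any PLL that turned out to have no users.
 */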
4489
4490/**
4491 * intel_dpll_dump_hw_state - write hw_state to dmesg
4492 * @dev_priv: i915 drm device
4493 * @hw_state: hw state to be written to the log
4494 *
4495 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4496 */
4497void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4498			      const struct intel_dpll_hw_state *hw_state)
4499{
4500	if (dev_priv->dpll.mgr) {
4501		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4502	} else {
4503		/* fallback for platforms that don't use the shared dpll
4504		 * infrastructure
4505		 */
4506		drm_dbg_kms(&dev_priv->drm,
4507			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4508			    "fp0: 0x%x, fp1: 0x%x\n",
4509			    hw_state->dpll,
4510			    hw_state->dpll_md,
4511			    hw_state->fp0,
4512			    hw_state->fp1);
4513	}
4514}