// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/drm_fixed.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"

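/*
 * Per-platform FDI link training hook; assigned in intel_fdi_init_hook()
 * below to the ILK, SNB or IVB implementation.
 */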
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool cur_state;

	if (HAS_DDI(display)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(display,
					  TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI TX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool cur_state;

	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI RX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	struct intel_display *display = &i915->display;
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(display))
		return;

	cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, !cur_state,
				 "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	struct intel_display *display = &i915->display;
	bool cur_state;

	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI RX PLL assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
		return 0;

	crtc = intel_crtc_for_pipe(display, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	crtc = intel_crtc_for_pipe(display, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct intel_display *display = to_intel_display(dev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	*pipe_to_reduce = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

/**
 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));

	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	if (pipe_bpp < 6 * 3)
		return false;

	crtc_state->pipe_bpp = pipe_bpp;

	return true;
}
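
/*
 * Illustrative example for intel_fdi_compute_pipe_bpp(): with pipe_bpp = 24
 * and a 20 bpp link limit, the minimum of the two is 20, which
 * rounddown(20, 2 * 3) reduces to 18, i.e. 6 bpc. A link limit below 18 bpp
 * would make the function return false instead.
 */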

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
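	/*
	 * Rough illustration (ignoring protocol overhead): with a 270 MHz
	 * FDI symbol clock (link_bw = 270000) each lane carries about
	 * 270000 * 8 = 2.16 Gbit/s of pixel data, so e.g. a 148500 kHz
	 * dotclock at 24 bpp (~3.56 Gbit/s) needs two FDI lanes.
	 */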
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}

static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe_to_reduce;
	int ret;

	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
				  &pipe_to_reduce);
	if (ret != -EINVAL)
		return ret;

	ret = intel_link_bw_reduce_bpp(state, limits,
				       BIT(pipe_to_reduce),
				       "FDI link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset FDI outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs
 *     in @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
				struct intel_link_bw_limits *limits)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->has_pch_encoder ||
		    !intel_crtc_needs_modeset(crtc_state) ||
		    !crtc_state->hw.enable)
			continue;

		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
		if (ret)
			return ret;
	}

	return 0;
}
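
/*
 * Sketch of the expected calling pattern (illustrative only, not the actual
 * caller code): the atomic check is meant to be redone with the reduced
 * @limits whenever this function returns -EAGAIN:
 *
 *	ret = intel_fdi_atomic_check_link(state, &limits);
 *	if (ret == -EAGAIN)
 *		goto recompute_crtc_states;
 *	else if (ret)
 *		return ret;
 */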

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

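/*
 * On IVB the FDI B and C links share a 4 lane budget: with BC bifurcation
 * enabled pipes B and C get 2 lanes each, with it disabled pipe B may use
 * all 4 lanes but the C link is unavailable (see ilk_check_fdi_lanes()).
 */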
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

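/* Voltage swing / pre-emphasis levels tried in turn during SNB/IVB FDI link training. */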
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}