   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include <linux/string_helpers.h>
   7
   8#include <drm/drm_fixed.h>
   9
  10#include "i915_reg.h"
  11#include "intel_atomic.h"
  12#include "intel_crtc.h"
  13#include "intel_ddi.h"
  14#include "intel_de.h"
  15#include "intel_dp.h"
  16#include "intel_display_types.h"
  17#include "intel_fdi.h"
  18#include "intel_fdi_regs.h"
  19#include "intel_link_bw.h"
  20
  21struct intel_fdi_funcs {
  22	void (*fdi_link_train)(struct intel_crtc *crtc,
  23			       const struct intel_crtc_state *crtc_state);
  24};
  25
  26static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  27			  enum pipe pipe, bool state)
  28{
  29	struct intel_display *display = &dev_priv->display;
  30	bool cur_state;
  31
  32	if (HAS_DDI(display)) {
  33		/*
  34		 * DDI does not have a specific FDI_TX register.
  35		 *
  36		 * FDI is never fed from EDP transcoder
  37		 * so pipe->transcoder cast is fine here.
  38		 */
  39		enum transcoder cpu_transcoder = (enum transcoder)pipe;
  40		cur_state = intel_de_read(display,
  41					  TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
  42	} else {
  43		cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
  44	}
  45	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
  46				 "FDI TX state assertion failure (expected %s, current %s)\n",
  47				 str_on_off(state), str_on_off(cur_state));
  48}
  49
  50void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
  51{
  52	assert_fdi_tx(i915, pipe, true);
  53}
  54
  55void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
  56{
  57	assert_fdi_tx(i915, pipe, false);
  58}
  59
  60static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  61			  enum pipe pipe, bool state)
  62{
  63	struct intel_display *display = &dev_priv->display;
  64	bool cur_state;
  65
  66	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
  67	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
  68				 "FDI RX state assertion failure (expected %s, current %s)\n",
  69				 str_on_off(state), str_on_off(cur_state));
  70}
  71
  72void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
  73{
  74	assert_fdi_rx(i915, pipe, true);
  75}
  76
  77void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
  78{
  79	assert_fdi_rx(i915, pipe, false);
  80}
  81
  82void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
  83			       enum pipe pipe)
  84{
  85	struct intel_display *display = &i915->display;
  86	bool cur_state;
  87
  88	/* ILK FDI PLL is always enabled */
  89	if (IS_IRONLAKE(i915))
  90		return;
  91
  92	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
  93	if (HAS_DDI(display))
  94		return;
  95
  96	cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
  97	INTEL_DISPLAY_STATE_WARN(display, !cur_state,
  98				 "FDI TX PLL assertion failure, should be active but is disabled\n");
  99}
 100
 101static void assert_fdi_rx_pll(struct drm_i915_private *i915,
 102			      enum pipe pipe, bool state)
 103{
 104	struct intel_display *display = &i915->display;
 105	bool cur_state;
 106
 107	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
 108	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
 109				 "FDI RX PLL assertion failure (expected %s, current %s)\n",
 110				 str_on_off(state), str_on_off(cur_state));
 111}
 112
 113void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
 114{
 115	assert_fdi_rx_pll(i915, pipe, true);
 116}
 117
 118void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
 119{
 120	assert_fdi_rx_pll(i915, pipe, false);
 121}
 122
 123void intel_fdi_link_train(struct intel_crtc *crtc,
 124			  const struct intel_crtc_state *crtc_state)
 125{
 126	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 127
 128	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
 129}
 130
 131/**
 132 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 133 * @state: intel atomic state
 134 *
 135 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 136 * known to affect the available FDI BW for the former CRTC. In practice this
 137 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 138 * CRTC C) and CRTC C is getting disabled.
 139 *
 140 * Returns 0 in case of success, or a negative error code otherwise.
 141 */
 142int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
 143{
 144	struct intel_display *display = to_intel_display(state);
 145	struct drm_i915_private *i915 = to_i915(state->base.dev);
 146	const struct intel_crtc_state *old_crtc_state;
 147	const struct intel_crtc_state *new_crtc_state;
 148	struct intel_crtc *crtc;
 149
 150	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
 151		return 0;
 152
 153	crtc = intel_crtc_for_pipe(display, PIPE_C);
 154	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 155	if (!new_crtc_state)
 156		return 0;
 157
 158	if (!intel_crtc_needs_modeset(new_crtc_state))
 159		return 0;
 160
 161	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
 162	if (!old_crtc_state->fdi_lanes)
 163		return 0;
 164
 165	crtc = intel_crtc_for_pipe(display, PIPE_B);
 166	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
 167	if (IS_ERR(new_crtc_state))
 168		return PTR_ERR(new_crtc_state);
 169
 170	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
 171	if (!old_crtc_state->fdi_lanes)
 172		return 0;
 173
 174	return intel_modeset_pipes_in_mask_early(state,
 175						 "FDI link BW decrease on pipe C",
 176						 BIT(PIPE_B));
 177}
 178
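/*
 * Illustrative caller sketch (not part of this file): the atomic check
 * path is assumed to invoke this early, before per-CRTC state computation,
 * roughly:
 *
 *	ret = intel_fdi_add_affected_crtcs(state);
 *	if (ret)
 *		return ret;
 *
 * so that pipe B's configuration is recomputed whenever pipe C stops
 * using its share of the FDI lanes.
 */
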
  179/* FDI lanes required by this pipe, or 0 if it is disabled or not driving a PCH encoder */
 180static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 181{
 182	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
 183		return crtc_state->fdi_lanes;
 184
 185	return 0;
 186}
 187
 188static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 189			       struct intel_crtc_state *pipe_config,
 190			       enum pipe *pipe_to_reduce)
 191{
 192	struct intel_display *display = to_intel_display(dev);
 193	struct drm_i915_private *dev_priv = to_i915(dev);
 194	struct drm_atomic_state *state = pipe_config->uapi.state;
 195	struct intel_crtc *other_crtc;
 196	struct intel_crtc_state *other_crtc_state;
 197
 198	*pipe_to_reduce = pipe;
 199
 200	drm_dbg_kms(&dev_priv->drm,
 201		    "checking fdi config on pipe %c, lanes %i\n",
 202		    pipe_name(pipe), pipe_config->fdi_lanes);
 203	if (pipe_config->fdi_lanes > 4) {
 204		drm_dbg_kms(&dev_priv->drm,
 205			    "invalid fdi lane config on pipe %c: %i lanes\n",
 206			    pipe_name(pipe), pipe_config->fdi_lanes);
 207		return -EINVAL;
 208	}
 209
 210	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 211		if (pipe_config->fdi_lanes > 2) {
 212			drm_dbg_kms(&dev_priv->drm,
 213				    "only 2 lanes on haswell, required: %i lanes\n",
 214				    pipe_config->fdi_lanes);
 215			return -EINVAL;
 216		} else {
 217			return 0;
 218		}
 219	}
 220
 221	if (INTEL_NUM_PIPES(dev_priv) == 2)
 222		return 0;
 223
 224	/* Ivybridge 3 pipe is really complicated */
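	/*
	 * Summary of the sharing rules encoded below: pipes B and C share
	 * one set of FDI lanes, so pipe B may use more than 2 lanes only
	 * while pipe C is not using FDI at all, and pipe C is capped at
	 * 2 lanes and additionally needs pipe B to use at most 2.
	 */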
 225	switch (pipe) {
 226	case PIPE_A:
 227		return 0;
 228	case PIPE_B:
 229		if (pipe_config->fdi_lanes <= 2)
 230			return 0;
 231
 232		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
 233		other_crtc_state =
 234			intel_atomic_get_crtc_state(state, other_crtc);
 235		if (IS_ERR(other_crtc_state))
 236			return PTR_ERR(other_crtc_state);
 237
 238		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
 239			drm_dbg_kms(&dev_priv->drm,
 240				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
 241				    pipe_name(pipe), pipe_config->fdi_lanes);
 242			return -EINVAL;
 243		}
 244		return 0;
 245	case PIPE_C:
 246		if (pipe_config->fdi_lanes > 2) {
 247			drm_dbg_kms(&dev_priv->drm,
 248				    "only 2 lanes on pipe %c: required %i lanes\n",
 249				    pipe_name(pipe), pipe_config->fdi_lanes);
 250			return -EINVAL;
 251		}
 252
 253		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
 254		other_crtc_state =
 255			intel_atomic_get_crtc_state(state, other_crtc);
 256		if (IS_ERR(other_crtc_state))
 257			return PTR_ERR(other_crtc_state);
 258
 259		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
 260			drm_dbg_kms(&dev_priv->drm,
 261				    "fdi link B uses too many lanes to enable link C\n");
 262
 263			*pipe_to_reduce = PIPE_B;
 264
 265			return -EINVAL;
 266		}
 267		return 0;
 268	default:
 269		MISSING_CASE(pipe);
 270		return 0;
 271	}
 272}
 273
 274void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
 275{
 276	if (IS_IRONLAKE(i915)) {
 277		u32 fdi_pll_clk =
 278			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
 279
 280		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
 281	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
 282		i915->display.fdi.pll_freq = 270000;
 283	} else {
 284		return;
 285	}
 286
 287	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
 288}
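
/*
 * Worked example (illustrative, assumed readback value): a feedback clock
 * field of 25 on ILK gives (25 + 2) * 10000 = 270000, matching the value
 * hardcoded for SNB/IVB above.
 */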
 289
 290int intel_fdi_link_freq(struct drm_i915_private *i915,
 291			const struct intel_crtc_state *pipe_config)
 292{
 293	if (HAS_DDI(i915))
 294		return pipe_config->port_clock; /* SPLL */
 295	else
 296		return i915->display.fdi.pll_freq;
 297}
 298
 299/**
 300 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 301 * @crtc_state: the crtc state
 302 *
 303 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 304 * call this function during state computation in the simple case where the
 305 * link bpp will always match the pipe bpp. This is the case for all non-DP
 306 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 307 * of DSC compression.
 308 *
 309 * Returns %true in case of success, %false if pipe bpp would need to be
 310 * reduced below its valid range.
 311 */
 312bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
 313{
 314	int pipe_bpp = min(crtc_state->pipe_bpp,
 315			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));
 316
 317	pipe_bpp = rounddown(pipe_bpp, 2 * 3);
 318
 319	if (pipe_bpp < 6 * 3)
 320		return false;
 321
 322	crtc_state->pipe_bpp = pipe_bpp;
 323
 324	return true;
 325}
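
/*
 * Worked example (illustrative): pipe_bpp is 3 components x bpc and valid
 * bpc values step by 2, hence the rounding to a multiple of 2 * 3 and the
 * 6 * 3 = 18 bpp floor. With pipe_bpp = 30 and max_link_bpp_x16 = 328
 * (20.5 bpp, truncated to 20), min() gives 20 and rounddown() gives 18
 * (6 bpc), so 18 is stored and the function returns true.
 */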
 326
 327int ilk_fdi_compute_config(struct intel_crtc *crtc,
 328			   struct intel_crtc_state *pipe_config)
 329{
 330	struct drm_device *dev = crtc->base.dev;
 331	struct drm_i915_private *i915 = to_i915(dev);
 332	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 333	int lane, link_bw, fdi_dotclock;
 334
 335	/* FDI is a binary signal running at ~2.7GHz, encoding
 336	 * each output octet as 10 bits. The actual frequency
 337	 * is stored as a divider into a 100MHz clock, and the
 338	 * mode pixel clock is stored in units of 1KHz.
 339	 * Hence the bw of each lane in terms of the mode signal
 340	 * is:
 341	 */
 342	link_bw = intel_fdi_link_freq(i915, pipe_config);
 343
 344	fdi_dotclock = adjusted_mode->crtc_clock;
 345
 346	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
 347				      pipe_config->pipe_bpp);
 348
 349	pipe_config->fdi_lanes = lane;
 350
 351	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
 352			       lane, fdi_dotclock,
 353			       link_bw,
 354			       intel_dp_bw_fec_overhead(false),
 355			       &pipe_config->fdi_m_n);
 356
 357	return 0;
 358}
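
/*
 * Rough illustration (assumed mode, not from this file): a 1920x1080@60
 * mode has a dotclock of about 148500 kHz; at 24 bpp that is
 * 148500 * 24 = 3564000 kbit/s, while a single FDI lane at the 270000
 * link frequency with 8 data bits per 10-bit symbol carries about
 * 270000 * 8 = 2160000 kbit/s, so at least two lanes are needed.
 * ilk_get_lanes_required() performs the exact calculation.
 */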
 359
 360static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
 361				     struct intel_crtc *crtc,
 362				     struct intel_crtc_state *pipe_config,
 363				     struct intel_link_bw_limits *limits)
 364{
 365	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 366	enum pipe pipe_to_reduce;
 367	int ret;
 368
 369	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
 370				  &pipe_to_reduce);
 371	if (ret != -EINVAL)
 372		return ret;
 373
 374	ret = intel_link_bw_reduce_bpp(state, limits,
 375				       BIT(pipe_to_reduce),
 376				       "FDI link BW");
 377
 378	return ret ? : -EAGAIN;
 379}
 380
 381/**
 382 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 383 * @state: intel atomic state
 384 * @limits: link BW limits
 385 *
 386 * Check the link configuration for all modeset FDI outputs. If the
 387 * configuration is invalid @limits will be updated if possible to
 388 * reduce the total BW, after which the configuration for all CRTCs in
 389 * @state must be recomputed with the updated @limits.
 390 *
 391 * Returns:
  392 *   - 0 if the configuration is valid
 393 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 394 *     with fallback values with which the configuration of all CRTCs
 395 *     in @state must be recomputed
 396 *   - Other negative error, if the configuration is invalid without a
 397 *     fallback possibility, or the check failed for another reason
 398 */
 399int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
 400				struct intel_link_bw_limits *limits)
 401{
 402	struct intel_crtc *crtc;
 403	struct intel_crtc_state *crtc_state;
 404	int i;
 405
 406	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 407		int ret;
 408
 409		if (!crtc_state->has_pch_encoder ||
 410		    !intel_crtc_needs_modeset(crtc_state) ||
 411		    !crtc_state->hw.enable)
 412			continue;
 413
 414		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
 415		if (ret)
 416			return ret;
 417	}
 418
 419	return 0;
 420}
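
/*
 * Illustrative caller sketch (not part of this file): on -EAGAIN the
 * atomic check code is assumed to recompute every CRTC state with the
 * reduced @limits and call this again, roughly:
 *
 *	ret = intel_fdi_atomic_check_link(state, &limits);
 *	if (ret == -EAGAIN)
 *		goto recompute_with_new_limits;
 *	else if (ret)
 *		return ret;
 */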
 421
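/*
 * The FDI_BC_BIFURCATION_SELECT chicken bit splits the lanes normally
 * dedicated to FDI B so that FDI C can use two of them. It may only be
 * flipped while both receivers are disabled, which the WARNs below check.
 */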
 422static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
 423{
 424	u32 temp;
 425
 426	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
 427	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
 428		return;
 429
 430	drm_WARN_ON(&dev_priv->drm,
 431		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
 432		    FDI_RX_ENABLE);
 433	drm_WARN_ON(&dev_priv->drm,
 434		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
 435		    FDI_RX_ENABLE);
 436
 437	temp &= ~FDI_BC_BIFURCATION_SELECT;
 438	if (enable)
 439		temp |= FDI_BC_BIFURCATION_SELECT;
 440
 441	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
 442		    enable ? "en" : "dis");
 443	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
 444	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
 445}
 446
 447static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
 448{
 449	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 450	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 451
 452	switch (crtc->pipe) {
 453	case PIPE_A:
 454		break;
 455	case PIPE_B:
 456		if (crtc_state->fdi_lanes > 2)
 457			cpt_set_fdi_bc_bifurcation(dev_priv, false);
 458		else
 459			cpt_set_fdi_bc_bifurcation(dev_priv, true);
 460
 461		break;
 462	case PIPE_C:
 463		cpt_set_fdi_bc_bifurcation(dev_priv, true);
 464
 465		break;
 466	default:
 467		MISSING_CASE(crtc->pipe);
 468	}
 469}
 470
 471void intel_fdi_normal_train(struct intel_crtc *crtc)
 472{
 473	struct drm_device *dev = crtc->base.dev;
 474	struct drm_i915_private *dev_priv = to_i915(dev);
 475	enum pipe pipe = crtc->pipe;
 476	i915_reg_t reg;
 477	u32 temp;
 478
 479	/* enable normal train */
 480	reg = FDI_TX_CTL(pipe);
 481	temp = intel_de_read(dev_priv, reg);
 482	if (IS_IVYBRIDGE(dev_priv)) {
 483		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
 484		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
 485	} else {
 486		temp &= ~FDI_LINK_TRAIN_NONE;
 487		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
 488	}
 489	intel_de_write(dev_priv, reg, temp);
 490
 491	reg = FDI_RX_CTL(pipe);
 492	temp = intel_de_read(dev_priv, reg);
 493	if (HAS_PCH_CPT(dev_priv)) {
 494		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 495		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
 496	} else {
 497		temp &= ~FDI_LINK_TRAIN_NONE;
 498		temp |= FDI_LINK_TRAIN_NONE;
 499	}
 500	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
 501
 502	/* wait one idle pattern time */
 503	intel_de_posting_read(dev_priv, reg);
 504	udelay(1000);
 505
 506	/* IVB wants error correction enabled */
 507	if (IS_IVYBRIDGE(dev_priv))
 508		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
 509}
 510
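/*
 * Overview of the ILK/SNB/IVB training sequences below (derived from the
 * code): the TX emits training pattern 1 until the RX reports
 * FDI_RX_BIT_LOCK in FDI_RX_IIR, then pattern 2 until FDI_RX_SYMBOL_LOCK
 * is set, after which intel_fdi_normal_train() switches the link to the
 * normal/idle pattern. On SNB/IVB each attempt additionally steps through
 * the vswing/emphasis values in snb_b_fdi_train_param[].
 */
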
 511/* The FDI link training functions for ILK/Ibexpeak. */
 512static void ilk_fdi_link_train(struct intel_crtc *crtc,
 513			       const struct intel_crtc_state *crtc_state)
 514{
 515	struct drm_device *dev = crtc->base.dev;
 516	struct drm_i915_private *dev_priv = to_i915(dev);
 517	enum pipe pipe = crtc->pipe;
 518	i915_reg_t reg;
 519	u32 temp, tries;
 520
 521	/*
 522	 * Write the TU size bits before fdi link training, so that error
 523	 * detection works.
 524	 */
 525	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
 526		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
 527
 528	/* FDI needs bits from pipe first */
 529	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
 530
  531	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 532	   for train result */
 533	reg = FDI_RX_IMR(pipe);
 534	temp = intel_de_read(dev_priv, reg);
 535	temp &= ~FDI_RX_SYMBOL_LOCK;
 536	temp &= ~FDI_RX_BIT_LOCK;
 537	intel_de_write(dev_priv, reg, temp);
 538	intel_de_read(dev_priv, reg);
 539	udelay(150);
 540
 541	/* enable CPU FDI TX and PCH FDI RX */
 542	reg = FDI_TX_CTL(pipe);
 543	temp = intel_de_read(dev_priv, reg);
 544	temp &= ~FDI_DP_PORT_WIDTH_MASK;
 545	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 546	temp &= ~FDI_LINK_TRAIN_NONE;
 547	temp |= FDI_LINK_TRAIN_PATTERN_1;
 548	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 549
 550	reg = FDI_RX_CTL(pipe);
 551	temp = intel_de_read(dev_priv, reg);
 552	temp &= ~FDI_LINK_TRAIN_NONE;
 553	temp |= FDI_LINK_TRAIN_PATTERN_1;
 554	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 555
 556	intel_de_posting_read(dev_priv, reg);
 557	udelay(150);
 558
  559	/* Ironlake workaround, enable clock pointer after FDI enable */
 560	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
 561		       FDI_RX_PHASE_SYNC_POINTER_OVR);
 562	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
 563		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
 564
 565	reg = FDI_RX_IIR(pipe);
 566	for (tries = 0; tries < 5; tries++) {
 567		temp = intel_de_read(dev_priv, reg);
 568		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 569
 570		if ((temp & FDI_RX_BIT_LOCK)) {
 571			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
 572			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
 573			break;
 574		}
 575	}
 576	if (tries == 5)
 577		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
 578
 579	/* Train 2 */
 580	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 581		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
 582	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
 583		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
 584	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
 585	udelay(150);
 586
 587	reg = FDI_RX_IIR(pipe);
 588	for (tries = 0; tries < 5; tries++) {
 589		temp = intel_de_read(dev_priv, reg);
 590		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 591
 592		if (temp & FDI_RX_SYMBOL_LOCK) {
 593			intel_de_write(dev_priv, reg,
 594				       temp | FDI_RX_SYMBOL_LOCK);
 595			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
 596			break;
 597		}
 598	}
 599	if (tries == 5)
 600		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
 601
 602	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
 603
 604}
 605
 606static const int snb_b_fdi_train_param[] = {
 607	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
 608	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
 609	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
 610	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
 611};
 612
 613/* The FDI link training functions for SNB/Cougarpoint. */
 614static void gen6_fdi_link_train(struct intel_crtc *crtc,
 615				const struct intel_crtc_state *crtc_state)
 616{
 617	struct drm_device *dev = crtc->base.dev;
 618	struct drm_i915_private *dev_priv = to_i915(dev);
 619	enum pipe pipe = crtc->pipe;
 620	i915_reg_t reg;
 621	u32 temp, i, retry;
 622
 623	/*
 624	 * Write the TU size bits before fdi link training, so that error
 625	 * detection works.
 626	 */
 627	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
 628		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
 629
  630	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 631	   for train result */
 632	reg = FDI_RX_IMR(pipe);
 633	temp = intel_de_read(dev_priv, reg);
 634	temp &= ~FDI_RX_SYMBOL_LOCK;
 635	temp &= ~FDI_RX_BIT_LOCK;
 636	intel_de_write(dev_priv, reg, temp);
 637
 638	intel_de_posting_read(dev_priv, reg);
 639	udelay(150);
 640
 641	/* enable CPU FDI TX and PCH FDI RX */
 642	reg = FDI_TX_CTL(pipe);
 643	temp = intel_de_read(dev_priv, reg);
 644	temp &= ~FDI_DP_PORT_WIDTH_MASK;
 645	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 646	temp &= ~FDI_LINK_TRAIN_NONE;
 647	temp |= FDI_LINK_TRAIN_PATTERN_1;
 648	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 649	/* SNB-B */
 650	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 651	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 652
 653	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
 654		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 655
 656	reg = FDI_RX_CTL(pipe);
 657	temp = intel_de_read(dev_priv, reg);
 658	if (HAS_PCH_CPT(dev_priv)) {
 659		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 660		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 661	} else {
 662		temp &= ~FDI_LINK_TRAIN_NONE;
 663		temp |= FDI_LINK_TRAIN_PATTERN_1;
 664	}
 665	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 666
 667	intel_de_posting_read(dev_priv, reg);
 668	udelay(150);
 669
 670	for (i = 0; i < 4; i++) {
 671		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 672			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
 673		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
 674		udelay(500);
 675
 676		for (retry = 0; retry < 5; retry++) {
 677			reg = FDI_RX_IIR(pipe);
 678			temp = intel_de_read(dev_priv, reg);
 679			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 680			if (temp & FDI_RX_BIT_LOCK) {
 681				intel_de_write(dev_priv, reg,
 682					       temp | FDI_RX_BIT_LOCK);
 683				drm_dbg_kms(&dev_priv->drm,
 684					    "FDI train 1 done.\n");
 685				break;
 686			}
 687			udelay(50);
 688		}
 689		if (retry < 5)
 690			break;
 691	}
 692	if (i == 4)
 693		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
 694
 695	/* Train 2 */
 696	reg = FDI_TX_CTL(pipe);
 697	temp = intel_de_read(dev_priv, reg);
 698	temp &= ~FDI_LINK_TRAIN_NONE;
 699	temp |= FDI_LINK_TRAIN_PATTERN_2;
 700	if (IS_SANDYBRIDGE(dev_priv)) {
 701		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 702		/* SNB-B */
 703		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 704	}
 705	intel_de_write(dev_priv, reg, temp);
 706
 707	reg = FDI_RX_CTL(pipe);
 708	temp = intel_de_read(dev_priv, reg);
 709	if (HAS_PCH_CPT(dev_priv)) {
 710		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 711		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
 712	} else {
 713		temp &= ~FDI_LINK_TRAIN_NONE;
 714		temp |= FDI_LINK_TRAIN_PATTERN_2;
 715	}
 716	intel_de_write(dev_priv, reg, temp);
 717
 718	intel_de_posting_read(dev_priv, reg);
 719	udelay(150);
 720
 721	for (i = 0; i < 4; i++) {
 722		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 723			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
 724		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
 725		udelay(500);
 726
 727		for (retry = 0; retry < 5; retry++) {
 728			reg = FDI_RX_IIR(pipe);
 729			temp = intel_de_read(dev_priv, reg);
 730			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 731			if (temp & FDI_RX_SYMBOL_LOCK) {
 732				intel_de_write(dev_priv, reg,
 733					       temp | FDI_RX_SYMBOL_LOCK);
 734				drm_dbg_kms(&dev_priv->drm,
 735					    "FDI train 2 done.\n");
 736				break;
 737			}
 738			udelay(50);
 739		}
 740		if (retry < 5)
 741			break;
 742	}
 743	if (i == 4)
 744		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
 745
 746	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
 747}
 748
 749/* Manual link training for Ivy Bridge A0 parts */
 750static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
 751				      const struct intel_crtc_state *crtc_state)
 752{
 753	struct drm_device *dev = crtc->base.dev;
 754	struct drm_i915_private *dev_priv = to_i915(dev);
 755	enum pipe pipe = crtc->pipe;
 756	i915_reg_t reg;
 757	u32 temp, i, j;
 758
 759	ivb_update_fdi_bc_bifurcation(crtc_state);
 760
 761	/*
 762	 * Write the TU size bits before fdi link training, so that error
 763	 * detection works.
 764	 */
 765	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
 766		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
 767
  768	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 769	   for train result */
 770	reg = FDI_RX_IMR(pipe);
 771	temp = intel_de_read(dev_priv, reg);
 772	temp &= ~FDI_RX_SYMBOL_LOCK;
 773	temp &= ~FDI_RX_BIT_LOCK;
 774	intel_de_write(dev_priv, reg, temp);
 775
 776	intel_de_posting_read(dev_priv, reg);
 777	udelay(150);
 778
 779	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
 780		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
 781
 782	/* Try each vswing and preemphasis setting twice before moving on */
 783	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
 784		/* disable first in case we need to retry */
 785		reg = FDI_TX_CTL(pipe);
 786		temp = intel_de_read(dev_priv, reg);
 787		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
 788		temp &= ~FDI_TX_ENABLE;
 789		intel_de_write(dev_priv, reg, temp);
 790
 791		reg = FDI_RX_CTL(pipe);
 792		temp = intel_de_read(dev_priv, reg);
 793		temp &= ~FDI_LINK_TRAIN_AUTO;
 794		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 795		temp &= ~FDI_RX_ENABLE;
 796		intel_de_write(dev_priv, reg, temp);
 797
 798		/* enable CPU FDI TX and PCH FDI RX */
 799		reg = FDI_TX_CTL(pipe);
 800		temp = intel_de_read(dev_priv, reg);
 801		temp &= ~FDI_DP_PORT_WIDTH_MASK;
 802		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 803		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
 804		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 805		temp |= snb_b_fdi_train_param[j/2];
 806		temp |= FDI_COMPOSITE_SYNC;
 807		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 808
 809		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
 810			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 811
 812		reg = FDI_RX_CTL(pipe);
 813		temp = intel_de_read(dev_priv, reg);
 814		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 815		temp |= FDI_COMPOSITE_SYNC;
 816		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 817
 818		intel_de_posting_read(dev_priv, reg);
 819		udelay(1); /* should be 0.5us */
 820
 821		for (i = 0; i < 4; i++) {
 822			reg = FDI_RX_IIR(pipe);
 823			temp = intel_de_read(dev_priv, reg);
 824			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 825
 826			if (temp & FDI_RX_BIT_LOCK ||
 827			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
 828				intel_de_write(dev_priv, reg,
 829					       temp | FDI_RX_BIT_LOCK);
 830				drm_dbg_kms(&dev_priv->drm,
 831					    "FDI train 1 done, level %i.\n",
 832					    i);
 833				break;
 834			}
 835			udelay(1); /* should be 0.5us */
 836		}
 837		if (i == 4) {
 838			drm_dbg_kms(&dev_priv->drm,
 839				    "FDI train 1 fail on vswing %d\n", j / 2);
 840			continue;
 841		}
 842
 843		/* Train 2 */
 844		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 845			     FDI_LINK_TRAIN_NONE_IVB,
 846			     FDI_LINK_TRAIN_PATTERN_2_IVB);
 847		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
 848			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
 849			     FDI_LINK_TRAIN_PATTERN_2_CPT);
 850		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
 851		udelay(2); /* should be 1.5us */
 852
 853		for (i = 0; i < 4; i++) {
 854			reg = FDI_RX_IIR(pipe);
 855			temp = intel_de_read(dev_priv, reg);
 856			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 857
 858			if (temp & FDI_RX_SYMBOL_LOCK ||
 859			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
 860				intel_de_write(dev_priv, reg,
 861					       temp | FDI_RX_SYMBOL_LOCK);
 862				drm_dbg_kms(&dev_priv->drm,
 863					    "FDI train 2 done, level %i.\n",
 864					    i);
 865				goto train_done;
 866			}
 867			udelay(2); /* should be 1.5us */
 868		}
 869		if (i == 4)
 870			drm_dbg_kms(&dev_priv->drm,
 871				    "FDI train 2 fail on vswing %d\n", j / 2);
 872	}
 873
 874train_done:
 875	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
 876}
 877
 878/* Starting with Haswell, different DDI ports can work in FDI mode for
 879 * connection to the PCH-located connectors. For this, it is necessary to train
 880 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 881 *
 882 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
  883 * please note that when FDI mode is active on DDI E, it shares 2 lanes with
 884 * DDI A (which is used for eDP)
 885 */
 886void hsw_fdi_link_train(struct intel_encoder *encoder,
 887			const struct intel_crtc_state *crtc_state)
 888{
 889	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 890	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 891	u32 temp, i, rx_ctl_val;
 892	int n_entries;
 893
 894	encoder->get_buf_trans(encoder, crtc_state, &n_entries);
 895
 896	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
 897
 898	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
 899	 * mode set "sequence for CRT port" document:
 900	 * - TP1 to TP2 time with the default value
 901	 * - FDI delay to 90h
 902	 *
 903	 * WaFDIAutoLinkSetTimingOverrride:hsw
 904	 */
 905	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
 906		       FDI_RX_PWRDN_LANE1_VAL(2) |
 907		       FDI_RX_PWRDN_LANE0_VAL(2) |
 908		       FDI_RX_TP1_TO_TP2_48 |
 909		       FDI_RX_FDI_DELAY_90);
 910
 911	/* Enable the PCH Receiver FDI PLL */
 912	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
 913		     FDI_RX_PLL_ENABLE |
 914		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 915	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 916	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 917	udelay(220);
 918
 919	/* Switch from Rawclk to PCDclk */
 920	rx_ctl_val |= FDI_PCDCLK;
 921	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 922
 923	/* Configure Port Clock Select */
 924	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
 925	intel_ddi_enable_clock(encoder, crtc_state);
 926
 927	/* Start the training iterating through available voltages and emphasis,
 928	 * testing each value twice. */
 929	for (i = 0; i < n_entries * 2; i++) {
 930		/* Configure DP_TP_CTL with auto-training */
 931		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
 932			       DP_TP_CTL_FDI_AUTOTRAIN |
 933			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
 934			       DP_TP_CTL_LINK_TRAIN_PAT1 |
 935			       DP_TP_CTL_ENABLE);
 936
 937		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
 938		 * DDI E does not support port reversal, the functionality is
 939		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
 940		 * port reversal bit */
 941		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
 942			       DDI_BUF_CTL_ENABLE |
 943			       ((crtc_state->fdi_lanes - 1) << 1) |
 944			       DDI_BUF_TRANS_SELECT(i / 2));
 945		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
 946
 947		udelay(600);
 948
 949		/* Program PCH FDI Receiver TU */
 950		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
 951
 952		/* Enable PCH FDI Receiver with auto-training */
 953		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
 954		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 955		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 956
 957		/* Wait for FDI receiver lane calibration */
 958		udelay(30);
 959
 960		/* Unset FDI_RX_MISC pwrdn lanes */
 961		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
 962			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
 963		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
 964
 965		/* Wait for FDI auto training time */
 966		udelay(5);
 967
 968		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
 969		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
 970			drm_dbg_kms(&dev_priv->drm,
 971				    "FDI link training done on step %d\n", i);
 972			break;
 973		}
 974
 975		/*
 976		 * Leave things enabled even if we failed to train FDI.
 977		 * Results in less fireworks from the state checker.
 978		 */
 979		if (i == n_entries * 2 - 1) {
 980			drm_err(&dev_priv->drm, "FDI link training failed!\n");
 981			break;
 982		}
 983
 984		rx_ctl_val &= ~FDI_RX_ENABLE;
 985		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 986		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 987
 988		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
 989		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
 990
 991		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
 992		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
 993		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
 994
 995		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 996
 997		/* Reset FDI_RX_MISC pwrdn lanes */
 998		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
 999			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
1000			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
1001		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
1002	}
1003
1004	/* Enable normal pixel sending for FDI */
1005	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
1006		       DP_TP_CTL_FDI_AUTOTRAIN |
1007		       DP_TP_CTL_LINK_TRAIN_NORMAL |
1008		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
1009		       DP_TP_CTL_ENABLE);
1010}
1011
1012void hsw_fdi_disable(struct intel_encoder *encoder)
1013{
1014	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1015
1016	/*
1017	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
1018	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
1019	 * step 13 is the correct place for it. Step 18 is where it was
1020	 * originally before the BUN.
1021	 */
1022	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
1023	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
1024	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
1025	intel_ddi_disable_clock(encoder);
1026	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
1027		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
1028		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
1029	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
1030	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
1031}
1032
1033void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
1034{
1035	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1036	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1037	enum pipe pipe = crtc->pipe;
1038	i915_reg_t reg;
1039	u32 temp;
1040
1041	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1042	reg = FDI_RX_CTL(pipe);
1043	temp = intel_de_read(dev_priv, reg);
1044	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
1045	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
1046	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1047	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
1048
1049	intel_de_posting_read(dev_priv, reg);
1050	udelay(200);
1051
1052	/* Switch from Rawclk to PCDclk */
1053	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
1054	intel_de_posting_read(dev_priv, reg);
1055	udelay(200);
1056
1057	/* Enable CPU FDI TX PLL, always on for Ironlake */
1058	reg = FDI_TX_CTL(pipe);
1059	temp = intel_de_read(dev_priv, reg);
1060	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1061		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
1062
1063		intel_de_posting_read(dev_priv, reg);
1064		udelay(100);
1065	}
1066}
1067
1068void ilk_fdi_pll_disable(struct intel_crtc *crtc)
1069{
1070	struct drm_device *dev = crtc->base.dev;
1071	struct drm_i915_private *dev_priv = to_i915(dev);
1072	enum pipe pipe = crtc->pipe;
1073
1074	/* Switch from PCDclk to Rawclk */
1075	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
1076
1077	/* Disable CPU FDI TX PLL */
1078	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
1079	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1080	udelay(100);
1081
1082	/* Wait for the clocks to turn off. */
1083	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
1084	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
1085	udelay(100);
1086}
1087
1088void ilk_fdi_disable(struct intel_crtc *crtc)
1089{
1090	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1091	enum pipe pipe = crtc->pipe;
1092	i915_reg_t reg;
1093	u32 temp;
1094
1095	/* disable CPU FDI tx and PCH FDI rx */
1096	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
1097	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1098
1099	reg = FDI_RX_CTL(pipe);
1100	temp = intel_de_read(dev_priv, reg);
1101	temp &= ~(0x7 << 16);
1102	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1103	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
1104
1105	intel_de_posting_read(dev_priv, reg);
1106	udelay(100);
1107
1108	/* Ironlake workaround, disable clock pointer after downing FDI */
1109	if (HAS_PCH_IBX(dev_priv))
1110		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
1111			       FDI_RX_PHASE_SYNC_POINTER_OVR);
1112
1113	/* still set train pattern 1 */
1114	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
1115		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
1116
1117	reg = FDI_RX_CTL(pipe);
1118	temp = intel_de_read(dev_priv, reg);
1119	if (HAS_PCH_CPT(dev_priv)) {
1120		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1121		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1122	} else {
1123		temp &= ~FDI_LINK_TRAIN_NONE;
1124		temp |= FDI_LINK_TRAIN_PATTERN_1;
1125	}
1126	/* BPC in FDI rx is consistent with that in TRANSCONF */
1127	temp &= ~(0x07 << 16);
1128	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1129	intel_de_write(dev_priv, reg, temp);
1130
1131	intel_de_posting_read(dev_priv, reg);
1132	udelay(100);
1133}
1134
1135static const struct intel_fdi_funcs ilk_funcs = {
1136	.fdi_link_train = ilk_fdi_link_train,
1137};
1138
1139static const struct intel_fdi_funcs gen6_funcs = {
1140	.fdi_link_train = gen6_fdi_link_train,
1141};
1142
1143static const struct intel_fdi_funcs ivb_funcs = {
1144	.fdi_link_train = ivb_manual_fdi_link_train,
1145};
1146
1147void
1148intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1149{
1150	if (IS_IRONLAKE(dev_priv)) {
1151		dev_priv->display.funcs.fdi = &ilk_funcs;
1152	} else if (IS_SANDYBRIDGE(dev_priv)) {
1153		dev_priv->display.funcs.fdi = &gen6_funcs;
1154	} else if (IS_IVYBRIDGE(dev_priv)) {
1155		/* FIXME: detect B0+ stepping and use auto training */
1156		dev_priv->display.funcs.fdi = &ivb_funcs;
1157	}
1158}
v6.9.4
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include <linux/string_helpers.h>
   7
 
 
   8#include "i915_reg.h"
   9#include "intel_atomic.h"
  10#include "intel_crtc.h"
  11#include "intel_ddi.h"
  12#include "intel_de.h"
  13#include "intel_dp.h"
  14#include "intel_display_types.h"
  15#include "intel_fdi.h"
  16#include "intel_fdi_regs.h"
  17#include "intel_link_bw.h"
  18
  19struct intel_fdi_funcs {
  20	void (*fdi_link_train)(struct intel_crtc *crtc,
  21			       const struct intel_crtc_state *crtc_state);
  22};
  23
  24static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  25			  enum pipe pipe, bool state)
  26{
 
  27	bool cur_state;
  28
  29	if (HAS_DDI(dev_priv)) {
  30		/*
  31		 * DDI does not have a specific FDI_TX register.
  32		 *
  33		 * FDI is never fed from EDP transcoder
  34		 * so pipe->transcoder cast is fine here.
  35		 */
  36		enum transcoder cpu_transcoder = (enum transcoder)pipe;
  37		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
 
  38	} else {
  39		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
  40	}
  41	I915_STATE_WARN(dev_priv, cur_state != state,
  42			"FDI TX state assertion failure (expected %s, current %s)\n",
  43			str_on_off(state), str_on_off(cur_state));
  44}
  45
  46void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
  47{
  48	assert_fdi_tx(i915, pipe, true);
  49}
  50
  51void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
  52{
  53	assert_fdi_tx(i915, pipe, false);
  54}
  55
  56static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  57			  enum pipe pipe, bool state)
  58{
 
  59	bool cur_state;
  60
  61	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
  62	I915_STATE_WARN(dev_priv, cur_state != state,
  63			"FDI RX state assertion failure (expected %s, current %s)\n",
  64			str_on_off(state), str_on_off(cur_state));
  65}
  66
  67void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
  68{
  69	assert_fdi_rx(i915, pipe, true);
  70}
  71
  72void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
  73{
  74	assert_fdi_rx(i915, pipe, false);
  75}
  76
  77void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
  78			       enum pipe pipe)
  79{
 
  80	bool cur_state;
  81
  82	/* ILK FDI PLL is always enabled */
  83	if (IS_IRONLAKE(i915))
  84		return;
  85
  86	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
  87	if (HAS_DDI(i915))
  88		return;
  89
  90	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
  91	I915_STATE_WARN(i915, !cur_state,
  92			"FDI TX PLL assertion failure, should be active but is disabled\n");
  93}
  94
  95static void assert_fdi_rx_pll(struct drm_i915_private *i915,
  96			      enum pipe pipe, bool state)
  97{
 
  98	bool cur_state;
  99
 100	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
 101	I915_STATE_WARN(i915, cur_state != state,
 102			"FDI RX PLL assertion failure (expected %s, current %s)\n",
 103			str_on_off(state), str_on_off(cur_state));
 104}
 105
 106void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
 107{
 108	assert_fdi_rx_pll(i915, pipe, true);
 109}
 110
 111void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
 112{
 113	assert_fdi_rx_pll(i915, pipe, false);
 114}
 115
 116void intel_fdi_link_train(struct intel_crtc *crtc,
 117			  const struct intel_crtc_state *crtc_state)
 118{
 119	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 120
 121	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
 122}
 123
 124/**
 125 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 126 * @state: intel atomic state
 127 *
 128 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 129 * known to affect the available FDI BW for the former CRTC. In practice this
 130 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 131 * CRTC C) and CRTC C is getting disabled.
 132 *
 133 * Returns 0 in case of success, or a negative error code otherwise.
 134 */
 135int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
 136{
 
 137	struct drm_i915_private *i915 = to_i915(state->base.dev);
 138	const struct intel_crtc_state *old_crtc_state;
 139	const struct intel_crtc_state *new_crtc_state;
 140	struct intel_crtc *crtc;
 141
 142	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
 143		return 0;
 144
 145	crtc = intel_crtc_for_pipe(i915, PIPE_C);
 146	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 147	if (!new_crtc_state)
 148		return 0;
 149
 150	if (!intel_crtc_needs_modeset(new_crtc_state))
 151		return 0;
 152
 153	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
 154	if (!old_crtc_state->fdi_lanes)
 155		return 0;
 156
 157	crtc = intel_crtc_for_pipe(i915, PIPE_B);
 158	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
 159	if (IS_ERR(new_crtc_state))
 160		return PTR_ERR(new_crtc_state);
 161
 162	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
 163	if (!old_crtc_state->fdi_lanes)
 164		return 0;
 165
 166	return intel_modeset_pipes_in_mask_early(state,
 167						 "FDI link BW decrease on pipe C",
 168						 BIT(PIPE_B));
 169}
 170
 171/* units of 100MHz */
 172static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 173{
 174	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
 175		return crtc_state->fdi_lanes;
 176
 177	return 0;
 178}
 179
 180static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 181			       struct intel_crtc_state *pipe_config,
 182			       enum pipe *pipe_to_reduce)
 183{
 
 184	struct drm_i915_private *dev_priv = to_i915(dev);
 185	struct drm_atomic_state *state = pipe_config->uapi.state;
 186	struct intel_crtc *other_crtc;
 187	struct intel_crtc_state *other_crtc_state;
 188
 189	*pipe_to_reduce = pipe;
 190
 191	drm_dbg_kms(&dev_priv->drm,
 192		    "checking fdi config on pipe %c, lanes %i\n",
 193		    pipe_name(pipe), pipe_config->fdi_lanes);
 194	if (pipe_config->fdi_lanes > 4) {
 195		drm_dbg_kms(&dev_priv->drm,
 196			    "invalid fdi lane config on pipe %c: %i lanes\n",
 197			    pipe_name(pipe), pipe_config->fdi_lanes);
 198		return -EINVAL;
 199	}
 200
 201	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 202		if (pipe_config->fdi_lanes > 2) {
 203			drm_dbg_kms(&dev_priv->drm,
 204				    "only 2 lanes on haswell, required: %i lanes\n",
 205				    pipe_config->fdi_lanes);
 206			return -EINVAL;
 207		} else {
 208			return 0;
 209		}
 210	}
 211
 212	if (INTEL_NUM_PIPES(dev_priv) == 2)
 213		return 0;
 214
 215	/* Ivybridge 3 pipe is really complicated */
 216	switch (pipe) {
 217	case PIPE_A:
 218		return 0;
 219	case PIPE_B:
 220		if (pipe_config->fdi_lanes <= 2)
 221			return 0;
 222
 223		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
 224		other_crtc_state =
 225			intel_atomic_get_crtc_state(state, other_crtc);
 226		if (IS_ERR(other_crtc_state))
 227			return PTR_ERR(other_crtc_state);
 228
 229		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
 230			drm_dbg_kms(&dev_priv->drm,
 231				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
 232				    pipe_name(pipe), pipe_config->fdi_lanes);
 233			return -EINVAL;
 234		}
 235		return 0;
 236	case PIPE_C:
 237		if (pipe_config->fdi_lanes > 2) {
 238			drm_dbg_kms(&dev_priv->drm,
 239				    "only 2 lanes on pipe %c: required %i lanes\n",
 240				    pipe_name(pipe), pipe_config->fdi_lanes);
 241			return -EINVAL;
 242		}
 243
 244		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
 245		other_crtc_state =
 246			intel_atomic_get_crtc_state(state, other_crtc);
 247		if (IS_ERR(other_crtc_state))
 248			return PTR_ERR(other_crtc_state);
 249
 250		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
 251			drm_dbg_kms(&dev_priv->drm,
 252				    "fdi link B uses too many lanes to enable link C\n");
 253
 254			*pipe_to_reduce = PIPE_B;
 255
 256			return -EINVAL;
 257		}
 258		return 0;
 259	default:
 260		MISSING_CASE(pipe);
 261		return 0;
 262	}
 263}
 264
 265void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
 266{
 267	if (IS_IRONLAKE(i915)) {
 268		u32 fdi_pll_clk =
 269			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
 270
 271		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
 272	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
 273		i915->display.fdi.pll_freq = 270000;
 274	} else {
 275		return;
 276	}
 277
 278	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
 279}
 280
 281int intel_fdi_link_freq(struct drm_i915_private *i915,
 282			const struct intel_crtc_state *pipe_config)
 283{
 284	if (HAS_DDI(i915))
 285		return pipe_config->port_clock; /* SPLL */
 286	else
 287		return i915->display.fdi.pll_freq;
 288}
 289
 290/**
 291 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 292 * @crtc_state: the crtc state
 293 *
 294 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 295 * call this function during state computation in the simple case where the
 296 * link bpp will always match the pipe bpp. This is the case for all non-DP
 297 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 298 * of DSC compression.
 299 *
 300 * Returns %true in case of success, %false if pipe bpp would need to be
 301 * reduced below its valid range.
 302 */
 303bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
 304{
 305	int pipe_bpp = min(crtc_state->pipe_bpp,
 306			   to_bpp_int(crtc_state->max_link_bpp_x16));
 307
 308	pipe_bpp = rounddown(pipe_bpp, 2 * 3);
 309
 310	if (pipe_bpp < 6 * 3)
 311		return false;
 312
 313	crtc_state->pipe_bpp = pipe_bpp;
 314
 315	return true;
 316}
 317
 318int ilk_fdi_compute_config(struct intel_crtc *crtc,
 319			   struct intel_crtc_state *pipe_config)
 320{
 321	struct drm_device *dev = crtc->base.dev;
 322	struct drm_i915_private *i915 = to_i915(dev);
 323	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 324	int lane, link_bw, fdi_dotclock;
 325
 326	/* FDI is a binary signal running at ~2.7GHz, encoding
 327	 * each output octet as 10 bits. The actual frequency
 328	 * is stored as a divider into a 100MHz clock, and the
 329	 * mode pixel clock is stored in units of 1KHz.
 330	 * Hence the bw of each lane in terms of the mode signal
 331	 * is:
 332	 */
 333	link_bw = intel_fdi_link_freq(i915, pipe_config);
 334
 335	fdi_dotclock = adjusted_mode->crtc_clock;
 336
 337	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
 338				      pipe_config->pipe_bpp);
 339
 340	pipe_config->fdi_lanes = lane;
 341
 342	intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
 343			       lane, fdi_dotclock,
 344			       link_bw,
 345			       intel_dp_bw_fec_overhead(false),
 346			       &pipe_config->fdi_m_n);
 347
 348	return 0;
 349}
 350
 351static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
 352				     struct intel_crtc *crtc,
 353				     struct intel_crtc_state *pipe_config,
 354				     struct intel_link_bw_limits *limits)
 355{
 356	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 357	enum pipe pipe_to_reduce;
 358	int ret;
 359
 360	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
 361				  &pipe_to_reduce);
 362	if (ret != -EINVAL)
 363		return ret;
 364
 365	ret = intel_link_bw_reduce_bpp(state, limits,
 366				       BIT(pipe_to_reduce),
 367				       "FDI link BW");
 368
 369	return ret ? : -EAGAIN;
 370}
 371
 372/**
 373 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 374 * @state: intel atomic state
 375 * @limits: link BW limits
 376 *
 377 * Check the link configuration for all modeset FDI outputs. If the
 378 * configuration is invalid @limits will be updated if possible to
 379 * reduce the total BW, after which the configuration for all CRTCs in
 380 * @state must be recomputed with the updated @limits.
 381 *
 382 * Returns:
 383 *   - 0 if the confugration is valid
 384 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 385 *     with fallback values with which the configuration of all CRTCs
 386 *     in @state must be recomputed
 387 *   - Other negative error, if the configuration is invalid without a
 388 *     fallback possibility, or the check failed for another reason
 389 */
 390int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
 391				struct intel_link_bw_limits *limits)
 392{
 393	struct intel_crtc *crtc;
 394	struct intel_crtc_state *crtc_state;
 395	int i;
 396
 397	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 398		int ret;
 399
 400		if (!crtc_state->has_pch_encoder ||
 401		    !intel_crtc_needs_modeset(crtc_state) ||
 402		    !crtc_state->hw.enable)
 403			continue;
 404
 405		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
 406		if (ret)
 407			return ret;
 408	}
 409
 410	return 0;
 411}
 412
 413static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
 414{
 415	u32 temp;
 416
 417	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
 418	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
 419		return;
 420
 421	drm_WARN_ON(&dev_priv->drm,
 422		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
 423		    FDI_RX_ENABLE);
 424	drm_WARN_ON(&dev_priv->drm,
 425		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
 426		    FDI_RX_ENABLE);
 427
 428	temp &= ~FDI_BC_BIFURCATION_SELECT;
 429	if (enable)
 430		temp |= FDI_BC_BIFURCATION_SELECT;
 431
 432	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
 433		    enable ? "en" : "dis");
 434	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
 435	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
 436}
 437
 438static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
 439{
 440	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 441	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 442
 443	switch (crtc->pipe) {
 444	case PIPE_A:
 445		break;
 446	case PIPE_B:
 447		if (crtc_state->fdi_lanes > 2)
 448			cpt_set_fdi_bc_bifurcation(dev_priv, false);
 449		else
 450			cpt_set_fdi_bc_bifurcation(dev_priv, true);
 451
 452		break;
 453	case PIPE_C:
 454		cpt_set_fdi_bc_bifurcation(dev_priv, true);
 455
 456		break;
 457	default:
 458		MISSING_CASE(crtc->pipe);
 459	}
 460}
 461
 462void intel_fdi_normal_train(struct intel_crtc *crtc)
 463{
 464	struct drm_device *dev = crtc->base.dev;
 465	struct drm_i915_private *dev_priv = to_i915(dev);
 466	enum pipe pipe = crtc->pipe;
 467	i915_reg_t reg;
 468	u32 temp;
 469
 470	/* enable normal train */
 471	reg = FDI_TX_CTL(pipe);
 472	temp = intel_de_read(dev_priv, reg);
 473	if (IS_IVYBRIDGE(dev_priv)) {
 474		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
 475		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
 476	} else {
 477		temp &= ~FDI_LINK_TRAIN_NONE;
 478		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
 479	}
 480	intel_de_write(dev_priv, reg, temp);
 481
 482	reg = FDI_RX_CTL(pipe);
 483	temp = intel_de_read(dev_priv, reg);
 484	if (HAS_PCH_CPT(dev_priv)) {
 485		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 486		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
 487	} else {
 488		temp &= ~FDI_LINK_TRAIN_NONE;
 489		temp |= FDI_LINK_TRAIN_NONE;
 490	}
 491	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
 492
 493	/* wait one idle pattern time */
 494	intel_de_posting_read(dev_priv, reg);
 495	udelay(1000);
 496
 497	/* IVB wants error correction enabled */
 498	if (IS_IVYBRIDGE(dev_priv))
 499		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
 500}
 501
 502/* The FDI link training functions for ILK/Ibexpeak. */
 503static void ilk_fdi_link_train(struct intel_crtc *crtc,
 504			       const struct intel_crtc_state *crtc_state)
 505{
 506	struct drm_device *dev = crtc->base.dev;
 507	struct drm_i915_private *dev_priv = to_i915(dev);
 508	enum pipe pipe = crtc->pipe;
 509	i915_reg_t reg;
 510	u32 temp, tries;
 511
 512	/*
 513	 * Write the TU size bits before fdi link training, so that error
 514	 * detection works.
 515	 */
 516	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
 517		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 518
 519	/* FDI needs bits from pipe first */
 520	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
 521
 522	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
 523	   for train result */
 524	reg = FDI_RX_IMR(pipe);
 525	temp = intel_de_read(dev_priv, reg);
 526	temp &= ~FDI_RX_SYMBOL_LOCK;
 527	temp &= ~FDI_RX_BIT_LOCK;
 528	intel_de_write(dev_priv, reg, temp);
 529	intel_de_read(dev_priv, reg);
 530	udelay(150);
 531
 532	/* enable CPU FDI TX and PCH FDI RX */
 533	reg = FDI_TX_CTL(pipe);
 534	temp = intel_de_read(dev_priv, reg);
 535	temp &= ~FDI_DP_PORT_WIDTH_MASK;
 536	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 537	temp &= ~FDI_LINK_TRAIN_NONE;
 538	temp |= FDI_LINK_TRAIN_PATTERN_1;
 539	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 540
 541	reg = FDI_RX_CTL(pipe);
 542	temp = intel_de_read(dev_priv, reg);
 543	temp &= ~FDI_LINK_TRAIN_NONE;
 544	temp |= FDI_LINK_TRAIN_PATTERN_1;
 545	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 546
 547	intel_de_posting_read(dev_priv, reg);
 548	udelay(150);
 549
 550	/* Ironlake workaround, enable clock pointer after FDI enable */
 551	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
 552		       FDI_RX_PHASE_SYNC_POINTER_OVR);
 553	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
 554		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
 555
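	/* Poll FDI_RX_IIR for bit lock to confirm training pattern 1 completed */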
 556	reg = FDI_RX_IIR(pipe);
 557	for (tries = 0; tries < 5; tries++) {
 558		temp = intel_de_read(dev_priv, reg);
 559		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 560
 561		if ((temp & FDI_RX_BIT_LOCK)) {
 562			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
 563			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
 564			break;
 565		}
 566	}
 567	if (tries == 5)
 568		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
 569
 570	/* Train 2 */
 571	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 572		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
 573	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
 574		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
 575	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
 576	udelay(150);
 577
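	/* Poll FDI_RX_IIR for symbol lock to confirm training pattern 2 completed */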
 578	reg = FDI_RX_IIR(pipe);
 579	for (tries = 0; tries < 5; tries++) {
 580		temp = intel_de_read(dev_priv, reg);
 581		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 582
 583		if (temp & FDI_RX_SYMBOL_LOCK) {
 584			intel_de_write(dev_priv, reg,
 585				       temp | FDI_RX_SYMBOL_LOCK);
 586			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
 587			break;
 588		}
 589	}
 590	if (tries == 5)
 591		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
 592
 593	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
 594
 595}
 596
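/*
 * Voltage swing / pre-emphasis levels tried in order during SNB and IVB
 * FDI link training (the SNB-B values for FDI_TX_CTL).
 */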
 597static const int snb_b_fdi_train_param[] = {
 598	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
 599	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
 600	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
 601	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
 602};
 603
 604/* The FDI link training functions for SNB/Cougar Point. */
 605static void gen6_fdi_link_train(struct intel_crtc *crtc,
 606				const struct intel_crtc_state *crtc_state)
 607{
 608	struct drm_device *dev = crtc->base.dev;
 609	struct drm_i915_private *dev_priv = to_i915(dev);
 610	enum pipe pipe = crtc->pipe;
 611	i915_reg_t reg;
 612	u32 temp, i, retry;
 613
 614	/*
 615	 * Write the TU size bits before fdi link training, so that error
 616	 * detection works.
 617	 */
 618	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
 619		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 620
 621	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 622	   for train result */
 623	reg = FDI_RX_IMR(pipe);
 624	temp = intel_de_read(dev_priv, reg);
 625	temp &= ~FDI_RX_SYMBOL_LOCK;
 626	temp &= ~FDI_RX_BIT_LOCK;
 627	intel_de_write(dev_priv, reg, temp);
 628
 629	intel_de_posting_read(dev_priv, reg);
 630	udelay(150);
 631
 632	/* enable CPU FDI TX and PCH FDI RX */
 633	reg = FDI_TX_CTL(pipe);
 634	temp = intel_de_read(dev_priv, reg);
 635	temp &= ~FDI_DP_PORT_WIDTH_MASK;
 636	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 637	temp &= ~FDI_LINK_TRAIN_NONE;
 638	temp |= FDI_LINK_TRAIN_PATTERN_1;
 639	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 640	/* SNB-B */
 641	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 642	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 643
 644	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
 645		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 646
 647	reg = FDI_RX_CTL(pipe);
 648	temp = intel_de_read(dev_priv, reg);
 649	if (HAS_PCH_CPT(dev_priv)) {
 650		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 651		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 652	} else {
 653		temp &= ~FDI_LINK_TRAIN_NONE;
 654		temp |= FDI_LINK_TRAIN_PATTERN_1;
 655	}
 656	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 657
 658	intel_de_posting_read(dev_priv, reg);
 659	udelay(150);
 660
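	/* Walk the vswing/pre-emphasis levels, polling FDI_RX_IIR for bit
	 * lock at each level before giving up and trying the next one. */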
 661	for (i = 0; i < 4; i++) {
 662		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 663			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
 664		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
 665		udelay(500);
 666
 667		for (retry = 0; retry < 5; retry++) {
 668			reg = FDI_RX_IIR(pipe);
 669			temp = intel_de_read(dev_priv, reg);
 670			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 671			if (temp & FDI_RX_BIT_LOCK) {
 672				intel_de_write(dev_priv, reg,
 673					       temp | FDI_RX_BIT_LOCK);
 674				drm_dbg_kms(&dev_priv->drm,
 675					    "FDI train 1 done.\n");
 676				break;
 677			}
 678			udelay(50);
 679		}
 680		if (retry < 5)
 681			break;
 682	}
 683	if (i == 4)
 684		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
 685
 686	/* Train 2 */
 687	reg = FDI_TX_CTL(pipe);
 688	temp = intel_de_read(dev_priv, reg);
 689	temp &= ~FDI_LINK_TRAIN_NONE;
 690	temp |= FDI_LINK_TRAIN_PATTERN_2;
 691	if (IS_SANDYBRIDGE(dev_priv)) {
 692		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 693		/* SNB-B */
 694		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 695	}
 696	intel_de_write(dev_priv, reg, temp);
 697
 698	reg = FDI_RX_CTL(pipe);
 699	temp = intel_de_read(dev_priv, reg);
 700	if (HAS_PCH_CPT(dev_priv)) {
 701		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 702		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
 703	} else {
 704		temp &= ~FDI_LINK_TRAIN_NONE;
 705		temp |= FDI_LINK_TRAIN_PATTERN_2;
 706	}
 707	intel_de_write(dev_priv, reg, temp);
 708
 709	intel_de_posting_read(dev_priv, reg);
 710	udelay(150);
 711
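	/* Walk the vswing/pre-emphasis levels again, this time waiting for
	 * symbol lock to complete training pattern 2. */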
 712	for (i = 0; i < 4; i++) {
 713		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 714			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
 715		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
 716		udelay(500);
 717
 718		for (retry = 0; retry < 5; retry++) {
 719			reg = FDI_RX_IIR(pipe);
 720			temp = intel_de_read(dev_priv, reg);
 721			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 722			if (temp & FDI_RX_SYMBOL_LOCK) {
 723				intel_de_write(dev_priv, reg,
 724					       temp | FDI_RX_SYMBOL_LOCK);
 725				drm_dbg_kms(&dev_priv->drm,
 726					    "FDI train 2 done.\n");
 727				break;
 728			}
 729			udelay(50);
 730		}
 731		if (retry < 5)
 732			break;
 733	}
 734	if (i == 4)
 735		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
 736
 737	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
 738}
 739
 740/* Manual link training for Ivy Bridge A0 parts */
 741static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
 742				      const struct intel_crtc_state *crtc_state)
 743{
 744	struct drm_device *dev = crtc->base.dev;
 745	struct drm_i915_private *dev_priv = to_i915(dev);
 746	enum pipe pipe = crtc->pipe;
 747	i915_reg_t reg;
 748	u32 temp, i, j;
 749
 750	ivb_update_fdi_bc_bifurcation(crtc_state);
 751
 752	/*
 753	 * Write the TU size bits before fdi link training, so that error
 754	 * detection works.
 755	 */
 756	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
 757		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 758
 759	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 760	   for train result */
 761	reg = FDI_RX_IMR(pipe);
 762	temp = intel_de_read(dev_priv, reg);
 763	temp &= ~FDI_RX_SYMBOL_LOCK;
 764	temp &= ~FDI_RX_BIT_LOCK;
 765	intel_de_write(dev_priv, reg, temp);
 766
 767	intel_de_posting_read(dev_priv, reg);
 768	udelay(150);
 769
 770	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
 771		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
 772
 773	/* Try each vswing and preemphasis setting twice before moving on */
 774	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
 775		/* disable first in case we need to retry */
 776		reg = FDI_TX_CTL(pipe);
 777		temp = intel_de_read(dev_priv, reg);
 778		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
 779		temp &= ~FDI_TX_ENABLE;
 780		intel_de_write(dev_priv, reg, temp);
 781
 782		reg = FDI_RX_CTL(pipe);
 783		temp = intel_de_read(dev_priv, reg);
 784		temp &= ~FDI_LINK_TRAIN_AUTO;
 785		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 786		temp &= ~FDI_RX_ENABLE;
 787		intel_de_write(dev_priv, reg, temp);
 788
 789		/* enable CPU FDI TX and PCH FDI RX */
 790		reg = FDI_TX_CTL(pipe);
 791		temp = intel_de_read(dev_priv, reg);
 792		temp &= ~FDI_DP_PORT_WIDTH_MASK;
 793		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 794		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
 795		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 796		temp |= snb_b_fdi_train_param[j/2];
 797		temp |= FDI_COMPOSITE_SYNC;
 798		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 799
 800		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
 801			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 802
 803		reg = FDI_RX_CTL(pipe);
 804		temp = intel_de_read(dev_priv, reg);
 805		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 806		temp |= FDI_COMPOSITE_SYNC;
 807		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 808
 809		intel_de_posting_read(dev_priv, reg);
 810		udelay(1); /* should be 0.5us */
 811
 812		for (i = 0; i < 4; i++) {
 813			reg = FDI_RX_IIR(pipe);
 814			temp = intel_de_read(dev_priv, reg);
 815			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 816
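			/* Check both the cached and a fresh IIR read for bit lock */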
 817			if (temp & FDI_RX_BIT_LOCK ||
 818			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
 819				intel_de_write(dev_priv, reg,
 820					       temp | FDI_RX_BIT_LOCK);
 821				drm_dbg_kms(&dev_priv->drm,
 822					    "FDI train 1 done, level %i.\n",
 823					    i);
 824				break;
 825			}
 826			udelay(1); /* should be 0.5us */
 827		}
 828		if (i == 4) {
 829			drm_dbg_kms(&dev_priv->drm,
 830				    "FDI train 1 fail on vswing %d\n", j / 2);
 831			continue;
 832		}
 833
 834		/* Train 2 */
 835		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
 836			     FDI_LINK_TRAIN_NONE_IVB,
 837			     FDI_LINK_TRAIN_PATTERN_2_IVB);
 838		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
 839			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
 840			     FDI_LINK_TRAIN_PATTERN_2_CPT);
 841		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
 842		udelay(2); /* should be 1.5us */
 843
 844		for (i = 0; i < 4; i++) {
 845			reg = FDI_RX_IIR(pipe);
 846			temp = intel_de_read(dev_priv, reg);
 847			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 848
 849			if (temp & FDI_RX_SYMBOL_LOCK ||
 850			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
 851				intel_de_write(dev_priv, reg,
 852					       temp | FDI_RX_SYMBOL_LOCK);
 853				drm_dbg_kms(&dev_priv->drm,
 854					    "FDI train 2 done, level %i.\n",
 855					    i);
 856				goto train_done;
 857			}
 858			udelay(2); /* should be 1.5us */
 859		}
 860		if (i == 4)
 861			drm_dbg_kms(&dev_priv->drm,
 862				    "FDI train 2 fail on vswing %d\n", j / 2);
 863	}
 864
 865train_done:
 866	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
 867}
 868
 869/* Starting with Haswell, different DDI ports can work in FDI mode for
 870 * connection to the PCH-located connectors. For this, it is necessary to train
 871 * both the DDI port and the PCH receiver for the desired DDI buffer settings.
 872 *
 873 * The recommended port to work in FDI mode is DDI E, which is what we use here.
 874 * Note that when FDI mode is active on DDI E, it shares 2 lines with
 875 * DDI A (which is used for eDP).
 876 */
 877void hsw_fdi_link_train(struct intel_encoder *encoder,
 878			const struct intel_crtc_state *crtc_state)
 879{
 880	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 881	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 882	u32 temp, i, rx_ctl_val;
 883	int n_entries;
 884
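	/* n_entries is the number of DDI buffer translation entries; the
	 * training loop below tries each entry twice. */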
 885	encoder->get_buf_trans(encoder, crtc_state, &n_entries);
 886
 887	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
 888
 889	/* Set the FDI_RX_MISC pwrdn lanes and apply the 2 workarounds listed
 890	 * in the mode set "Sequence for CRT port" document:
 891	 * - TP1 to TP2 time with the default value
 892	 * - FDI delay to 90h
 893	 *
 894	 * WaFDIAutoLinkSetTimingOverrride:hsw
 895	 */
 896	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
 897		       FDI_RX_PWRDN_LANE1_VAL(2) |
 898		       FDI_RX_PWRDN_LANE0_VAL(2) |
 899		       FDI_RX_TP1_TO_TP2_48 |
 900		       FDI_RX_FDI_DELAY_90);
 901
 902	/* Enable the PCH Receiver FDI PLL */
 903	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
 904		     FDI_RX_PLL_ENABLE |
 905		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 906	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 907	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 908	udelay(220);
 909
 910	/* Switch from Rawclk to PCDclk */
 911	rx_ctl_val |= FDI_PCDCLK;
 912	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 913
 914	/* Configure Port Clock Select */
 915	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
 916	intel_ddi_enable_clock(encoder, crtc_state);
 917
 918	/* Start the training, iterating through the available voltage swing and
 919	 * pre-emphasis settings and testing each value twice. */
 920	for (i = 0; i < n_entries * 2; i++) {
 921		/* Configure DP_TP_CTL with auto-training */
 922		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
 923			       DP_TP_CTL_FDI_AUTOTRAIN |
 924			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
 925			       DP_TP_CTL_LINK_TRAIN_PAT1 |
 926			       DP_TP_CTL_ENABLE);
 927
 928		/* Configure and enable DDI_BUF_CTL for DDI E with the next voltage.
 929		 * DDI E does not support port reversal; that functionality is
 930		 * achieved on the PCH side in FDI_RX_CTL, so there is no need to
 931		 * set the port reversal bit */
 932		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
 933			       DDI_BUF_CTL_ENABLE |
 934			       ((crtc_state->fdi_lanes - 1) << 1) |
 935			       DDI_BUF_TRANS_SELECT(i / 2));
 936		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
 937
 938		udelay(600);
 939
 940		/* Program PCH FDI Receiver TU */
 941		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
 942
 943		/* Enable PCH FDI Receiver with auto-training */
 944		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
 945		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 946		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 947
 948		/* Wait for FDI receiver lane calibration */
 949		udelay(30);
 950
 951		/* Unset FDI_RX_MISC pwrdn lanes */
 952		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
 953			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
 954		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
 955
 956		/* Wait for FDI auto training time */
 957		udelay(5);
 958
 959		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
 960		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
 961			drm_dbg_kms(&dev_priv->drm,
 962				    "FDI link training done on step %d\n", i);
 963			break;
 964		}
 965
 966		/*
 967		 * Leave things enabled even if we failed to train FDI.
 968		 * Results in less fireworks from the state checker.
 969		 */
 970		if (i == n_entries * 2 - 1) {
 971			drm_err(&dev_priv->drm, "FDI link training failed!\n");
 972			break;
 973		}
 974
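		/* Training failed at this setting: tear everything down and
		 * retry with the next voltage/emphasis entry. */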
 975		rx_ctl_val &= ~FDI_RX_ENABLE;
 976		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 977		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 978
 979		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
 980		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
 981
 982		/* Disable DP_TP_CTL before retrying */
 983		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
 984		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
 985
 986		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 987
 988		/* Reset FDI_RX_MISC pwrdn lanes */
 989		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
 990			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
 991			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
 992		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
 993	}
 994
 995	/* Enable normal pixel sending for FDI */
 996	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
 997		       DP_TP_CTL_FDI_AUTOTRAIN |
 998		       DP_TP_CTL_LINK_TRAIN_NORMAL |
 999		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
1000		       DP_TP_CTL_ENABLE);
1001}
1002
1003void hsw_fdi_disable(struct intel_encoder *encoder)
1004{
1005	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1006
1007	/*
1008	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
1009	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
1010	 * step 13 is the correct place for it. Step 18 is where it was
1011	 * originally before the BUN.
1012	 */
1013	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
1014	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
1015	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
1016	intel_ddi_disable_clock(encoder);
1017	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
1018		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
1019		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
1020	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
1021	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
1022}
1023
1024void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
1025{
1026	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1027	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1028	enum pipe pipe = crtc->pipe;
1029	i915_reg_t reg;
1030	u32 temp;
1031
1032	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1033	reg = FDI_RX_CTL(pipe);
1034	temp = intel_de_read(dev_priv, reg);
1035	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
1036	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
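	/* Copy the pipe's BPC from TRANSCONF into the FDI RX BPC field */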
1037	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
1038	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
1039
1040	intel_de_posting_read(dev_priv, reg);
1041	udelay(200);
1042
1043	/* Switch from Rawclk to PCDclk */
1044	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
1045	intel_de_posting_read(dev_priv, reg);
1046	udelay(200);
1047
1048	/* Enable CPU FDI TX PLL, always on for Ironlake */
1049	reg = FDI_TX_CTL(pipe);
1050	temp = intel_de_read(dev_priv, reg);
1051	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1052		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
1053
1054		intel_de_posting_read(dev_priv, reg);
1055		udelay(100);
1056	}
1057}
1058
1059void ilk_fdi_pll_disable(struct intel_crtc *crtc)
1060{
1061	struct drm_device *dev = crtc->base.dev;
1062	struct drm_i915_private *dev_priv = to_i915(dev);
1063	enum pipe pipe = crtc->pipe;
1064
1065	/* Switch from PCDclk to Rawclk */
1066	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
1067
1068	/* Disable CPU FDI TX PLL */
1069	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
1070	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1071	udelay(100);
1072
1073	/* Disable the PCH FDI RX PLL and wait for the clocks to turn off. */
1074	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
1075	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
1076	udelay(100);
1077}
1078
1079void ilk_fdi_disable(struct intel_crtc *crtc)
1080{
1081	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1082	enum pipe pipe = crtc->pipe;
1083	i915_reg_t reg;
1084	u32 temp;
1085
1086	/* disable CPU FDI tx and PCH FDI rx */
1087	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
1088	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1089
1090	reg = FDI_RX_CTL(pipe);
1091	temp = intel_de_read(dev_priv, reg);
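	/* Keep the FDI RX BPC field in sync with TRANSCONF while disabling */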
1092	temp &= ~(0x7 << 16);
1093	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
1094	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
1095
1096	intel_de_posting_read(dev_priv, reg);
1097	udelay(100);
1098
1099	/* Ironlake workaround, disable clock pointer after downing FDI */
1100	if (HAS_PCH_IBX(dev_priv))
1101		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
1102			       FDI_RX_PHASE_SYNC_POINTER_OVR);
1103
1104	/* still set train pattern 1 */
1105	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
1106		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
1107
1108	reg = FDI_RX_CTL(pipe);
1109	temp = intel_de_read(dev_priv, reg);
1110	if (HAS_PCH_CPT(dev_priv)) {
1111		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1112		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1113	} else {
1114		temp &= ~FDI_LINK_TRAIN_NONE;
1115		temp |= FDI_LINK_TRAIN_PATTERN_1;
1116	}
1117	/* BPC in FDI rx is consistent with that in TRANSCONF */
1118	temp &= ~(0x07 << 16);
1119	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
1120	intel_de_write(dev_priv, reg, temp);
1121
1122	intel_de_posting_read(dev_priv, reg);
1123	udelay(100);
1124}
1125
1126static const struct intel_fdi_funcs ilk_funcs = {
1127	.fdi_link_train = ilk_fdi_link_train,
1128};
1129
1130static const struct intel_fdi_funcs gen6_funcs = {
1131	.fdi_link_train = gen6_fdi_link_train,
1132};
1133
1134static const struct intel_fdi_funcs ivb_funcs = {
1135	.fdi_link_train = ivb_manual_fdi_link_train,
1136};
1137
1138void
1139intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1140{
1141	if (IS_IRONLAKE(dev_priv)) {
1142		dev_priv->display.funcs.fdi = &ilk_funcs;
1143	} else if (IS_SANDYBRIDGE(dev_priv)) {
1144		dev_priv->display.funcs.fdi = &gen6_funcs;
1145	} else if (IS_IVYBRIDGE(dev_priv)) {
1146		/* FIXME: detect B0+ stepping and use auto training */
1147		dev_priv->display.funcs.fdi = &ivb_funcs;
1148	}
1149}