v6.13.7
   1/*
   2 * Copyright © 2014-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include "bxt_dpio_phy_regs.h"
  25#include "i915_reg.h"
  26#include "intel_ddi.h"
  27#include "intel_ddi_buf_trans.h"
  28#include "intel_de.h"
  29#include "intel_display_power_well.h"
  30#include "intel_display_types.h"
  31#include "intel_dp.h"
  32#include "intel_dpio_phy.h"
  33#include "vlv_dpio_phy_regs.h"
  34#include "vlv_sideband.h"
  35
  36/**
  37 * DOC: DPIO
  38 *
  39 * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
  40 * ports. DPIO is the name given to such a display PHY. These PHYs
  41 * don't follow the standard programming model using direct MMIO
   42 * registers, and instead their registers must be accessed through IOSF
  43 * sideband. VLV has one such PHY for driving ports B and C, and CHV
   44 * adds another PHY for driving port D. Each PHY responds to a specific
  45 * IOSF-SB port.
  46 *
  47 * Each display PHY is made up of one or two channels. Each channel
  48 * houses a common lane part which contains the PLL and other common
  49 * logic. CH0 common lane also contains the IOSF-SB logic for the
   50 * Common Register Interface (CRI), i.e. the DPIO registers. CRI clock
  51 * must be running when any DPIO registers are accessed.
  52 *
  53 * In addition to having their own registers, the PHYs are also
  54 * controlled through some dedicated signals from the display
  55 * controller. These include PLL reference clock enable, PLL enable,
  56 * and CRI clock selection, for example.
  57 *
   58 * Each channel also has two splines (also called data lanes), and
  59 * each spline is made up of one Physical Access Coding Sub-Layer
  60 * (PCS) block and two TX lanes. So each channel has two PCS blocks
  61 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
  62 * data/clock pairs depending on the output type.
  63 *
   64 * Additionally the PHY contains an AUX lane with AUX blocks
   65 * for each channel. This is used for DP AUX communication, but
   66 * this fact isn't really relevant for the driver since AUX is
   67 * controlled from the display controller side. No DPIO registers
   68 * need to be accessed during AUX communication.
  69 *
  70 * Generally on VLV/CHV the common lane corresponds to the pipe and
  71 * the spline (PCS/TX) corresponds to the port.
  72 *
  73 * For dual channel PHY (VLV/CHV):
  74 *
  75 *  pipe A == CMN/PLL/REF CH0
  76 *
  77 *  pipe B == CMN/PLL/REF CH1
  78 *
  79 *  port B == PCS/TX CH0
  80 *
  81 *  port C == PCS/TX CH1
  82 *
   83 * This is especially important when we cross the streams,
   84 * i.e. drive port B with pipe B, or port C with pipe A.
  85 *
  86 * For single channel PHY (CHV):
  87 *
  88 *  pipe C == CMN/PLL/REF CH0
  89 *
  90 *  port D == PCS/TX CH0
  91 *
  92 * On BXT the entire PHY channel corresponds to the port. That means
  93 * the PLL is also now associated with the port rather than the pipe,
  94 * and so the clock needs to be routed to the appropriate transcoder.
  95 * Port A PLL is directly connected to transcoder EDP and port B/C
  96 * PLLs can be routed to any transcoder A/B/C.
  97 *
   98 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
  99 * digital port D (CHV) or port A (BXT). ::
 100 *
 101 *
 102 *     Dual channel PHY (VLV/CHV/BXT)
 103 *     ---------------------------------
 104 *     |      CH0      |      CH1      |
 105 *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
 106 *     |---------------|---------------| Display PHY
 107 *     | PCS01 | PCS23 | PCS01 | PCS23 |
 108 *     |-------|-------|-------|-------|
 109 *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
 110 *     ---------------------------------
 111 *     |     DDI0      |     DDI1      | DP/HDMI ports
 112 *     ---------------------------------
 113 *
 114 *     Single channel PHY (CHV/BXT)
 115 *     -----------------
 116 *     |      CH0      |
 117 *     |  CMN/PLL/REF  |
 118 *     |---------------| Display PHY
 119 *     | PCS01 | PCS23 |
 120 *     |-------|-------|
 121 *     |TX0|TX1|TX2|TX3|
 122 *     -----------------
 123 *     |     DDI2      | DP/HDMI port
 124 *     -----------------
 125 */
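/*
 * Illustrative sketch (not part of the upstream file): how a caller
 * resolves a BXT/GLK port to the PHY/channel pair described above,
 * using bxt_port_to_phy_channel() from this file. The wrapper function
 * name is hypothetical.
 */
static void __maybe_unused bxt_dpio_doc_example(struct intel_display *display,
						enum port port)
{
	enum dpio_phy phy;
	enum dpio_channel ch;

	/* e.g. on BXT, port B maps to DPIO_PHY0/DPIO_CH0 and port C to DPIO_PHY0/DPIO_CH1 */
	bxt_port_to_phy_channel(display, port, &phy, &ch);

	drm_dbg(display->drm, "port %c -> DPIO PHY %d, channel %d\n",
		port_name(port), phy, ch);
}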
 126
 127/**
  128 * struct bxt_dpio_phy_info - Hold info for a Broxton DDI phy
 129 */
 130struct bxt_dpio_phy_info {
 131	/**
 132	 * @dual_channel: true if this phy has a second channel.
 133	 */
 134	bool dual_channel;
 135
 136	/**
 137	 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
 138	 * Otherwise the GRC value will be copied from the phy indicated by
 139	 * this field.
 140	 */
 141	enum dpio_phy rcomp_phy;
 142
 143	/**
 144	 * @reset_delay: delay in us to wait before setting the common reset
 145	 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
 146	 */
 147	int reset_delay;
 148
 149	/**
 150	 * @pwron_mask: Mask with the appropriate bit set that would cause the
 151	 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
 152	 */
 153	u32 pwron_mask;
 154
 155	/**
 156	 * @channel: struct containing per channel information.
 157	 */
 158	struct {
 159		/**
 160		 * @channel.port: which port maps to this channel.
 161		 */
 162		enum port port;
 163	} channel[2];
 164};
 165
 166static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = {
 167	[DPIO_PHY0] = {
 168		.dual_channel = true,
 169		.rcomp_phy = DPIO_PHY1,
 170		.pwron_mask = BIT(0),
 171
 172		.channel = {
 173			[DPIO_CH0] = { .port = PORT_B },
 174			[DPIO_CH1] = { .port = PORT_C },
 175		}
 176	},
 177	[DPIO_PHY1] = {
 178		.dual_channel = false,
 179		.rcomp_phy = -1,
 180		.pwron_mask = BIT(1),
 181
 182		.channel = {
 183			[DPIO_CH0] = { .port = PORT_A },
 184		}
 185	},
 186};
 187
 188static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
 189	[DPIO_PHY0] = {
 190		.dual_channel = false,
 191		.rcomp_phy = DPIO_PHY1,
 192		.pwron_mask = BIT(0),
 193		.reset_delay = 20,
 194
 195		.channel = {
 196			[DPIO_CH0] = { .port = PORT_B },
 197		}
 198	},
 199	[DPIO_PHY1] = {
 200		.dual_channel = false,
 201		.rcomp_phy = -1,
 202		.pwron_mask = BIT(3),
 203		.reset_delay = 20,
 204
 205		.channel = {
 206			[DPIO_CH0] = { .port = PORT_A },
 207		}
 208	},
 209	[DPIO_PHY2] = {
 210		.dual_channel = false,
 211		.rcomp_phy = DPIO_PHY1,
 212		.pwron_mask = BIT(1),
 213		.reset_delay = 20,
 214
 215		.channel = {
 216			[DPIO_CH0] = { .port = PORT_C },
 217		}
 218	},
 219};
 220
 221static const struct bxt_dpio_phy_info *
 222bxt_get_phy_list(struct intel_display *display, int *count)
 223{
 224	struct drm_i915_private *dev_priv = to_i915(display->drm);
 225
 226	if (IS_GEMINILAKE(dev_priv)) {
 227		*count =  ARRAY_SIZE(glk_dpio_phy_info);
 228		return glk_dpio_phy_info;
 229	} else {
 230		*count =  ARRAY_SIZE(bxt_dpio_phy_info);
 231		return bxt_dpio_phy_info;
 232	}
 233}
 234
 235static const struct bxt_dpio_phy_info *
 236bxt_get_phy_info(struct intel_display *display, enum dpio_phy phy)
 237{
 238	int count;
 239	const struct bxt_dpio_phy_info *phy_list =
 240		bxt_get_phy_list(display, &count);
 241
 242	return &phy_list[phy];
 243}
 244
 245void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
 246			     enum dpio_phy *phy, enum dpio_channel *ch)
 247{
 248	const struct bxt_dpio_phy_info *phy_info, *phys;
 249	int i, count;
 250
 251	phys = bxt_get_phy_list(display, &count);
 252
 253	for (i = 0; i < count; i++) {
 254		phy_info = &phys[i];
 255
 256		if (port == phy_info->channel[DPIO_CH0].port) {
 257			*phy = i;
 258			*ch = DPIO_CH0;
 259			return;
 260		}
 261
 262		if (phy_info->dual_channel &&
 263		    port == phy_info->channel[DPIO_CH1].port) {
 264			*phy = i;
 265			*ch = DPIO_CH1;
 266			return;
 267		}
 268	}
 269
 270	drm_WARN(display->drm, 1, "PHY not found for PORT %c",
 271		 port_name(port));
 272	*phy = DPIO_PHY0;
 273	*ch = DPIO_CH0;
 274}
 275
 276/*
 277 * Like intel_de_rmw() but reads from a single per-lane register and
 278 * writes to the group register to write the same value to all the lanes.
 279 */
 280static u32 bxt_dpio_phy_rmw_grp(struct intel_display *display,
 281				i915_reg_t reg_single,
 282				i915_reg_t reg_group,
 283				u32 clear, u32 set)
 284{
 285	u32 old, val;
 286
 287	old = intel_de_read(display, reg_single);
 288	val = (old & ~clear) | set;
 289	intel_de_write(display, reg_group, val);
 290
 291	return old;
 292}
 293
 294void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
 295				    const struct intel_crtc_state *crtc_state)
 296{
 297	struct intel_display *display = to_intel_display(encoder);
 298	const struct intel_ddi_buf_trans *trans;
 299	enum dpio_channel ch;
 300	enum dpio_phy phy;
 301	int lane, n_entries;
 302
 303	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
 304	if (drm_WARN_ON_ONCE(display->drm, !trans))
 305		return;
 306
 307	bxt_port_to_phy_channel(display, encoder->port, &phy, &ch);
 308
 309	/*
  310	 * While we write to the group register to program all lanes at once, we
  311	 * can only read individual lane registers, so we pick lanes 0/1 for that.
 312	 */
 313	bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
 314			     BXT_PORT_PCS_DW10_GRP(phy, ch),
 315			     TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
 316
 317	for (lane = 0; lane < crtc_state->lane_count; lane++) {
 318		int level = intel_ddi_level(encoder, crtc_state, lane);
 319
 320		intel_de_rmw(display, BXT_PORT_TX_DW2_LN(phy, ch, lane),
 321			     MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
 322			     MARGIN_000(trans->entries[level].bxt.margin) |
 323			     UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
 324	}
 325
 326	for (lane = 0; lane < crtc_state->lane_count; lane++) {
 327		int level = intel_ddi_level(encoder, crtc_state, lane);
 328		u32 val;
 329
 330		intel_de_rmw(display, BXT_PORT_TX_DW3_LN(phy, ch, lane),
 331			     SCALE_DCOMP_METHOD,
 332			     trans->entries[level].bxt.enable ?
 333			     SCALE_DCOMP_METHOD : 0);
 334
 335		val = intel_de_read(display, BXT_PORT_TX_DW3_LN(phy, ch, lane));
 336		if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
 337			drm_err(display->drm,
  338				"Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
 339	}
 340
 341	for (lane = 0; lane < crtc_state->lane_count; lane++) {
 342		int level = intel_ddi_level(encoder, crtc_state, lane);
 343
 344		intel_de_rmw(display, BXT_PORT_TX_DW4_LN(phy, ch, lane),
 345			     DE_EMPHASIS_MASK,
 346			     DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
 347	}
 348
 349	bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
 350			     BXT_PORT_PCS_DW10_GRP(phy, ch),
 351			     0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
 352}
 353
 354bool bxt_dpio_phy_is_enabled(struct intel_display *display,
 355			     enum dpio_phy phy)
 356{
 357	const struct bxt_dpio_phy_info *phy_info;
 358
 359	phy_info = bxt_get_phy_info(display, phy);
 360
 361	if (!(intel_de_read(display, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
 362		return false;
 363
 364	if ((intel_de_read(display, BXT_PORT_CL1CM_DW0(phy)) &
 365	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
 366		drm_dbg(display->drm,
 367			"DDI PHY %d powered, but power hasn't settled\n", phy);
 368
 369		return false;
 370	}
 371
 372	if (!(intel_de_read(display, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
 373		drm_dbg(display->drm,
 374			"DDI PHY %d powered, but still in reset\n", phy);
 375
 376		return false;
 377	}
 378
 379	return true;
 380}
 381
 382static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy)
 383{
 384	u32 val = intel_de_read(display, BXT_PORT_REF_DW6(phy));
 385
 386	return REG_FIELD_GET(GRC_CODE_MASK, val);
 387}
 388
 389static void bxt_phy_wait_grc_done(struct intel_display *display,
 390				  enum dpio_phy phy)
 391{
 392	if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
 393		drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy);
 394}
 395
 396static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
 397{
 398	const struct bxt_dpio_phy_info *phy_info;
 399	u32 val;
 400
 401	phy_info = bxt_get_phy_info(display, phy);
 402
 403	if (bxt_dpio_phy_is_enabled(display, phy)) {
 404		/* Still read out the GRC value for state verification */
 405		if (phy_info->rcomp_phy != -1)
 406			display->state.bxt_phy_grc = bxt_get_grc(display, phy);
 407
 408		if (bxt_dpio_phy_verify_state(display, phy)) {
 409			drm_dbg(display->drm, "DDI PHY %d already enabled, "
 410				"won't reprogram it\n", phy);
 411			return;
 412		}
 413
 414		drm_dbg(display->drm,
 415			"DDI PHY %d enabled with invalid state, "
 416			"force reprogramming it\n", phy);
 417	}
 418
 419	intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
 420
 421	/*
 422	 * The PHY registers start out inaccessible and respond to reads with
 423	 * all 1s.  Eventually they become accessible as they power up, then
 424	 * the reserved bit will give the default 0.  Poll on the reserved bit
 425	 * becoming 0 to find when the PHY is accessible.
 426	 * The flag should get set in 100us according to the HW team, but
 427	 * use 1ms due to occasional timeouts observed with that.
 428	 */
 429	if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
 430			     PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
 431		drm_err(display->drm, "timeout during PHY%d power on\n",
 432			phy);
 433
 434	/* Program PLL Rcomp code offset */
 435	intel_de_rmw(display, BXT_PORT_CL1CM_DW9(phy),
 436		     IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));
 437
 438	intel_de_rmw(display, BXT_PORT_CL1CM_DW10(phy),
 439		     IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));
 440
 441	/* Program power gating */
 442	intel_de_rmw(display, BXT_PORT_CL1CM_DW28(phy), 0,
 443		     OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);
 444
 445	if (phy_info->dual_channel)
 446		intel_de_rmw(display, BXT_PORT_CL2CM_DW6(phy), 0,
 447			     DW6_OLDO_DYN_PWR_DOWN_EN);
 448
 449	if (phy_info->rcomp_phy != -1) {
 450		u32 grc_code;
 451
 452		bxt_phy_wait_grc_done(display, phy_info->rcomp_phy);
 453
 454		/*
 455		 * PHY0 isn't connected to an RCOMP resistor so copy over
 456		 * the corresponding calibrated value from PHY1, and disable
 457		 * the automatic calibration on PHY0.
 458		 */
 459		val = bxt_get_grc(display, phy_info->rcomp_phy);
 460		display->state.bxt_phy_grc = val;
 461
 462		grc_code = GRC_CODE_FAST(val) |
 463			GRC_CODE_SLOW(val) |
 464			GRC_CODE_NOM(val);
 465		intel_de_write(display, BXT_PORT_REF_DW6(phy), grc_code);
 466		intel_de_rmw(display, BXT_PORT_REF_DW8(phy),
 467			     0, GRC_DIS | GRC_RDY_OVRD);
 468	}
 469
 470	if (phy_info->reset_delay)
 471		udelay(phy_info->reset_delay);
 472
 473	intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
 474}
 475
 476void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy)
 477{
 478	const struct bxt_dpio_phy_info *phy_info;
 479
 480	phy_info = bxt_get_phy_info(display, phy);
 481
 482	intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
 483
 484	intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
 485}
 486
 487void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
 488{
 489	const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(display, phy);
 490	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
 491	bool was_enabled;
 492
 493	lockdep_assert_held(&display->power.domains.lock);
 494
 495	was_enabled = true;
 496	if (rcomp_phy != -1)
 497		was_enabled = bxt_dpio_phy_is_enabled(display, rcomp_phy);
 498
 499	/*
 500	 * We need to copy the GRC calibration value from rcomp_phy,
 501	 * so make sure it's powered up.
 502	 */
 503	if (!was_enabled)
 504		_bxt_dpio_phy_init(display, rcomp_phy);
 505
 506	_bxt_dpio_phy_init(display, phy);
 507
 508	if (!was_enabled)
 509		bxt_dpio_phy_uninit(display, rcomp_phy);
 510}
 511
 512static bool __printf(6, 7)
 513__phy_reg_verify_state(struct intel_display *display, enum dpio_phy phy,
 514		       i915_reg_t reg, u32 mask, u32 expected,
 515		       const char *reg_fmt, ...)
 516{
 517	struct va_format vaf;
 518	va_list args;
 519	u32 val;
 520
 521	val = intel_de_read(display, reg);
 522	if ((val & mask) == expected)
 523		return true;
 524
 525	va_start(args, reg_fmt);
 526	vaf.fmt = reg_fmt;
 527	vaf.va = &args;
 528
 529	drm_dbg(display->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
 530			 "current %08x, expected %08x (mask %08x)\n",
 531			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
 532			 mask);
 533
 534	va_end(args);
 535
 536	return false;
 537}
 538
 539bool bxt_dpio_phy_verify_state(struct intel_display *display,
 540			       enum dpio_phy phy)
 541{
 542	const struct bxt_dpio_phy_info *phy_info;
 543	u32 mask;
 544	bool ok;
 545
 546	phy_info = bxt_get_phy_info(display, phy);
 547
 548#define _CHK(reg, mask, exp, fmt, ...)					\
 549	__phy_reg_verify_state(display, phy, reg, mask, exp, fmt,	\
 550			       ## __VA_ARGS__)
 551
 552	if (!bxt_dpio_phy_is_enabled(display, phy))
 553		return false;
 554
 555	ok = true;
 556
 557	/* PLL Rcomp code offset */
 558	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
 559		   IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4),
 560		   "BXT_PORT_CL1CM_DW9(%d)", phy);
 561	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
 562		   IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4),
 563		   "BXT_PORT_CL1CM_DW10(%d)", phy);
 564
 565	/* Power gating */
 566	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
 567	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
 568		   "BXT_PORT_CL1CM_DW28(%d)", phy);
 569
 570	if (phy_info->dual_channel)
 571		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
 572			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
 573			   "BXT_PORT_CL2CM_DW6(%d)", phy);
 574
 575	if (phy_info->rcomp_phy != -1) {
 576		u32 grc_code = display->state.bxt_phy_grc;
 577
 578		grc_code = GRC_CODE_FAST(grc_code) |
 579			GRC_CODE_SLOW(grc_code) |
 580			GRC_CODE_NOM(grc_code);
 581		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
 582		       GRC_CODE_NOM_MASK;
 583		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
 584			   "BXT_PORT_REF_DW6(%d)", phy);
 585
 586		mask = GRC_DIS | GRC_RDY_OVRD;
 587		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
 588			   "BXT_PORT_REF_DW8(%d)", phy);
 589	}
 590
 591	return ok;
 592#undef _CHK
 593}
 594
 595u8
 596bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
 597{
 598	switch (lane_count) {
 599	case 1:
 600		return 0;
 601	case 2:
 602		return BIT(2) | BIT(0);
 603	case 4:
 604		return BIT(3) | BIT(2) | BIT(0);
 605	default:
 606		MISSING_CASE(lane_count);
 607
 608		return 0;
 609	}
 610}
 611
 612void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
 613				      u8 lane_lat_optim_mask)
 614{
 615	struct intel_display *display = to_intel_display(encoder);
 616	enum port port = encoder->port;
 617	enum dpio_phy phy;
 618	enum dpio_channel ch;
 619	int lane;
 620
 621	bxt_port_to_phy_channel(display, port, &phy, &ch);
 622
 623	for (lane = 0; lane < 4; lane++) {
 624		/*
 625		 * Note that on CHV this flag is called UPAR, but has
 626		 * the same function.
 627		 */
 628		intel_de_rmw(display, BXT_PORT_TX_DW14_LN(phy, ch, lane),
 629			     LATENCY_OPTIM,
 630			     lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0);
 631	}
 632}
 633
 634u8
 635bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
 636{
 637	struct intel_display *display = to_intel_display(encoder);
 638	enum port port = encoder->port;
 639	enum dpio_phy phy;
 640	enum dpio_channel ch;
 641	int lane;
 642	u8 mask;
 643
 644	bxt_port_to_phy_channel(display, port, &phy, &ch);
 645
 646	mask = 0;
 647	for (lane = 0; lane < 4; lane++) {
 648		u32 val = intel_de_read(display,
 649					BXT_PORT_TX_DW14_LN(phy, ch, lane));
 650
 651		if (val & LATENCY_OPTIM)
 652			mask |= BIT(lane);
 653	}
 654
 655	return mask;
 656}
 657
 658enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
 659{
 660	switch (dig_port->base.port) {
 661	default:
 662		MISSING_CASE(dig_port->base.port);
 663		fallthrough;
 664	case PORT_B:
 665	case PORT_D:
 666		return DPIO_CH0;
 667	case PORT_C:
 668		return DPIO_CH1;
 669	}
 670}
 671
 672enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
 673{
 674	switch (dig_port->base.port) {
 675	default:
 676		MISSING_CASE(dig_port->base.port);
 677		fallthrough;
 678	case PORT_B:
 679	case PORT_C:
 680		return DPIO_PHY0;
 681	case PORT_D:
 682		return DPIO_PHY1;
 683	}
 684}
 685
 686enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
 687{
 688	switch (pipe) {
 689	default:
 690		MISSING_CASE(pipe);
 691		fallthrough;
 692	case PIPE_A:
 693	case PIPE_B:
 694		return DPIO_PHY0;
 695	case PIPE_C:
 696		return DPIO_PHY1;
 697	}
 698}
 699
 700enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
 701{
 702	switch (pipe) {
 703	default:
 704		MISSING_CASE(pipe);
 705		fallthrough;
 706	case PIPE_A:
 707	case PIPE_C:
 708		return DPIO_CH0;
 709	case PIPE_B:
 710		return DPIO_CH1;
 711	}
 712}
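
/*
 * Illustrative sketch (not part of the upstream file): the VLV/CHV
 * "cross the streams" case from the DOC comment above. The common
 * lane/PLL channel follows the pipe while the PCS/TX channel follows
 * the port, so e.g. driving port B (PCS/TX CH0) from pipe B selects
 * the CH1 PLL. The helper name is hypothetical.
 */
static bool __maybe_unused
vlv_dpio_streams_crossed_example(struct intel_digital_port *dig_port,
				 enum pipe pipe)
{
	/* common lane / PLL channel is picked by the pipe */
	enum dpio_channel pll_ch = vlv_pipe_to_channel(pipe);
	/* PCS/TX (data) channel is picked by the port */
	enum dpio_channel data_ch = vlv_dig_port_to_channel(dig_port);

	/* pipe B + port B: pll_ch == DPIO_CH1, data_ch == DPIO_CH0 */
	return pll_ch != data_ch;
}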
 713
 714void chv_set_phy_signal_level(struct intel_encoder *encoder,
 715			      const struct intel_crtc_state *crtc_state,
 716			      u32 deemph_reg_value, u32 margin_reg_value,
 717			      bool uniq_trans_scale)
 718{
 719	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 720	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 721	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 722	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
 723	u32 val;
 724	int i;
 725
 726	vlv_dpio_get(dev_priv);
 727
 728	/* Clear calc init */
 729	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
 730	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
 731	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
 732	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
 733	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
 734
 735	if (crtc_state->lane_count > 2) {
 736		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
 737		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
 738		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
 739		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
 740		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
 741	}
 742
 743	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
 744	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
 745	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
 746	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
 747
 748	if (crtc_state->lane_count > 2) {
 749		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
 750		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
 751		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
 752		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
 753	}
 754
 755	/* Program swing deemph */
 756	for (i = 0; i < crtc_state->lane_count; i++) {
 757		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
 758		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
 759		val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
 760		vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
 761	}
 762
 763	/* Program swing margin */
 764	for (i = 0; i < crtc_state->lane_count; i++) {
 765		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
 766
 767		val &= ~DPIO_SWING_MARGIN000_MASK;
 768		val |= DPIO_SWING_MARGIN000(margin_reg_value);
 769
 770		/*
 771		 * Supposedly this value shouldn't matter when unique transition
 772		 * scale is disabled, but in fact it does matter. Let's just
 773		 * always program the same value and hope it's OK.
 774		 */
 775		val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
 776		val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
 777
 778		vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
 779	}
 780
 781	/*
 782	 * The document said it needs to set bit 27 for ch0 and bit 26
 783	 * for ch1. Might be a typo in the doc.
 784	 * For now, for this unique transition scale selection, set bit
 785	 * 27 for ch0 and ch1.
 786	 */
 787	for (i = 0; i < crtc_state->lane_count; i++) {
 788		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
 789		if (uniq_trans_scale)
 790			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
 791		else
 792			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
 793		vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
 794	}
 795
 796	/* Start swing calculation */
 797	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
 798	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
 799	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
 800
 801	if (crtc_state->lane_count > 2) {
 802		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
 803		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
 804		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
 805	}
 806
 807	vlv_dpio_put(dev_priv);
 808}
 809
 810void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 811			      const struct intel_crtc_state *crtc_state,
 812			      bool reset)
 813{
 814	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 815	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 816	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 817	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
 818	u32 val;
 819
 820	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
 821	if (reset)
 822		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
 823	else
 824		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
 825	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
 826
 827	if (crtc_state->lane_count > 2) {
 828		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
 829		if (reset)
 830			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
 831		else
 832			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
 833		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
 834	}
 835
 836	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
 837	val |= CHV_PCS_REQ_SOFTRESET_EN;
 838	if (reset)
 839		val &= ~DPIO_PCS_CLK_SOFT_RESET;
 840	else
 841		val |= DPIO_PCS_CLK_SOFT_RESET;
 842	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
 843
 844	if (crtc_state->lane_count > 2) {
 845		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
 846		val |= CHV_PCS_REQ_SOFTRESET_EN;
 847		if (reset)
 848			val &= ~DPIO_PCS_CLK_SOFT_RESET;
 849		else
 850			val |= DPIO_PCS_CLK_SOFT_RESET;
 851		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
 852	}
 853}
 854
 855void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 856			    const struct intel_crtc_state *crtc_state)
 857{
 858	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 859	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 860	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 861	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 862	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
 863	enum pipe pipe = crtc->pipe;
 864	unsigned int lane_mask =
 865		intel_dp_unused_lane_mask(crtc_state->lane_count);
 866	u32 val;
 867
 868	/*
 869	 * Must trick the second common lane into life.
 870	 * Otherwise we can't even access the PLL.
 871	 */
 872	if (ch == DPIO_CH0 && pipe == PIPE_B)
 873		dig_port->release_cl2_override =
 874			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
 875
 876	chv_phy_powergate_lanes(encoder, true, lane_mask);
 877
 878	vlv_dpio_get(dev_priv);
 879
 880	/* Assert data lane reset */
 881	chv_data_lane_soft_reset(encoder, crtc_state, true);
 882
 883	/* program left/right clock distribution */
 884	if (pipe != PIPE_B) {
 885		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
 886		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
 887		if (ch == DPIO_CH0)
 888			val |= CHV_BUFLEFTENA1_FORCE;
 889		if (ch == DPIO_CH1)
 890			val |= CHV_BUFRIGHTENA1_FORCE;
 891		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
 892	} else {
 893		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
 894		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
 895		if (ch == DPIO_CH0)
 896			val |= CHV_BUFLEFTENA2_FORCE;
 897		if (ch == DPIO_CH1)
 898			val |= CHV_BUFRIGHTENA2_FORCE;
 899		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
 900	}
 901
 902	/* program clock channel usage */
 903	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
 904	val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
 905	if (pipe == PIPE_B)
 906		val |= DPIO_PCS_USEDCLKCHANNEL;
 907	else
 908		val &= ~DPIO_PCS_USEDCLKCHANNEL;
 909	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
 910
 911	if (crtc_state->lane_count > 2) {
 912		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
 913		val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
 914		if (pipe == PIPE_B)
 915			val |= DPIO_PCS_USEDCLKCHANNEL;
 916		else
 917			val &= ~DPIO_PCS_USEDCLKCHANNEL;
 918		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
 919	}
 920
 921	/*
  922	 * This is a bit weird since generally CL
 923	 * matches the pipe, but here we need to
 924	 * pick the CL based on the port.
 925	 */
 926	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
 927	if (pipe == PIPE_B)
 928		val |= CHV_CMN_USEDCLKCHANNEL;
 929	else
 930		val &= ~CHV_CMN_USEDCLKCHANNEL;
 931	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
 932
 933	vlv_dpio_put(dev_priv);
 934}
 935
 936void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 937				const struct intel_crtc_state *crtc_state)
 938{
 939	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 940	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 941	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 942	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 943	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
 944	int data, i, stagger;
 945	u32 val;
 946
 947	vlv_dpio_get(dev_priv);
 948
 949	/* allow hardware to manage TX FIFO reset source */
 950	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
 951	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
 952	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
 953
 954	if (crtc_state->lane_count > 2) {
 955		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
 956		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
 957		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
 958	}
 959
  960	/* Program Tx lane latency optimal setting */
 961	for (i = 0; i < crtc_state->lane_count; i++) {
 962		/* Set the upar bit */
 963		if (crtc_state->lane_count == 1)
 964			data = 0;
 965		else
 966			data = (i == 1) ? 0 : DPIO_UPAR;
 967		vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
 968	}
 969
 970	/* Data lane stagger programming */
 971	if (crtc_state->port_clock > 270000)
 972		stagger = 0x18;
 973	else if (crtc_state->port_clock > 135000)
 974		stagger = 0xd;
 975	else if (crtc_state->port_clock > 67500)
 976		stagger = 0x7;
 977	else if (crtc_state->port_clock > 33750)
 978		stagger = 0x4;
 979	else
 980		stagger = 0x2;
 981
 982	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
 983	val |= DPIO_TX2_STAGGER_MASK(0x1f);
 984	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
 985
 986	if (crtc_state->lane_count > 2) {
 987		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
 988		val |= DPIO_TX2_STAGGER_MASK(0x1f);
 989		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
 990	}
 991
 992	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
 993		       DPIO_LANESTAGGER_STRAP(stagger) |
 994		       DPIO_LANESTAGGER_STRAP_OVRD |
 995		       DPIO_TX1_STAGGER_MASK(0x1f) |
 996		       DPIO_TX1_STAGGER_MULT(6) |
 997		       DPIO_TX2_STAGGER_MULT(0));
 998
 999	if (crtc_state->lane_count > 2) {
1000		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
1001			       DPIO_LANESTAGGER_STRAP(stagger) |
1002			       DPIO_LANESTAGGER_STRAP_OVRD |
1003			       DPIO_TX1_STAGGER_MASK(0x1f) |
1004			       DPIO_TX1_STAGGER_MULT(7) |
1005			       DPIO_TX2_STAGGER_MULT(5));
1006	}
1007
1008	/* Deassert data lane reset */
1009	chv_data_lane_soft_reset(encoder, crtc_state, false);
1010
1011	vlv_dpio_put(dev_priv);
1012}
1013
1014void chv_phy_release_cl2_override(struct intel_encoder *encoder)
1015{
1016	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1017	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1018
1019	if (dig_port->release_cl2_override) {
1020		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
1021		dig_port->release_cl2_override = false;
1022	}
1023}
1024
1025void chv_phy_post_pll_disable(struct intel_encoder *encoder,
1026			      const struct intel_crtc_state *old_crtc_state)
1027{
1028	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1029	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1030	enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
1031	u32 val;
1032
1033	vlv_dpio_get(dev_priv);
1034
1035	/* disable left/right clock distribution */
1036	if (pipe != PIPE_B) {
1037		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
1038		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1039		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
1040	} else {
1041		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
1042		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1043		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
1044	}
1045
1046	vlv_dpio_put(dev_priv);
1047
1048	/*
1049	 * Leave the power down bit cleared for at least one
1050	 * lane so that chv_powergate_phy_ch() will power
1051	 * on something when the channel is otherwise unused.
1052	 * When the port is off and the override is removed
1053	 * the lanes power down anyway, so otherwise it doesn't
1054	 * really matter what the state of power down bits is
1055	 * after this.
1056	 */
1057	chv_phy_powergate_lanes(encoder, false, 0x0);
1058}
1059
1060void vlv_set_phy_signal_level(struct intel_encoder *encoder,
1061			      const struct intel_crtc_state *crtc_state,
1062			      u32 demph_reg_value, u32 preemph_reg_value,
1063			      u32 uniqtranscale_reg_value, u32 tx3_demph)
1064{
1065	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1066	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1067	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
1068	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
1069
1070	vlv_dpio_get(dev_priv);
1071
1072	vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
1073	vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
1074	vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
1075			 uniqtranscale_reg_value);
1076	vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
1077
1078	if (tx3_demph)
1079		vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
1080
1081	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
1082	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
1083	vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
1084
1085	vlv_dpio_put(dev_priv);
1086}
1087
1088void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
1089			    const struct intel_crtc_state *crtc_state)
1090{
1091	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1092	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1093	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
1094	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
1095
1096	/* Program Tx lane resets to default */
1097	vlv_dpio_get(dev_priv);
1098
1099	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
1100		       DPIO_PCS_TX_LANE2_RESET |
1101		       DPIO_PCS_TX_LANE1_RESET);
1102	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
1103		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1104		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1105		       DPIO_PCS_CLK_DATAWIDTH_8_10 |
1106		       DPIO_PCS_CLK_SOFT_RESET);
1107
1108	/* Fix up inter-pair skew failure */
1109	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
1110	vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
1111	vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
1112
1113	vlv_dpio_put(dev_priv);
1114}
1115
1116void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
1117				const struct intel_crtc_state *crtc_state)
1118{
1119	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1120	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1121	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1122	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1123	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
1124	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
1125	enum pipe pipe = crtc->pipe;
1126	u32 val;
1127
1128	vlv_dpio_get(dev_priv);
1129
1130	/* Enable clock channels for this port */
1131	val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
1132	if (pipe == PIPE_B)
1133		val |= DPIO_PCS_USEDCLKCHANNEL;
1134	val |= 0xc4;
1135	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
1136
1137	/* Program lane clock */
1138	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
1139	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
1140
1141	vlv_dpio_put(dev_priv);
1142}
1143
1144void vlv_phy_reset_lanes(struct intel_encoder *encoder,
1145			 const struct intel_crtc_state *old_crtc_state)
1146{
1147	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1148	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1149	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
1150	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
1151
1152	vlv_dpio_get(dev_priv);
1153	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
1154	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
1155	vlv_dpio_put(dev_priv);
1156}
v5.9
   1/*
   2 * Copyright © 2014-2016 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include "display/intel_dp.h"
  25
 
 
 
 
  26#include "intel_display_types.h"
 
  27#include "intel_dpio_phy.h"
  28#include "intel_sideband.h"
 
  29
  30/**
  31 * DOC: DPIO
  32 *
  33 * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
  34 * ports. DPIO is the name given to such a display PHY. These PHYs
  35 * don't follow the standard programming model using direct MMIO
  36 * registers, and instead their registers must be accessed trough IOSF
  37 * sideband. VLV has one such PHY for driving ports B and C, and CHV
  38 * adds another PHY for driving port D. Each PHY responds to specific
  39 * IOSF-SB port.
  40 *
  41 * Each display PHY is made up of one or two channels. Each channel
  42 * houses a common lane part which contains the PLL and other common
  43 * logic. CH0 common lane also contains the IOSF-SB logic for the
  44 * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
  45 * must be running when any DPIO registers are accessed.
  46 *
  47 * In addition to having their own registers, the PHYs are also
  48 * controlled through some dedicated signals from the display
  49 * controller. These include PLL reference clock enable, PLL enable,
  50 * and CRI clock selection, for example.
  51 *
  52 * Eeach channel also has two splines (also called data lanes), and
  53 * each spline is made up of one Physical Access Coding Sub-Layer
  54 * (PCS) block and two TX lanes. So each channel has two PCS blocks
  55 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
  56 * data/clock pairs depending on the output type.
  57 *
  58 * Additionally the PHY also contains an AUX lane with AUX blocks
  59 * for each channel. This is used for DP AUX communication, but
  60 * this fact isn't really relevant for the driver since AUX is
  61 * controlled from the display controller side. No DPIO registers
  62 * need to be accessed during AUX communication,
  63 *
  64 * Generally on VLV/CHV the common lane corresponds to the pipe and
  65 * the spline (PCS/TX) corresponds to the port.
  66 *
  67 * For dual channel PHY (VLV/CHV):
  68 *
  69 *  pipe A == CMN/PLL/REF CH0
  70 *
  71 *  pipe B == CMN/PLL/REF CH1
  72 *
  73 *  port B == PCS/TX CH0
  74 *
  75 *  port C == PCS/TX CH1
  76 *
  77 * This is especially important when we cross the streams
  78 * ie. drive port B with pipe B, or port C with pipe A.
  79 *
  80 * For single channel PHY (CHV):
  81 *
  82 *  pipe C == CMN/PLL/REF CH0
  83 *
  84 *  port D == PCS/TX CH0
  85 *
  86 * On BXT the entire PHY channel corresponds to the port. That means
  87 * the PLL is also now associated with the port rather than the pipe,
  88 * and so the clock needs to be routed to the appropriate transcoder.
  89 * Port A PLL is directly connected to transcoder EDP and port B/C
  90 * PLLs can be routed to any transcoder A/B/C.
  91 *
  92 * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
  93 * digital port D (CHV) or port A (BXT). ::
  94 *
  95 *
  96 *     Dual channel PHY (VLV/CHV/BXT)
  97 *     ---------------------------------
  98 *     |      CH0      |      CH1      |
  99 *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
 100 *     |---------------|---------------| Display PHY
 101 *     | PCS01 | PCS23 | PCS01 | PCS23 |
 102 *     |-------|-------|-------|-------|
 103 *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
 104 *     ---------------------------------
 105 *     |     DDI0      |     DDI1      | DP/HDMI ports
 106 *     ---------------------------------
 107 *
 108 *     Single channel PHY (CHV/BXT)
 109 *     -----------------
 110 *     |      CH0      |
 111 *     |  CMN/PLL/REF  |
 112 *     |---------------| Display PHY
 113 *     | PCS01 | PCS23 |
 114 *     |-------|-------|
 115 *     |TX0|TX1|TX2|TX3|
 116 *     -----------------
 117 *     |     DDI2      | DP/HDMI port
 118 *     -----------------
 119 */
 120
 121/**
 122 * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
 123 */
 124struct bxt_ddi_phy_info {
 125	/**
 126	 * @dual_channel: true if this phy has a second channel.
 127	 */
 128	bool dual_channel;
 129
 130	/**
 131	 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
 132	 * Otherwise the GRC value will be copied from the phy indicated by
 133	 * this field.
 134	 */
 135	enum dpio_phy rcomp_phy;
 136
 137	/**
 138	 * @reset_delay: delay in us to wait before setting the common reset
 139	 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
 140	 */
 141	int reset_delay;
 142
 143	/**
 144	 * @pwron_mask: Mask with the appropriate bit set that would cause the
 145	 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
 146	 */
 147	u32 pwron_mask;
 148
 149	/**
 150	 * @channel: struct containing per channel information.
 151	 */
 152	struct {
 153		/**
 154		 * @channel.port: which port maps to this channel.
 155		 */
 156		enum port port;
 157	} channel[2];
 158};
 159
 160static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
 161	[DPIO_PHY0] = {
 162		.dual_channel = true,
 163		.rcomp_phy = DPIO_PHY1,
 164		.pwron_mask = BIT(0),
 165
 166		.channel = {
 167			[DPIO_CH0] = { .port = PORT_B },
 168			[DPIO_CH1] = { .port = PORT_C },
 169		}
 170	},
 171	[DPIO_PHY1] = {
 172		.dual_channel = false,
 173		.rcomp_phy = -1,
 174		.pwron_mask = BIT(1),
 175
 176		.channel = {
 177			[DPIO_CH0] = { .port = PORT_A },
 178		}
 179	},
 180};
 181
 182static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
 183	[DPIO_PHY0] = {
 184		.dual_channel = false,
 185		.rcomp_phy = DPIO_PHY1,
 186		.pwron_mask = BIT(0),
 187		.reset_delay = 20,
 188
 189		.channel = {
 190			[DPIO_CH0] = { .port = PORT_B },
 191		}
 192	},
 193	[DPIO_PHY1] = {
 194		.dual_channel = false,
 195		.rcomp_phy = -1,
 196		.pwron_mask = BIT(3),
 197		.reset_delay = 20,
 198
 199		.channel = {
 200			[DPIO_CH0] = { .port = PORT_A },
 201		}
 202	},
 203	[DPIO_PHY2] = {
 204		.dual_channel = false,
 205		.rcomp_phy = DPIO_PHY1,
 206		.pwron_mask = BIT(1),
 207		.reset_delay = 20,
 208
 209		.channel = {
 210			[DPIO_CH0] = { .port = PORT_C },
 211		}
 212	},
 213};
 214
 215static const struct bxt_ddi_phy_info *
 216bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
 217{
 
 
 218	if (IS_GEMINILAKE(dev_priv)) {
 219		*count =  ARRAY_SIZE(glk_ddi_phy_info);
 220		return glk_ddi_phy_info;
 221	} else {
 222		*count =  ARRAY_SIZE(bxt_ddi_phy_info);
 223		return bxt_ddi_phy_info;
 224	}
 225}
 226
 227static const struct bxt_ddi_phy_info *
 228bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 229{
 230	int count;
 231	const struct bxt_ddi_phy_info *phy_list =
 232		bxt_get_phy_list(dev_priv, &count);
 233
 234	return &phy_list[phy];
 235}
 236
 237void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
 238			     enum dpio_phy *phy, enum dpio_channel *ch)
 239{
 240	const struct bxt_ddi_phy_info *phy_info, *phys;
 241	int i, count;
 242
 243	phys = bxt_get_phy_list(dev_priv, &count);
 244
 245	for (i = 0; i < count; i++) {
 246		phy_info = &phys[i];
 247
 248		if (port == phy_info->channel[DPIO_CH0].port) {
 249			*phy = i;
 250			*ch = DPIO_CH0;
 251			return;
 252		}
 253
 254		if (phy_info->dual_channel &&
 255		    port == phy_info->channel[DPIO_CH1].port) {
 256			*phy = i;
 257			*ch = DPIO_CH1;
 258			return;
 259		}
 260	}
 261
 262	drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
 263		 port_name(port));
 264	*phy = DPIO_PHY0;
 265	*ch = DPIO_CH0;
 266}
 267
 268void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
 269				  enum port port, u32 margin, u32 scale,
 270				  u32 enable, u32 deemphasis)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 271{
 272	u32 val;
 
 
 273	enum dpio_phy phy;
 274	enum dpio_channel ch;
 
 
 
 
 275
 276	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 277
 278	/*
 279	 * While we write to the group register to program all lanes at once we
 280	 * can read only lane registers and we pick lanes 0/1 for that.
 281	 */
 282	val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
 283	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
 284	intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
 285
 286	val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
 287	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
 288	val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
 289	intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
 290
 291	val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
 292	val &= ~SCALE_DCOMP_METHOD;
 293	if (enable)
 294		val |= SCALE_DCOMP_METHOD;
 
 295
 296	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
 297		drm_err(&dev_priv->drm,
 298			"Disabled scaling while ouniqetrangenmethod was set");
 299
 300	intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
 
 
 
 
 
 
 
 
 
 301
 302	val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
 303	val &= ~DE_EMPHASIS;
 304	val |= deemphasis << DEEMPH_SHIFT;
 305	intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
 306
 307	val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
 308	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
 309	intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
 
 
 
 
 
 310}
 311
 312bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 313			    enum dpio_phy phy)
 314{
 315	const struct bxt_ddi_phy_info *phy_info;
 316
 317	phy_info = bxt_get_phy_info(dev_priv, phy);
 318
 319	if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
 320		return false;
 321
 322	if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
 323	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
 324		drm_dbg(&dev_priv->drm,
 325			"DDI PHY %d powered, but power hasn't settled\n", phy);
 326
 327		return false;
 328	}
 329
 330	if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
 331		drm_dbg(&dev_priv->drm,
 332			"DDI PHY %d powered, but still in reset\n", phy);
 333
 334		return false;
 335	}
 336
 337	return true;
 338}
 339
 340static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 341{
 342	u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
 343
 344	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
 345}
 346
 347static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
 348				  enum dpio_phy phy)
 349{
 350	if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
 351				  GRC_DONE, 10))
 352		drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
 353			phy);
 354}
 355
 356static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 357			      enum dpio_phy phy)
 358{
 359	const struct bxt_ddi_phy_info *phy_info;
 360	u32 val;
 361
 362	phy_info = bxt_get_phy_info(dev_priv, phy);
 363
 364	if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
 365		/* Still read out the GRC value for state verification */
 366		if (phy_info->rcomp_phy != -1)
 367			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
 368
 369		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
 370			drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
 371				"won't reprogram it\n", phy);
 372			return;
 373		}
 374
 375		drm_dbg(&dev_priv->drm,
 376			"DDI PHY %d enabled with invalid state, "
 377			"force reprogramming it\n", phy);
 378	}
 379
 380	val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
 381	val |= phy_info->pwron_mask;
 382	intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
 383
 384	/*
 385	 * The PHY registers start out inaccessible and respond to reads with
 386	 * all 1s.  Eventually they become accessible as they power up, then
 387	 * the reserved bit will give the default 0.  Poll on the reserved bit
 388	 * becoming 0 to find when the PHY is accessible.
 389	 * The flag should get set in 100us according to the HW team, but
 390	 * use 1ms due to occasional timeouts observed with that.
 391	 */
 392	if (intel_wait_for_register_fw(&dev_priv->uncore,
 393				       BXT_PORT_CL1CM_DW0(phy),
 394				       PHY_RESERVED | PHY_POWER_GOOD,
 395				       PHY_POWER_GOOD,
 396				       1))
 397		drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
 398			phy);
 399
 400	/* Program PLL Rcomp code offset */
 401	val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
 402	val &= ~IREF0RC_OFFSET_MASK;
 403	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
 404	intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
 405
 406	val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
 407	val &= ~IREF1RC_OFFSET_MASK;
 408	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
 409	intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
 410
 411	/* Program power gating */
 412	val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
 413	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
 414		SUS_CLK_CONFIG;
 415	intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
 416
 417	if (phy_info->dual_channel) {
 418		val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
 419		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
 420		intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
 421	}
 422
 423	if (phy_info->rcomp_phy != -1) {
 424		u32 grc_code;
 425
 426		bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
 427
 428		/*
 429		 * PHY0 isn't connected to an RCOMP resistor so copy over
 430		 * the corresponding calibrated value from PHY1, and disable
 431		 * the automatic calibration on PHY0.
 432		 */
 433		val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
 434							  phy_info->rcomp_phy);
 435		grc_code = val << GRC_CODE_FAST_SHIFT |
 436			   val << GRC_CODE_SLOW_SHIFT |
 437			   val;
 438		intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
 439
 440		val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
 441		val |= GRC_DIS | GRC_RDY_OVRD;
 442		intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
 443	}
 444
 445	if (phy_info->reset_delay)
 446		udelay(phy_info->reset_delay);
 447
 448	val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
 449	val |= COMMON_RESET_DIS;
 450	intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
 451}
 452
 453void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 454{
 455	const struct bxt_ddi_phy_info *phy_info;
 456	u32 val;
 457
 458	phy_info = bxt_get_phy_info(dev_priv, phy);
 459
 460	val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
 461	val &= ~COMMON_RESET_DIS;
 462	intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
 463
 464	val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
 465	val &= ~phy_info->pwron_mask;
 466	intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
 467}
 468
 469void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 470{
 471	const struct bxt_ddi_phy_info *phy_info =
 472		bxt_get_phy_info(dev_priv, phy);
 473	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
 474	bool was_enabled;
 475
 476	lockdep_assert_held(&dev_priv->power_domains.lock);
 477
 478	was_enabled = true;
 479	if (rcomp_phy != -1)
 480		was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
 481
 482	/*
 483	 * We need to copy the GRC calibration value from rcomp_phy,
 484	 * so make sure it's powered up.
 485	 */
 486	if (!was_enabled)
 487		_bxt_ddi_phy_init(dev_priv, rcomp_phy);
 488
 489	_bxt_ddi_phy_init(dev_priv, phy);
 490
 491	if (!was_enabled)
 492		bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
 493}
 494
 495static bool __printf(6, 7)
 496__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 497		       i915_reg_t reg, u32 mask, u32 expected,
 498		       const char *reg_fmt, ...)
 499{
 500	struct va_format vaf;
 501	va_list args;
 502	u32 val;
 503
 504	val = intel_de_read(dev_priv, reg);
 505	if ((val & mask) == expected)
 506		return true;
 507
 508	va_start(args, reg_fmt);
 509	vaf.fmt = reg_fmt;
 510	vaf.va = &args;
 511
 512	drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
 513			 "current %08x, expected %08x (mask %08x)\n",
 514			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
 515			 mask);
 516
 517	va_end(args);
 518
 519	return false;
 520}
 521
 522bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
 523			      enum dpio_phy phy)
 524{
 525	const struct bxt_ddi_phy_info *phy_info;
 526	u32 mask;
 527	bool ok;
 528
 529	phy_info = bxt_get_phy_info(dev_priv, phy);
 530
 531#define _CHK(reg, mask, exp, fmt, ...)					\
 532	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,	\
 533			       ## __VA_ARGS__)
 534
 535	if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
 536		return false;
 537
 538	ok = true;
 539
 540	/* PLL Rcomp code offset */
 541	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
 542		    IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
 543		    "BXT_PORT_CL1CM_DW9(%d)", phy);
 544	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
 545		    IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
 546		    "BXT_PORT_CL1CM_DW10(%d)", phy);
 547
 548	/* Power gating */
 549	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
 550	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
 551		    "BXT_PORT_CL1CM_DW28(%d)", phy);
 552
 553	if (phy_info->dual_channel)
 554		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
 555			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
 556			   "BXT_PORT_CL2CM_DW6(%d)", phy);
 557
 558	if (phy_info->rcomp_phy != -1) {
 559		u32 grc_code = dev_priv->bxt_phy_grc;
 560
 561		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
 562			   grc_code << GRC_CODE_SLOW_SHIFT |
 563			   grc_code;
 564		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
 565		       GRC_CODE_NOM_MASK;
 566		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
 567			   "BXT_PORT_REF_DW6(%d)", phy);
 568
 569		mask = GRC_DIS | GRC_RDY_OVRD;
 570		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
 571			    "BXT_PORT_REF_DW8(%d)", phy);
 572	}
 573
 574	return ok;
 575#undef _CHK
 576}
 577
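/*
 * Map a lane count to the mask of lanes that need the latency
 * optimization bit: none for 1 lane, lanes 0 and 2 for 2 lanes,
 * and lanes 0, 2 and 3 for 4 lanes.
 */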
 578u8
 579bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
 580{
 581	switch (lane_count) {
 582	case 1:
 583		return 0;
 584	case 2:
 585		return BIT(2) | BIT(0);
 586	case 4:
 587		return BIT(3) | BIT(2) | BIT(0);
 588	default:
 589		MISSING_CASE(lane_count);
 590
 591		return 0;
 592	}
 593}
 594
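/*
 * Program the LATENCY_OPTIM bit in BXT_PORT_TX_DW14 of each lane
 * according to the given mask.
 */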
 595void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
 596				     u8 lane_lat_optim_mask)
 597{
 598	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 599	enum port port = encoder->port;
 600	enum dpio_phy phy;
 601	enum dpio_channel ch;
 602	int lane;
 603
 604	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 605
 606	for (lane = 0; lane < 4; lane++) {
 607		u32 val = intel_de_read(dev_priv,
 608					BXT_PORT_TX_DW14_LN(phy, ch, lane));
 609
 610		/*
 611		 * Note that on CHV this flag is called UPAR, but has
 612		 * the same function.
 613		 */
 614		val &= ~LATENCY_OPTIM;
 615		if (lane_lat_optim_mask & BIT(lane))
 616			val |= LATENCY_OPTIM;
 617
 618		intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
 619			       val);
 620	}
 621}
 622
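/* Read back the per-lane LATENCY_OPTIM settings as a mask. */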
 623u8
 624bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
 625{
 626	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 627	enum port port = encoder->port;
 628	enum dpio_phy phy;
 629	enum dpio_channel ch;
 630	int lane;
 631	u8 mask;
 632
 633	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 634
 635	mask = 0;
 636	for (lane = 0; lane < 4; lane++) {
 637		u32 val = intel_de_read(dev_priv,
 638					BXT_PORT_TX_DW14_LN(phy, ch, lane));
 639
 640		if (val & LATENCY_OPTIM)
 641			mask |= BIT(lane);
 642	}
 643
 644	return mask;
 645}
 646
 647
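/*
 * Program the CHV TX swing and de-emphasis values: clear the swing
 * calculation bits and the PCS margins, write the de-emphasis and
 * margin value to each TX lane, select whether the unique transition
 * scale is used, and finally restart the swing calculation.
 */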
 648void chv_set_phy_signal_level(struct intel_encoder *encoder,
 649			      u32 deemph_reg_value, u32 margin_reg_value,
 650			      bool uniq_trans_scale)
 651{
 652	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 653	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 654	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 655	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 656	enum pipe pipe = intel_crtc->pipe;
 657	u32 val;
 658	int i;
 659
 660	vlv_dpio_get(dev_priv);
 661
 662	/* Clear calc init */
 663	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
 664	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
 665	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
 666	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
 667	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 668
 669	if (intel_crtc->config->lane_count > 2) {
 670		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
 671		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
 672		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
 673		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
 674		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 675	}
 676
 677	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
 678	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
 679	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
 680	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
 681
 682	if (intel_crtc->config->lane_count > 2) {
 683		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
 684		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
 685		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
 686		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
 687	}
 688
 689	/* Program swing deemph */
 690	for (i = 0; i < intel_crtc->config->lane_count; i++) {
 691		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
 692		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
 693		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
 694		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
 695	}
 696
 697	/* Program swing margin */
 698	for (i = 0; i < intel_crtc->config->lane_count; i++) {
 699		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
 700
 701		val &= ~DPIO_SWING_MARGIN000_MASK;
 702		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
 703
 704		/*
 705		 * Supposedly this value shouldn't matter when unique transition
 706		 * scale is disabled, but in fact it does matter. Let's just
 707		 * always program the same value and hope it's OK.
 708		 */
 709		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
 710		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
 711
 712		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
 713	}
 714
 715	/*
 716	 * The documentation says to set bit 27 for ch0 and bit 26
 717	 * for ch1, which might be a typo in the doc.
 718	 * For now, set bit 27 for both ch0 and ch1 for this unique
 719	 * transition scale selection.
 720	 */
 721	for (i = 0; i < intel_crtc->config->lane_count; i++) {
 722		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
 723		if (uniq_trans_scale)
 724			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
 725		else
 726			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
 727		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
 728	}
 729
 730	/* Start swing calculation */
 731	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
 732	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
 733	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 734
 735	if (intel_crtc->config->lane_count > 2) {
 736		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
 737		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
 738		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 739	}
 740
 741	vlv_dpio_put(dev_priv);
 742}
 743
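/*
 * Assert or deassert the data lane and PCS clock soft resets for the
 * first lane pair, and for the second pair when more than two lanes
 * are in use.
 */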
 744void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 745			      const struct intel_crtc_state *crtc_state,
 746			      bool reset)
 747{
 748	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 749	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
 750	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 751	enum pipe pipe = crtc->pipe;
 752	u32 val;
 753
 754	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
 755	if (reset)
 756		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
 757	else
 758		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
 759	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 760
 761	if (crtc_state->lane_count > 2) {
 762		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
 763		if (reset)
 764			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
 765		else
 766			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
 767		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 768	}
 769
 770	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
 771	val |= CHV_PCS_REQ_SOFTRESET_EN;
 772	if (reset)
 773		val &= ~DPIO_PCS_CLK_SOFT_RESET;
 774	else
 775		val |= DPIO_PCS_CLK_SOFT_RESET;
 776	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 777
 778	if (crtc_state->lane_count > 2) {
 779		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
 780		val |= CHV_PCS_REQ_SOFTRESET_EN;
 781		if (reset)
 782			val &= ~DPIO_PCS_CLK_SOFT_RESET;
 783		else
 784			val |= DPIO_PCS_CLK_SOFT_RESET;
 785		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 786	}
 787}
 788
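/*
 * PHY setup done before enabling the PLL: force the second common lane
 * on when needed so the PLL registers are accessible, power gate the
 * unused lanes, hold the data lanes in reset and program the clock
 * distribution and clock channel usage for this pipe/port.
 */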
 789void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 790			    const struct intel_crtc_state *crtc_state)
 791{
 792	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 793	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 794	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 795	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 796	enum pipe pipe = crtc->pipe;
 797	unsigned int lane_mask =
 798		intel_dp_unused_lane_mask(crtc_state->lane_count);
 799	u32 val;
 800
 801	/*
 802	 * Must trick the second common lane into life.
 803	 * Otherwise we can't even access the PLL.
 804	 */
 805	if (ch == DPIO_CH0 && pipe == PIPE_B)
 806		dig_port->release_cl2_override =
 807			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
 808
 809	chv_phy_powergate_lanes(encoder, true, lane_mask);
 810
 811	vlv_dpio_get(dev_priv);
 812
 813	/* Assert data lane reset */
 814	chv_data_lane_soft_reset(encoder, crtc_state, true);
 815
 816	/* program left/right clock distribution */
 817	if (pipe != PIPE_B) {
 818		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
 819		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
 820		if (ch == DPIO_CH0)
 821			val |= CHV_BUFLEFTENA1_FORCE;
 822		if (ch == DPIO_CH1)
 823			val |= CHV_BUFRIGHTENA1_FORCE;
 824		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
 825	} else {
 826		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
 827		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
 828		if (ch == DPIO_CH0)
 829			val |= CHV_BUFLEFTENA2_FORCE;
 830		if (ch == DPIO_CH1)
 831			val |= CHV_BUFRIGHTENA2_FORCE;
 832		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
 833	}
 834
 835	/* program clock channel usage */
 836	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
 837	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
 838	if (pipe != PIPE_B)
 839		val &= ~CHV_PCS_USEDCLKCHANNEL;
 840	else
 841		val |= CHV_PCS_USEDCLKCHANNEL;
 842	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
 843
 844	if (crtc_state->lane_count > 2) {
 845		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
 846		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
 847		if (pipe != PIPE_B)
 848			val &= ~CHV_PCS_USEDCLKCHANNEL;
 849		else
 850			val |= CHV_PCS_USEDCLKCHANNEL;
 851		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
 852	}
 853
 854	/*
 855	 * This is a bit weird since generally CL
 856	 * matches the pipe, but here we need to
 857	 * pick the CL based on the port.
 858	 */
 859	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
 860	if (pipe != PIPE_B)
 861		val &= ~CHV_CMN_USEDCLKCHANNEL;
 862	else
 863		val |= CHV_CMN_USEDCLKCHANNEL;
 864	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
 865
 866	vlv_dpio_put(dev_priv);
 867}
 868
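/*
 * PHY setup done just before enabling the port: let the hardware manage
 * the TX FIFO reset source, program the per-lane UPAR bits and the data
 * lane staggering based on the port clock, then take the data lanes out
 * of reset.
 */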
 869void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 870				const struct intel_crtc_state *crtc_state)
 871{
 872	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 873	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 874	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 875	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 876	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
 877	enum pipe pipe = crtc->pipe;
 878	int data, i, stagger;
 879	u32 val;
 880
 881	vlv_dpio_get(dev_priv);
 882
 883	/* allow hardware to manage TX FIFO reset source */
 884	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
 885	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
 886	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 887
 888	if (crtc_state->lane_count > 2) {
 889		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
 890		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
 891		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 892	}
 893
 894	/* Program Tx lane latency optimal setting */
 895	for (i = 0; i < crtc_state->lane_count; i++) {
 896		/* Set the upar bit */
 897		if (crtc_state->lane_count == 1)
 898			data = 0x0;
 899		else
 900			data = (i == 1) ? 0x0 : 0x1;
 901		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
 902				data << DPIO_UPAR_SHIFT);
 903	}
 904
 905	/* Data lane stagger programming */
 906	if (crtc_state->port_clock > 270000)
 907		stagger = 0x18;
 908	else if (crtc_state->port_clock > 135000)
 909		stagger = 0xd;
 910	else if (crtc_state->port_clock > 67500)
 911		stagger = 0x7;
 912	else if (crtc_state->port_clock > 33750)
 913		stagger = 0x4;
 914	else
 915		stagger = 0x2;
 916
 917	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
 918	val |= DPIO_TX2_STAGGER_MASK(0x1f);
 919	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 920
 921	if (crtc_state->lane_count > 2) {
 922		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
 923		val |= DPIO_TX2_STAGGER_MASK(0x1f);
 924		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 925	}
 926
 927	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
 928		       DPIO_LANESTAGGER_STRAP(stagger) |
 929		       DPIO_LANESTAGGER_STRAP_OVRD |
 930		       DPIO_TX1_STAGGER_MASK(0x1f) |
 931		       DPIO_TX1_STAGGER_MULT(6) |
 932		       DPIO_TX2_STAGGER_MULT(0));
 933
 934	if (crtc_state->lane_count > 2) {
 935		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
 936			       DPIO_LANESTAGGER_STRAP(stagger) |
 937			       DPIO_LANESTAGGER_STRAP_OVRD |
 938			       DPIO_TX1_STAGGER_MASK(0x1f) |
 939			       DPIO_TX1_STAGGER_MULT(7) |
 940			       DPIO_TX2_STAGGER_MULT(5));
 941	}
 942
 943	/* Deassert data lane reset */
 944	chv_data_lane_soft_reset(encoder, crtc_state, false);
 945
 946	vlv_dpio_put(dev_priv);
 947}
 948
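/*
 * Drop the CL2 power gating override taken in chv_phy_pre_pll_enable(),
 * if any.
 */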
 949void chv_phy_release_cl2_override(struct intel_encoder *encoder)
 950{
 951	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 952	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 953
 954	if (dig_port->release_cl2_override) {
 955		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
 956		dig_port->release_cl2_override = false;
 957	}
 958}
 959
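/*
 * PHY cleanup after the PLL has been disabled: stop forcing the
 * left/right clock distribution buffers and drop the lane power
 * gating overrides.
 */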
 960void chv_phy_post_pll_disable(struct intel_encoder *encoder,
 961			      const struct intel_crtc_state *old_crtc_state)
 962{
 963	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 964	enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
 965	u32 val;
 966
 967	vlv_dpio_get(dev_priv);
 968
 969	/* disable left/right clock distribution */
 970	if (pipe != PIPE_B) {
 971		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
 972		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
 973		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
 974	} else {
 975		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
 976		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
 977		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
 978	}
 979
 980	vlv_dpio_put(dev_priv);
 981
 982	/*
 983	 * Leave the power down bit cleared for at least one
 984	 * lane so that chv_powergate_phy_ch() will power
 985	 * on something when the channel is otherwise unused.
 986	 * When the port is off and the override is removed
 987	 * the lanes power down anyway, so it doesn't really
 988	 * matter what state the power down bits are left in
 989	 * after this.
 990	 */
 991	chv_phy_powergate_lanes(encoder, false, 0x0);
 992}
 993
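/*
 * Program the VLV TX swing and pre-emphasis values: clear VLV_TX_DW5
 * first, write the de-emphasis, unique transition scale and
 * pre-emphasis values, then set DPIO_TX_OCALINIT_EN again once the new
 * values are in place.
 */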
 994void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 995			      u32 demph_reg_value, u32 preemph_reg_value,
 996			      u32 uniqtranscale_reg_value, u32 tx3_demph)
 997{
 998	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 999	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1000	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1001	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
1002	enum pipe pipe = intel_crtc->pipe;
1003
1004	vlv_dpio_get(dev_priv);
1005
1006	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
1007	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
1008	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
1009			 uniqtranscale_reg_value);
1010	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
1011
1012	if (tx3_demph)
1013		vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
1014
1015	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
1016	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
1017	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1018
1019	vlv_dpio_put(dev_priv);
1020}
1021
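/*
 * PHY setup done before enabling the PLL: program the TX lane resets
 * and PCS clock settings to their defaults and apply the inter-pair
 * skew fixup.
 */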
1022void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
1023			    const struct intel_crtc_state *crtc_state)
1024{
1025	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1026	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1027	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1028	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
1029	enum pipe pipe = crtc->pipe;
1030
1031	/* Program Tx lane resets to default */
1032	vlv_dpio_get(dev_priv);
1033
1034	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1035			 DPIO_PCS_TX_LANE2_RESET |
1036			 DPIO_PCS_TX_LANE1_RESET);
1037	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
1038			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1039			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1040			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1041				 DPIO_PCS_CLK_SOFT_RESET);
1042
1043	/* Fix up inter-pair skew failure */
1044	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1045	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1046	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
1047
1048	vlv_dpio_put(dev_priv);
1049}
1050
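/*
 * PHY setup done just before enabling the port: enable the clock
 * channels for this port and program the lane clock registers.
 */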
1051void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
1052				const struct intel_crtc_state *crtc_state)
1053{
1054	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1055	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1056	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1057	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1058	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
1059	enum pipe pipe = crtc->pipe;
1060	u32 val;
1061
1062	vlv_dpio_get(dev_priv);
1063
1064	/* Enable clock channels for this port */
1065	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
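	/* the value read above is discarded; the register is rebuilt from scratch */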
1066	val = 0;
1067	if (pipe)
1068		val |= (1<<21);
1069	else
1070		val &= ~(1<<21);
1071	val |= 0x001000c4;
1072	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1073
1074	/* Program lane clock */
1075	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1076	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1077
1078	vlv_dpio_put(dev_priv);
1079}
1080
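/*
 * Put the TX lanes for this port back into reset by clearing the lane
 * reset bits in VLV_PCS_DW0, and write back a fixed value to
 * VLV_PCS_DW1.
 */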
1081void vlv_phy_reset_lanes(struct intel_encoder *encoder,
1082			 const struct intel_crtc_state *old_crtc_state)
1083{
1084	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1085	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1086	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1087	enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
1088	enum pipe pipe = crtc->pipe;
1089
1090	vlv_dpio_get(dev_priv);
1091	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
1092	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
1093	vlv_dpio_put(dev_priv);
1094}